[
  {
    "path": ".git-blame-ignore-revs",
    "content": "# Initial pre-commit reformat\ndf811d0deacebfd6cc77e8bf501d9b87ff006fb5\n"
  },
  {
    "path": ".gitattributes",
    "content": "# Set the default behavior to have all files normalized to Unix-style\n# line endings upon check-in.\n* text=auto\n\n# Declare files that will always have CRLF line endings on checkout.\n*.bat text eol=crlf\n\n# Denote all files that are truly binary and should not be modified.\n*.dll binary\n*.exp binary\n*.lib binary\n*.pdb binary\n*.exe binary\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "content": "Help us improve the Jupyter Enterprise Gateway project by reporting issues\nor asking questions.\n\n## Description\n\n## Screenshots / Logs\n\nIf applicable, add screenshots and/or logs to help explain your problem.\nTo generate better logs, please run the gateway with `--debug` command line parameter.\n\n## Environment\n\n- Enterprise Gateway Version \\[e.g. 1.x, 2.x, ...\\]\n- Platform: \\[e.g. YARN, Kubernetes ...\\]\n- Others \\[e.g. Jupyter Server 5.7, JupyterHub 1.0, etc\\]\n"
  },
  {
    "path": ".github/codeql/codeql-config.yml",
    "content": "name: \"Enterprise Gateway CodeQL config\"\n\nqueries:\n  - uses: security-and-quality\n\npaths-ignore:\n  - enterprise_gateway/tests\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  # Set update schedule for GitHub Actions\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      # Check for updates to GitHub Actions once a week (Mondays by default)\n      interval: \"weekly\"\n  # Set update schedule for pip\n  - package-ecosystem: \"pip\"\n    directory: \"/\"\n    schedule:\n      # Check for updates to Python deps once a week (Mondays by default)\n      interval: \"weekly\"\n"
  },
  {
    "path": ".github/workflows/build.yml",
    "content": "name: Builds\non:\n  push:\n  pull_request:\n\njobs:\n  build:\n    runs-on: ${{ matrix.os }}\n    env:\n      ASYNC_TEST_TIMEOUT: 60\n      KERNEL_LAUNCH_TIMEOUT: 120\n      CONDA_HOME: /usr/share/miniconda\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest]\n        python-version: [\"3.10\", \"3.11\"]\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n        with:\n          clean: true\n      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n      - name: Display dependency info\n        run: |\n          python --version\n          pip --version\n          conda --version\n      - name: Add SBT launcher\n        uses: sbt/setup-sbt@v1\n      - name: Install Python dependencies\n        run: |\n          pip install \".[test]\"\n      - name: Build and install Jupyter Enterprise Gateway\n        uses: nick-invision/retry@v3.0.0\n        with:\n          timeout_minutes: 10\n          max_attempts: 2\n          command: |\n            make clean dist enterprise-gateway-demo test-install-wheel\n      - name: Log current Python dependencies version\n        run: |\n          pip freeze\n      - name: Run unit tests\n        uses: nick-invision/retry@v3.0.0\n        with:\n          timeout_minutes: 3\n          max_attempts: 1\n          command: |\n            make test\n      - name: Run integration tests\n        run: |\n          # Run integration tests with debug output\n          make itest-yarn-debug\n      - name: Collect logs\n        if: success() || failure()\n        run: |\n          python --version\n          pip --version\n          pip list\n          echo \"==== Docker Container Logs ====\"\n          docker logs itest-yarn\n          echo \"==== Docker Container Status ====\"\n          docker ps -a\n          echo \"==== Enterprise Gateway Log ====\"\n          docker exec -it itest-yarn cat /usr/local/share/jupyter/enterprise-gateway.log || true\n      - 
name: Run linters\n        run: |\n          make lint\n      - name: Bump versions\n        run: |\n          pipx run tbump --dry-run --no-tag --no-push 100.100.100rc0\n\n  link_check:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n        with:\n          python_version: \"3.11\"\n      - name: Install Python dependencies\n        run: |\n          pip install \".[test]\"\n      - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1\n        with:\n          ignore_links: |-\n            http://my-gateway-server\\.com:8888|https://docs\\.openshift\\.com/.*|https://docs\\.redhat\\.com/.*\n\n  build_docs:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n      - name: Base Setup\n        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n        with:\n          python_version: \"3.11\"\n      - name: Build Docs\n        run: make docs\n\n  test_minimum_versions:\n    name: Test Minimum Versions\n    timeout-minutes: 20\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n        with:\n          python_version: \"3.11\"\n      - name: Install dependencies with minimum versions\n        run: |\n          pip install \".[test]\"\n      - name: Run the unit tests\n        run: |\n          pytest -vv -W default || pytest -vv -W default --lf\n\n  make_sdist:\n    name: Make SDist\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n        with:\n          python_version: \"3.11\"\n      - uses: jupyterlab/maintainer-tools/.github/actions/make-sdist@v1\n\n  test_sdist:\n    runs-on: ubuntu-latest\n    needs: [make_sdist]\n    name: Install from SDist and Test\n    
timeout-minutes: 20\n    steps:\n      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1\n        with:\n          python_version: \"3.11\"\n      - uses: jupyterlab/maintainer-tools/.github/actions/test-sdist@v1\n\n  python_tests_check: # This job does nothing and is only used for the branch protection\n    if: always()\n    needs:\n      - build\n      - link_check\n      - test_minimum_versions\n      - build_docs\n      - test_sdist\n    runs-on: ubuntu-latest\n    steps:\n      - name: Decide whether the needed jobs succeeded or failed\n        uses: re-actors/alls-green@release/v1\n        with:\n          jobs: ${{ toJSON(needs) }}\n"
  },
  {
    "path": ".github/workflows/codeql-analysis.yml",
    "content": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# You may wish to alter this file to override the set of languages analyzed,\n# or to provide custom queries or build logic.\n#\n# ******** NOTE ********\n# We have attempted to detect the languages in your repository. Please check\n# the `language` matrix defined below to confirm you have the correct set of\n# supported CodeQL languages.\n#\nname: \"CodeQL Checks\"\n\non:\n  push:\n    branches: [main]\n  pull_request:\n    # The branches below must be a subset of the branches above\n    branches: [main]\n  schedule:\n    - cron: \"24 7 * * 1\"\n\njobs:\n  analyze:\n    name: Analyze\n    runs-on: ubuntu-latest\n    permissions:\n      actions: read\n      contents: read\n      security-events: write\n\n    strategy:\n      fail-fast: false\n      matrix:\n        language: [\"python\"]\n        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]\n        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support\n\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n        with:\n          # We must fetch at least the immediate parents so that if this is\n          # a pull request then we can checkout the head.\n          fetch-depth: 2\n\n      # Initializes the CodeQL tools for scanning.\n      - name: Initialize CodeQL\n        uses: github/codeql-action/init@v3\n        with:\n          languages: ${{ matrix.language }}\n          config-file: ./.github/codeql/codeql-config.yml\n          # If you wish to specify custom queries, you can do so here or in a config file.\n          # By default, queries listed here will override any specified in a config file.\n          # Prefix the list here with \"+\" to use these queries and those in the config file.\n\n          # Details on CodeQL's query packs refer to : 
https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs\n          # queries: security-extended,security-and-quality\n\n      # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).\n      # If this step fails, then you should remove it and run the build manually (see below)\n      - name: Autobuild\n        uses: github/codeql-action/autobuild@v3\n\n      # ℹ️ Command-line programs to run using the OS shell.\n      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun\n\n      #   If the Autobuild fails above, remove it and uncomment the following three lines.\n      #   modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.\n\n      # - run: |\n      #   echo \"Run, Build Application using script\"\n      #   ./location_of_script_within_repo/buildscript.sh\n\n      - name: Perform CodeQL Analysis\n        uses: github/codeql-action/analyze@v3\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n.DS_Store\n.ipynb_checkpoints/\n\n# PyCharm\n.idea/\n*.iml\n\n# Build-related\n.image-*\n\n# Jekyll\n_site/\n.sass-cache/\n\n# Debug-related\n.kube/\n\n# vscode ide stuff\n*.code-workspace\n.history/\n.vscode/\n\n# jetbrains ide stuff\n*.iml\n.idea/\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "ci:\n  autoupdate_schedule: monthly\n\nrepos:\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.5.0\n    hooks:\n      - id: check-case-conflict\n      - id: check-ast\n      - id: check-docstring-first\n      - id: check-executables-have-shebangs\n      - id: check-added-large-files\n      - id: check-merge-conflict\n      - id: check-json\n      - id: check-toml\n      - id: check-yaml\n        exclude: etc/kubernetes/.*.yaml\n      - id: end-of-file-fixer\n      - id: trailing-whitespace\n\n  - repo: https://github.com/python-jsonschema/check-jsonschema\n    rev: 0.27.4\n    hooks:\n      - id: check-github-workflows\n\n  - repo: https://github.com/executablebooks/mdformat\n    rev: 0.7.17\n    hooks:\n      - id: mdformat\n        additional_dependencies:\n          [mdformat-gfm, mdformat-frontmatter, mdformat-footnote]\n\n  - repo: https://github.com/psf/black\n    rev: 24.2.0\n    hooks:\n      - id: black\n\n  - repo: https://github.com/charliermarsh/ruff-pre-commit\n    rev: v0.3.0\n    hooks:\n      - id: ruff\n        args: [\"--fix\"]\n"
  },
  {
    "path": ".readthedocs.yaml",
    "content": "version: 2\nbuild:\n  os: \"ubuntu-22.04\"\n  tools:\n    python: \"mambaforge-22.9\"\nsphinx:\n  configuration: docs/source/conf.py\nconda:\n  environment: docs/environment.yml\n"
  },
  {
    "path": "LICENSE.md",
    "content": "# Licensing terms\n\nThis project is licensed under the terms of the Modified BSD License\n(also known as New or Revised or 3-Clause BSD), as follows:\n\n- Copyright (c) 2001-2015, IPython Development Team\n- Copyright (c) 2015-, Jupyter Development Team\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this\nlist of conditions and the following disclaimer in the documentation and/or\nother materials provided with the distribution.\n\nNeither the name of the Jupyter Development Team nor the names of its\ncontributors may be used to endorse or promote products derived from this\nsoftware without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n## About the Jupyter Development Team\n\nThe Jupyter Development Team is the set of all contributors to the Jupyter project.\nThis includes all of the Jupyter Subprojects, which are the different repositories\nunder the [jupyter](https://github.com/jupyter/) GitHub organization.\n\nThe core team that coordinates development on GitHub can be found here:\nhttps://github.com/jupyter/.\n\n## Our copyright policy\n\nJupyter uses a shared copyright model. Each contributor maintains copyright\nover their contributions to Jupyter. But, it is important to note that these\ncontributions are typically only changes to the repositories. Thus, the Jupyter\nsource code, in its entirety is not the copyright of any single person or\ninstitution. Instead, it is the collective copyright of the entire Jupyter\nDevelopment Team. If individual contributors want to maintain a record of what\nchanges/contributions they have specific copyright on, they should indicate\ntheir copyright in the commit message of the change, when they commit the\nchange to one of the Jupyter repositories.\n\nWith this in mind, the following banner should be used in any source code file\nto indicate the copyright and license terms:\n\n```\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n```\n"
  },
  {
    "path": "Makefile",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n.PHONY: help clean clean-env dev dev-http docs install bdist sdist test release check_dists \\\n    clean-images clean-enterprise-gateway-demo clean-demo-base clean-kernel-images clean-enterprise-gateway \\\n    clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py \\\n    clean-kernel-tf-gpu-py clean-kernel-image-puller push-images push-enterprise-gateway-demo push-demo-base \\\n    push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r \\\n    push-kernel-scala push-kernel-tf-py push-kernel-tf-gpu-py push-kernel-image-puller publish helm-chart\n\nSA?=source activate\nENV:=enterprise-gateway-dev\nSHELL:=/bin/bash\nMULTIARCH_BUILD?=\nTARGET_ARCH?=undefined\n\nVERSION?=3.3.0.dev0\nSPARK_VERSION?=3.2.1\n\nifeq (dev, $(findstring dev, $(VERSION)))\n    TAG:=dev\nelse\n    TAG:=$(VERSION)\nendif\n\n\nWHEEL_FILES:=$(shell find . -type f ! -path \"./build/*\" ! -path \"./etc/*\" ! -path \"./docs/*\" ! -path \"./.git/*\" ! -path \"./.idea/*\" ! -path \"./dist/*\" ! -path \"./.image-*\" ! -path \"*/__pycache__/*\" )\nWHEEL_FILE:=dist/jupyter_enterprise_gateway-$(VERSION)-py3-none-any.whl\nSDIST_FILE:=dist/jupyter_enterprise_gateway-$(VERSION).tar.gz\nDIST_FILES=$(WHEEL_FILE) $(SDIST_FILE)\n\nHELM_DESIRED_VERSION:=v3.18.3  # Pin the version of helm to use (v3.18.3 is latest as of 6/21/25)\nHELM_CHART_VERSION:=$(shell grep version: etc/kubernetes/helm/enterprise-gateway/Chart.yaml | sed 's/version: //')\nHELM_CHART_PACKAGE:=dist/enterprise-gateway-$(HELM_CHART_VERSION).tgz\nHELM_CHART:=dist/jupyter_enterprise_gateway_helm-$(VERSION).tar.gz\nHELM_CHART_DIR:=etc/kubernetes/helm/enterprise-gateway\nHELM_CHART_FILES:=$(shell find $(HELM_CHART_DIR) -type f ! 
-name .DS_Store)\nHELM_INSTALL_DIR?=/usr/local/bin\n\nhelp:\n# http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html\n\t@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nenv: ## Make a dev environment\n\t-conda env create --file requirements.yml --name $(ENV)\n\t-conda env config vars set PYTHONPATH=$(PWD) --name $(ENV)\n\nactivate: ## Print instructions to activate the virtualenv (default: enterprise-gateway-dev)\n\t@echo \"Run \\`$(SA) $(ENV)\\` to activate the environment.\"\n\nclean: ## Make a clean source tree\n\t-rm -rf dist\n\t-rm -rf build\n\t-rm -rf *.egg-info\n\t-find . -name target -type d -exec rm -fr {} +\n\t-find . -name __pycache__  -type d -exec rm -fr {} +\n\t-find enterprise_gateway -name '*.pyc' -exec rm -fr {} +\n\t-find website -name '.sass-cache' -type d -exec rm -fr {} +\n\t-find website -name '_site' -type d -exec rm -fr {} +\n\t-find website -name 'build' -type d -exec rm -fr {} +\n\t-make -C docs clean\n\t-make -C etc clean\n\nclean-env: ## Remove conda env\n\t-conda env remove -n $(ENV) -y\n\nlint: ## Check code style\n\t@pip install -q -e \".[lint]\"\n\t@pip install -q pipx\n\truff check .\n\tblack --check --diff --color .\n\tmdformat --check *.md\n\tpipx run 'validate-pyproject[all]' pyproject.toml\n\tpipx run interrogate -v .\n\nrun-dev: test-install-wheel ## Make a server in jupyter_websocket mode\n\tpython enterprise_gateway\n\ndocs: ## Make HTML documentation\n\tmake -C docs requirements html SPHINXOPTS=\"-W\"\n\nkernelspecs:  kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files ## Create archives with sample kernelspecs\nkernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files:\n\tmake VERSION=$(VERSION) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) -C  etc $@\n\ntest-install: dist test-install-wheel 
test-install-tar ## Install and minimally run EG with the wheel and tar distributions\n\ntest-install-wheel:\n\tpip uninstall -y jupyter_enterprise_gateway\n\tpip install dist/jupyter_enterprise_gateway-*.whl && \\\n\t\tjupyter enterprisegateway --help\n\ntest-install-tar:\n\tpip uninstall -y jupyter_enterprise_gateway\n\tpip install dist/jupyter_enterprise_gateway-*.tar.gz && \\\n\t\tjupyter enterprisegateway --help\n\nbdist: $(WHEEL_FILE)\n\n$(WHEEL_FILE): $(WHEEL_FILES)\n\tpip install build && python -m build --wheel . \\\n\t\t&& rm -rf *.egg-info && chmod 0755 dist/*.*\n\nsdist: $(SDIST_FILE)\n\n$(SDIST_FILE): $(WHEEL_FILES)\n\tpip install build && python -m build --sdist . \\\n\t\t&& rm -rf *.egg-info && chmod 0755 dist/*.*\n\nhelm-chart: helm-install $(HELM_CHART) ## Make helm chart distribution\n\nhelm-install: $(HELM_INSTALL_DIR)/helm\n\n$(HELM_INSTALL_DIR)/helm: # Download and install helm\n\tcurl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -o /tmp/get_helm.sh \\\n\t&& chmod +x /tmp/get_helm.sh \\\n\t&& DESIRED_VERSION=$(HELM_DESIRED_VERSION) /tmp/get_helm.sh \\\n\t&& rm -f /tmp/get_helm.sh\n\nhelm-lint: helm-clean\n\thelm lint $(HELM_CHART_DIR)\n\nhelm-clean: # Remove any .DS_Store files that might wind up in the package\n\t$(shell find etc/kubernetes/helm -type f -name '.DS_Store' -exec rm -f {} \\;)\n\n$(HELM_CHART): $(HELM_CHART_FILES)\n\tmake helm-lint\n\thelm package $(HELM_CHART_DIR) -d dist\n\tmv $(HELM_CHART_PACKAGE) $(HELM_CHART)  # Rename output to match other assets\n\ndist: lint bdist sdist kernelspecs helm-chart ## Make source, binary, kernelspecs and helm chart distributions to dist folder\n\nTEST_DEBUG_OPTS:=\n\ntest-debug:\n\tmake TEST_DEBUG_OPTS=\"--nocapture --nologcapture --logging-level=10\" test\n\ntest: TEST?=\ntest: ## Run unit tests\nifeq ($(TEST),)\n\tpytest -vv $(TEST_DEBUG_OPTS)\nelse\n# e.g., make test TEST=\"test_gatewayapp.py::TestGatewayAppConfig\"\n\tpytest -vv $(TEST_DEBUG_OPTS) 
enterprise_gateway/tests/$(TEST)\nendif\n\nrelease: dist check_dists ## Make a wheel + source release on PyPI\n\ttwine upload $(DIST_FILES)\n\ncheck_dists:\n\tpip install twine && twine check --strict $(DIST_FILES)\n\n# Here for doc purposes\ndocker-images:  ## Build docker images (includes kernel-based images)\nkernel-images: ## Build kernel-based docker images\n\n# Actual working targets...\ndocker-images: demo-base enterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller\n\nenterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@\n\ndemo-base:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(SPARK_VERSION) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@\n\n# Here for doc purposes\nclean-images: clean-demo-base ## Remove docker images (includes kernel-based images)\nclean-kernel-images: ## Remove kernel-based images\n\nclean-images clean-enterprise-gateway-demo clean-kernel-images clean-enterprise-gateway clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py clean-kernel-tf-gpu-py clean-kernel-image-puller:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@\n\nclean-demo-base:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@\n\npush-images: push-demo-base\npush-images push-enterprise-gateway-demo push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r push-kernel-scala push-kernel-tf-py 
push-kernel-tf-gpu-py push-kernel-image-puller:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@\n\npush-demo-base:\n\tmake WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@\n\npublish: NO_CACHE=--no-cache\npublish: clean clean-images dist docker-images push-images\n\n# itest should have these targets up to date: bdist kernelspecs docker-enterprise-gateway\n\nitest: itest-docker itest-yarn\n\n# itest configurable settings\n# indicates two things:\n# this prefix is used by itest to determine hostname to test against, in addtion,\n# if itests will be run locally with docker-prep target, this will set the hostname within that container as well\nITEST_HOSTNAME_PREFIX?=itest\n\n# indicates the user to emulate.  This equates to 'KERNEL_USERNAME'...\nITEST_USER?=bob\n# indicates the other set of options to use.  At this time, only the python notebooks succeed, so we're skipping R and Scala.\nITEST_OPTIONS?=\n\n# here's an example of the options (besides host and user) with their expected values ...\n# ITEST_OPTIONS=--impersonation < True | False >\n\nITEST_YARN_PORT?=8888\nITEST_YARN_HOST?=localhost:$(ITEST_YARN_PORT)\nITEST_YARN_TESTS?=enterprise_gateway/itests\n\nITEST_KERNEL_LAUNCH_TIMEOUT=120\n\nLOG_LEVEL=INFO\n\nitest-yarn-debug: ## Run integration tests (optionally) against docker demo (YARN) container with print statements\n\tmake LOG_LEVEL=DEBUG TEST_DEBUG_OPTS=\"--log-level=10\" itest-yarn\n\nPREP_ITEST_YARN?=1\nitest-yarn: ## Run integration tests (optionally) against docker demo (YARN) container\nifeq (1, $(PREP_ITEST_YARN))\n\tmake itest-yarn-prep\nendif\n\t(GATEWAY_HOST=$(ITEST_YARN_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) SPARK_VERSION=$(SPARK_VERSION) ITEST_HOSTNAME_PREFIX=$(ITEST_HOSTNAME_PREFIX) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_YARN_TESTS))\n\t@echo \"Run \\`docker logs itest-yarn\\` to see enterprise-gateway 
log.\"\n\nPREP_TIMEOUT?=60\nitest-yarn-prep:\n\t@-docker rm -f itest-yarn >> /dev/null\n\t@echo \"Starting enterprise-gateway container (run \\`docker logs itest-yarn\\` to see container log)...\"\n\t@-docker run -itd -p $(ITEST_YARN_PORT):$(ITEST_YARN_PORT) -p 8088:8088 -p 8042:8042 -h itest-yarn --name itest-yarn -v `pwd`/enterprise_gateway/itests:/tmp/byok elyra/enterprise-gateway-demo:$(TAG) --gateway\n\t@(r=\"1\"; attempts=0; while [ \"$$r\" == \"1\" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo \"Waiting for enterprise-gateway to start...\"; sleep 2; ((attempts++)); docker logs itest-yarn |grep --regexp \"Jupyter Enterprise Gateway .* is available at http\"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo \"Wait for startup timed out!\"; exit 1; fi;)\n\n\n# This should get cleaned up once docker support is more mature\nITEST_DOCKER_PORT?=8889\nITEST_DOCKER_HOST?=localhost:$(ITEST_DOCKER_PORT)\nITEST_DOCKER_TESTS?=enterprise_gateway/itests/test_r_kernel.py::TestRKernelLocal enterprise_gateway/itests/test_python_kernel.py::TestPythonKernelLocal enterprise_gateway/itests/test_scala_kernel.py::TestScalaKernelLocal\nITEST_DOCKER_KERNELS=PYTHON_KERNEL_LOCAL_NAME=python_docker SCALA_KERNEL_LOCAL_NAME=scala_docker R_KERNEL_LOCAL_NAME=R_docker\n\nitest-docker-debug: ## Run integration tests (optionally) against docker container with print statements\n\tmake LOG_LEVEL=DEBUG TEST_DEBUG_OPTS=\"--nocapture --nologcapture --logging-level=10\" itest-docker\n\nPREP_ITEST_DOCKER?=1\nitest-docker: ## Run integration tests (optionally) against docker swarm\nifeq (1, $(PREP_ITEST_DOCKER))\n\tmake itest-docker-prep\nendif\n\t(GATEWAY_HOST=$(ITEST_DOCKER_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) $(ITEST_DOCKER_KERNELS) ITEST_HOSTNAME_PREFIX=$(ITEST_USER) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_DOCKER_TESTS))\n\t@echo \"Run \\`docker service logs itest-docker\\` to see enterprise-gateway 
log.\"\n\nPREP_TIMEOUT?=180\nitest-docker-prep:\n\t@-docker service rm enterprise-gateway_enterprise-gateway enterprise-gateway_enterprise-gateway-proxy\n\t@-docker swarm leave --force\n\t# Check if swarm mode is active, if not attempt to create the swarm\n\t@(docker info | grep -q 'Swarm: active'; if [ $$? -eq 1 ]; then docker swarm init; fi;)\n\t@echo \"Starting enterprise-gateway swarm service (run \\`docker service logs enterprise-gateway_enterprise-gateway\\` to see service log)...\"\n\t@KG_PORT=${ITEST_DOCKER_PORT} EG_DOCKER_NETWORK=enterprise-gateway docker stack deploy -c etc/docker/docker-compose.yml enterprise-gateway\n\t@(r=\"1\"; attempts=0; while [ \"$$r\" == \"1\" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo \"Waiting for enterprise-gateway to start...\"; sleep 2; ((attempts++)); docker service logs enterprise-gateway_enterprise-gateway 2>&1 |grep --regexp \"Jupyter Enterprise Gateway .* is available at http\"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo \"Wait for startup timed out!\"; exit 1; fi;)\n"
  },
  {
    "path": "README.md",
    "content": "**[Website](https://jupyter-enterprise-gateway.readthedocs.io/)** |\n**[Technical Overview](#technical-overview)** |\n**[Installation](#installation)** |\n**[System Architecture](#system-architecture)** |\n**[Contributing](#contributing)**\n\n# Jupyter Enterprise Gateway\n\n[![Actions Status](https://github.com/jupyter-server/enterprise_gateway/workflows/Builds/badge.svg)](https://github.com/jupyter-server/enterprise_gateway/actions)\n[![PyPI version](https://badge.fury.io/py/jupyter-enterprise-gateway.svg)](https://badge.fury.io/py/jupyter-enterprise-gateway)\n[![Downloads](https://pepy.tech/badge/jupyter-enterprise-gateway/month)](https://pepy.tech/project/jupyter-enterprise-gateway)\n[![Documentation Status](https://readthedocs.org/projects/jupyter-enterprise-gateway/badge/?version=latest)](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/?badge=latest)\n[![Google Group](https://img.shields.io/badge/google-group-blue.svg)](https://groups.google.com/forum/#!forum/jupyter)\n\nJupyter Enterprise Gateway enables Jupyter Notebook to launch remote kernels in a distributed cluster,\nincluding Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm.\n\nIt provides out of the box support for the following kernels:\n\n- Python using IPython kernel\n- R using IRkernel\n- Scala using Apache Toree kernel\n\nFull Documentation for Jupyter Enterprise Gateway can be found [here](https://jupyter-enterprise-gateway.readthedocs.io/en/latest)\n\nJupyter Enterprise Gateway does not manage multiple Jupyter Notebook deployments, for that\nyou should use [JupyterHub](https://github.com/jupyterhub/jupyterhub).\n\n## Technical Overview\n\nJupyter Enterprise Gateway is a web server that provides headless access to Jupyter kernels within\nan enterprise. 
Inspired by Jupyter Kernel Gateway, Jupyter Enterprise Gateway provides feature parity with Kernel Gateway's [jupyter-websocket mode](https://jupyter-kernel-gateway.readthedocs.io/en/latest/websocket-mode.html) in addition to the following:\n\n- Adds support for remote kernels hosted throughout the enterprise where kernels can be launched in\n  the following ways:\n  - Local to the Enterprise Gateway server (today's Kernel Gateway behavior)\n  - On specific nodes of the cluster utilizing a round-robin algorithm\n  - On nodes identified by an associated resource manager\n- Provides support for Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm out of the box. Others can be configured via Enterprise Gateway's extensible framework.\n- Secure communication from the client, through the Enterprise Gateway server, to the kernels\n- Multi-tenant capabilities\n- Persistent kernel sessions\n- Ability to associate profiles consisting of configuration settings to a kernel for a given user (see [Project Roadmap](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/roadmap.html))\n\n![Deployment Diagram](https://github.com/jupyter-server/enterprise_gateway/blob/main/docs/source/images/deployment.png?raw=true)\n\n## Installation\n\nDetailed installation instructions are located in the\n[Users Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/users/index.html)\nof the project docs. 
Here's a quick start using `pip`:\n\n```bash\n# install from pypi\npip install --upgrade jupyter_enterprise_gateway\n\n# show all config options\njupyter enterprisegateway --help-all\n\n# run it with default options\njupyter enterprisegateway\n```\n\nPlease check the [configuration options within the Operators Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/index.html#configuring-enterprise-gateway)\nfor information about the supported options.\n\n## System Architecture\n\nThe [System Architecture page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/system-architecture.html)\nincludes information about Enterprise Gateway's remote kernel, process proxy, and launcher frameworks.\n\n## Contributing\n\nThe [Contribution page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/contrib.html) includes\ninformation about how to contribute to Enterprise Gateway along with our roadmap. While there, you'll want to\n[set up a development environment](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/devinstall.html) and check out typical developer tasks.\n"
  },
  {
    "path": "codecov.yml",
    "content": "codecov:\n  notify:\n    require_ci_to_pass: yes\n\ncoverage:\n  precision: 2\n  round: down\n  range: \"70...100\"\n\n  status:\n    project: no\n    patch: no\n    changes: no\n\nparsers:\n  gcov:\n    branch_detection:\n      conditional: yes\n      loop: yes\n      method: no\n      macro: no\n\ncomment: off\n"
  },
  {
    "path": "conftest.py",
    "content": "def pytest_addoption(parser):\n    parser.addoption(\"--host\", action=\"store\", default=\"localhost:8888\")\n    parser.addoption(\"--username\", action=\"store\", default=\"elyra\")\n    parser.addoption(\"--impersonation\", action=\"store\", default=\"false\")\n\n\ndef pytest_generate_tests(metafunc):\n    # This is called for every test. Only get/set command line arguments\n    # if the argument is specified in the list of test \"fixturenames\".\n    if \"host\" in metafunc.fixturenames:\n        metafunc.parametrize(\"host\", [metafunc.config.option.host])\n    if \"username\" in metafunc.fixturenames:\n        metafunc.parametrize(\"username\", [metafunc.config.option.username])\n    if \"impersonation\" in metafunc.fixturenames:\n        metafunc.parametrize(\"impersonation\", [metafunc.config.option.impersonation])\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    = -n\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = build\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n\nDOC_REQUIREMENTS = doc-requirements.txt\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext requirements\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  requirements to install required packages\"\n\t@echo \"  html         to make standalone HTML files\"\n\t@echo \"  dirhtml      to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml   to make a single large HTML file\"\n\t@echo \"  pickle       to make pickle files\"\n\t@echo \"  json         to make JSON files\"\n\t@echo \"  htmlhelp     to make HTML files and a HTML help project\"\n\t@echo \"  qthelp       to make HTML files and a qthelp project\"\n\t@echo \"  applehelp    to make an Apple Help Book\"\n\t@echo \"  devhelp      to make HTML files and a Devhelp project\"\n\t@echo \"  epub         to make an epub\"\n\t@echo \"  latex        to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf     to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja   to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text         to make text files\"\n\t@echo \"  man          to make manual pages\"\n\t@echo \"  texinfo      to make Texinfo files\"\n\t@echo \"  info         to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext      to make PO 
message catalogs\"\n\t@echo \"  changes      to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml          to make Docutils-native XML files\"\n\t@echo \"  pseudoxml    to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck    to check all external links for integrity\"\n\t@echo \"  doctest      to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage     to run coverage check of the documentation (if enabled)\"\n\nclean:\n\trm -rf $(BUILDDIR)/*\n\nrequirements:\n\tpip install -q -r $(DOC_REQUIREMENTS)\n\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/JupyterHub.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/JupyterHub.qhc\"\n\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp\n\t@echo\n\t@echo \"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t      \"~/Library/Documentation/Help or install it in your application\" \\\n\t      \"bundle.\"\n\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/JupyterHub\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/JupyterHub\"\n\t@echo \"# devhelp\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. 
The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. 
The message catalogs are in $(BUILDDIR)/locale.\"\n\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n"
  },
  {
    "path": "docs/doc-requirements.txt",
    "content": "# https://github.com/miyakogi/m2r/issues/66\nmistune<4\nmyst-parser\npydata_sphinx_theme\nsphinx\nsphinx-markdown-tables\nsphinx_book_theme\nsphinxcontrib-mermaid\nsphinxcontrib-openapi\nsphinxcontrib_github_alt\nsphinxcontrib_spelling\nsphinxemoji\ntornado\n"
  },
  {
    "path": "docs/environment.yml",
    "content": "name: enterprise_gateway_docs\nchannels:\n  - conda-forge\n  - defaults\n  - free\ndependencies:\n  - pip\n  - python=3.11\n  - pip:\n      - -r doc-requirements.txt\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\nset BUILDDIR=build\r\nset ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source\r\nset I18NSPHINXOPTS=%SPHINXOPTS% source\r\nif NOT \"%PAPER%\" == \"\" (\r\n\tset ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%\r\n\tset I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%\r\n)\r\n\r\nif \"%1\" == \"\" goto help\r\n\r\nif \"%1\" == \"help\" (\r\n\t:help\r\n\techo.Please use `make ^<target^>` where ^<target^> is one of\r\n\techo.  html       to make standalone HTML files\r\n\techo.  dirhtml    to make HTML files named index.html in directories\r\n\techo.  singlehtml to make a single large HTML file\r\n\techo.  pickle     to make pickle files\r\n\techo.  json       to make JSON files\r\n\techo.  htmlhelp   to make HTML files and a HTML help project\r\n\techo.  qthelp     to make HTML files and a qthelp project\r\n\techo.  devhelp    to make HTML files and a Devhelp project\r\n\techo.  epub       to make an epub\r\n\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\r\n\techo.  text       to make text files\r\n\techo.  man        to make manual pages\r\n\techo.  texinfo    to make Texinfo files\r\n\techo.  gettext    to make PO message catalogs\r\n\techo.  changes    to make an overview over all changed/added/deprecated items\r\n\techo.  xml        to make Docutils-native XML files\r\n\techo.  pseudoxml  to make pseudoxml-XML files for display purposes\r\n\techo.  linkcheck  to check all external links for integrity\r\n\techo.  doctest    to run all doctests embedded in the documentation if enabled\r\n\techo.  
coverage   to run coverage check of the documentation if enabled\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"clean\" (\r\n\tfor /d %%i in (%BUILDDIR%\\*) do rmdir /q /s %%i\r\n\tdel /q /s %BUILDDIR%\\*\r\n\tgoto end\r\n)\r\n\r\n\r\nREM Check if sphinx-build is available and fallback to Python version if any\r\n%SPHINXBUILD% 1>NUL 2>NUL\r\nif errorlevel 9009 goto sphinx_python\r\ngoto sphinx_ok\r\n\r\n:sphinx_python\r\n\r\nset SPHINXBUILD=python -m sphinx.__init__\r\n%SPHINXBUILD% 2> nul\r\nif errorlevel 9009 (\r\n\techo.\r\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\r\n\techo.installed, then set the SPHINXBUILD environment variable to point\r\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\r\n\techo.may add the Sphinx directory to PATH.\r\n\techo.\r\n\techo.If you don't have Sphinx installed, grab it from\r\n\techo.https://sphinx-doc.org/\r\n\texit /b 1\r\n)\r\n\r\n:sphinx_ok\r\n\r\n\r\nif \"%1\" == \"html\" (\r\n\t%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/html.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"dirhtml\" (\r\n\t%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"singlehtml\" (\r\n\t%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The HTML pages are in %BUILDDIR%/singlehtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pickle\" (\r\n\t%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the pickle files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"json\" (\r\n\t%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the JSON files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"htmlhelp\" (\r\n\t%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run HTML Help Workshop with the ^\r\n.hhp project file in %BUILDDIR%/htmlhelp.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"qthelp\" (\r\n\t%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run \"qcollectiongenerator\" with the ^\r\n.qhcp project file in %BUILDDIR%/qthelp, like this:\r\n\techo.^> qcollectiongenerator %BUILDDIR%\\qthelp\\JupyterHub.qhcp\r\n\techo.To view the help file:\r\n\techo.^> assistant -collectionFile %BUILDDIR%\\qthelp\\JupyterHub.qhc\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"devhelp\" (\r\n\t%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"epub\" (\r\n\t%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The epub file is in %BUILDDIR%/epub.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latex\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; the LaTeX files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdf\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf\r\n\tcd %~dp0\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdfja\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf-ja\r\n\tcd %~dp0\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"text\" (\r\n\t%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The text files are in %BUILDDIR%/text.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"man\" (\r\n\t%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The manual pages are in %BUILDDIR%/man.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"texinfo\" (\r\n\t%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"gettext\" (\r\n\t%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The message catalogs are in %BUILDDIR%/locale.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"changes\" (\r\n\t%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.The overview file is in %BUILDDIR%/changes.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"linkcheck\" (\r\n\t%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Link check complete; look for any errors in the above output ^\r\nor in %BUILDDIR%/linkcheck/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"doctest\" (\r\n\t%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Testing of doctests in the sources finished, look at the ^\r\nresults in %BUILDDIR%/doctest/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"coverage\" (\r\n\t%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Testing of coverage in the sources finished, look at the ^\r\nresults in %BUILDDIR%/coverage/python.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"xml\" (\r\n\t%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The XML files are in %BUILDDIR%/xml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pseudoxml\" (\r\n\t%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.\r\n\tgoto end\r\n)\r\n\r\n:end\r\n"
  },
  {
    "path": "docs/source/_static/custom.css",
    "content": "body div.sphinxsidebarwrapper p.logo {\n  text-align: left;\n}\n.mermaid svg {\n  height: 100%;\n}\n"
  },
  {
    "path": "docs/source/conf.py",
    "content": "#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"3.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"myst_parser\",\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.doctest\",\n    \"sphinx.ext.intersphinx\",\n    \"sphinx.ext.autosummary\",\n    \"sphinx.ext.mathjax\",\n    \"sphinxcontrib_github_alt\",\n    \"sphinxcontrib.mermaid\",\n    \"sphinxcontrib.openapi\",\n    \"sphinxemoji.sphinxemoji\",\n]\n\ntry:\n    import enchant  # noqa\n\n    extensions += [\"sphinxcontrib.spelling\"]\nexcept ImportError:\n    pass\n\nmyst_enable_extensions = [\"html_image\"]\nmyst_heading_anchors = 4  # Needs to be 4 or higher\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = {\n    \".rst\": \"restructuredtext\",\n    \".txt\": \"markdown\",\n    \".md\": \"markdown\",\n}\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Jupyter Enterprise Gateway\"\ncopyright = \"2022, Project Jupyter\"  
# noqa\nauthor = \"Jupyter Server Team\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n_version_py = os.path.join(\"..\", \"..\", \"enterprise_gateway\", \"_version.py\")\nversion_ns = {}\n\nwith open(_version_py) as version_file:\n    exec(version_file.read(), version_ns)  # noqa\n\n# The short X.Y version.\nversion = version_ns[\"__version__\"][:3]\n# The full version, including alpha/beta/rc tags.\nrelease = version_ns[\"__version__\"]\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"default\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pydata_sphinx_theme\"\n# html_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/jupyter-logo.png\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n#   'logo_only': html_logo\n#     'description': \"Enterprise Gateway\",\n#     'fixed_sidebar': False,\n#     'show_relbars': True,\n#     'github_user': 'jupyter',\n#     'github_repo': 'enterprise_gateway',\n#     'github_type': 'star',\n#     'logo': 'jupyter-logo.png',\n#     'logo_text_align': 'left',\n#     'analytics_id': 'UA-130853690-1',\n\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\n    \"custom.css\",\n]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  
The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"EnterpriseGatewaydoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    # 'papersize': 'letterpaper',\n    # The font size ('10pt', '11pt' or '12pt').\n    # 'pointsize': '10pt',\n    # Additional stuff for the LaTeX preamble.\n    # 'preamble': '',\n    # Latex figure (float) alignment\n    # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (\n        master_doc,\n        \"EnterpriseGateway.tex\",\n        \"Enterprise Gateway Documentation\",\n        \"https://jupyter.org\",\n        \"manual\",\n    ),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"enterprise_gateway\", \"Enterprise Gateway Documentation\", [author], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (\n        master_doc,\n        \"enterprise_gateway\",\n        \"Enterprise Gateway Documentation\",\n        author,\n        \"EnterpriseGateway\",\n        \"One line description of project.\",\n        \"Miscellaneous\",\n    ),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The basename for the epub file. It defaults to the project name.\n# epub_basename = project\n\n# The HTML theme for the epub output. Since the default themes are not optimized\n# for small screen space, using the same theme for HTML and epub output is\n# usually not wise. This defaults to 'epub', a theme designed to save visual\n# space.\n# epub_theme = 'epub'\n\n# The language of the text. It defaults to the language option\n# or 'en' if the language is not set.\n# epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n# epub_scheme = ''\n\n# The unique identifier of the text. 
This can be an ISBN number\n# or the project homepage.\n# epub_identifier = ''\n\n# A unique identification for the text.\n# epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n# epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n# epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_pre_files = []\n\n# HTML files that should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# The depth of the table of contents in toc.ncx.\n# epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n# epub_tocdup = True\n\n# Choose between 'default' and 'includehidden'.\n# epub_tocscope = 'default'\n\n# Fix unsupported image types using the Pillow.\n# epub_fix_images = False\n\n# Scale large images.\n# epub_max_image_width = 0\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# epub_show_urls = 'inline'\n\n# If false, no index is generated.\n# epub_use_index = True\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n    \"python\": (\"https://docs.python.org/\", None),\n    \"ipython\": (\"https://ipython.readthedocs.io/en/stable/\", None),\n    \"jupyter\": (\"https://jupyter.readthedocs.io/en/latest/\", None),\n}\n\nspelling_lang = \"en_US\"\nspelling_word_list_filename = \"spelling_wordlist.txt\"\n\n# Read The Docs\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# if not on_rtd:  # only import and set the theme if we're building docs locally\n#    import sphinx_rtd_theme\n#    html_theme = 'alabaster'\n#   
 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n"
  },
  {
    "path": "docs/source/contributors/contrib.md",
    "content": "# Contributing to Jupyter Enterprise Gateway\n\nThank you for your interest in Jupyter Enterprise Gateway! If you would like to contribute to the\nproject please first take a look at the\n[Project Jupyter Contributor Documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html).\n\nEnterprise Gateway has recently joined the [Jupyter Server organization](https://github.com/jupyter-server). Please check out our [team compass page](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) and try to attend our weekly dev meeting as we have a common goal of making all Jupyter server-side applications better!\n\nPrior to your contribution, we strongly recommend getting acquainted with Enterprise Gateway by checking\nout the [System Architecture](system-architecture.md) and [Development Workflow](devinstall.md) pages.\n"
  },
  {
    "path": "docs/source/contributors/debug.md",
    "content": "# Debugging Jupyter Enterprise Gateway\n\nThis page discusses how to go about debugging Enterprise Gateway. We also provide troubleshooting information\nin our [Troubleshooting Guide](../other/troubleshooting.md).\n\n## Configuring your IDE\n\nWhile your mileage may vary depending on which IDE you are using, the steps below (using PyCharm as an example) should be useful for configuring a debugging session for Enterprise Gateway with minimum\nadjustments for different IDEs.\n\n### Creating a new Debug Configuration\n\nGo to Run->Edit Configuration and create a new python configuration with the following settings:\n\n![Enterprise Gateway debug configuration](../images/debug_configuration.png)\n\n**Script Path:**\n\n```bash\n/Users/jovyan/opensource/jupyter/elyra/scripts/jupyter-enterprisegateway\n```\n\n**Parameters:**\n\n```bash\n--ip=0.0.0.0\n--log-level=DEBUG\n--EnterpriseGatewayApp.yarn_endpoint=“http://elyra-fyi-node-1.fyre.ibm.com:8088/ws/v1/cluster”\n--EnterpriseGatewayApp.remote_hosts=['localhost']\n```\n\n**Environment Variables:**\n\n```bash\nEG_ENABLE_TUNNELING=False\n```\n\n**Working Directory:**\n\n```bash\n/Users/jovyan/opensource/jupyter/elyra/scripts\n```\n\n### Running in debug mode\n\nNow that you have handled the necessary configuration, use Run-Debug and select the debug configuration\nyou just created and happy debugging!\n"
  },
  {
    "path": "docs/source/contributors/devinstall.md",
    "content": "# Development Workflow\n\nHere are instructions for setting up a development environment for the [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway)\nserver. It also includes common steps in the developer workflow such as building Enterprise Gateway,\nrunning tests, building docs, packaging kernel specifications, etc.\n\n## Prerequisites\n\nInstall GNU make on your system.\n\n## Clone the repo\n\nClone this repository into a local directory.\n\n```bash\n# make a directory under your HOME directory to put the source code\nmkdir -p ~/projects\ncd !$\n\n# clone this repo\ngit clone https://github.com/jupyter-server/enterprise_gateway.git\n```\n\n## Make\n\nEnterprise Gateway's build environment is centered around `make` and the corresponding [`Makefile`](https://github.com/jupyter-server/enterprise_gateway/blob/main/Makefile).\n\nEntering `make` with no parameters yields the following:\n\n```\nactivate                       Print instructions to activate the virtualenv (default: enterprise-gateway-dev)\nclean-env                      Remove conda env\nclean-images                   Remove docker images (includes kernel-based images)\nclean-kernel-images            Remove kernel-based images\nclean                          Make a clean source tree\ndist                           Make source, binary, kernelspecs and helm chart distributions to dist folder\ndocker-images                  Build docker images (includes kernel-based images)\ndocs                           Make HTML documentation\nenv                            Make a dev environment\nhelm-chart                     Make helm chart distribution\nitest-docker-debug             Run integration tests (optionally) against docker container with print statements\nitest-docker                   Run integration tests (optionally) against docker swarm\nitest-yarn-debug               Run integration tests (optionally) against docker demo (YARN) container with print 
statements\nitest-yarn                     Run integration tests (optionally) against docker demo (YARN) container\nkernel-images                  Build kernel-based docker images\nkernelspecs                    Create archives with sample kernelspecs\nlint                           Check code style\nrelease                        Make a wheel + source release on PyPI\nrun-dev                        Make a server in jupyter_websocket mode\ntest-install                   Install and minimally run EG with the wheel and tar distributions\ntest                           Run unit tests\n```\n\nSome of the more useful commands are listed below.\n\n## Build the conda environment\n\nBuild a Python 3 conda environment that can be used to run\nthe Enterprise Gateway server within an IDE. May be necessary prior\nto [debugging Enterprise Gateway](./debug.md) based on your local Python environment.\nSee [Conda's Managing environments](https://docs.conda.io/projects/conda/en/stable/user-guide/tasks/manage-environments.html#managing-environments)\nfor background on environments and why you may find them useful as you develop on Enterprise Gateway.\n\n```bash\nmake env\n```\n\nBy default, the env built will be named `enterprise-gateway-dev`. To produce a different conda env,\nyou can specify the name via the `ENV=` parameter.\n\n```bash\nmake ENV=my-conda-env env\n```\n\nTo delete your existing environment, use `clean-env` task.\n\n```bash\nmake clean-env\n```\n\n## Build the wheel file\n\nBuild a wheel file that can then be installed via `pip install`\n\n```\nmake bdist\n```\n\nThe wheel file will reside in the `dist` directory.\n\n## Build the kernelspec tar file\n\nEnterprise Gateway includes several sets of kernel specifications for each of the three primary kernels: `IPython Kernel`,`IRkernel`,\nand `Apache Toree` to demonstrate remote kernels and their corresponding launchers. These sets of files are then added to tar files corresponding to their target resource managers. 
In addition, a _combined_ tar file is also built containing all kernel specifications. Like the wheel file, these tar files will reside in the `dist` directory.\n\n```bash\nmake kernelspecs\n```\n\n```{note}\nBecause the scala launcher requires a jar file, `make kernelspecs` requires the use of `sbt` to build the\nscala launcher jar. Please consult the [sbt site](https://www.scala-sbt.org/) for directions to\ninstall/upgrade `sbt` on your platform. We currently use version 1.3.12.\n```\n\n## Build distribution files\n\nBuilds the files necessary for a given release: the wheel file, the source tar file, and the kernel specification tar\nfiles. This is essentially a helper target consisting of the `bdist` `sdist` and `kernelspecs` targets.\n\n```bash\nmake dist\n```\n\n## Run the Enterprise Gateway server\n\nRun an instance of the Enterprise Gateway server.\n\n```bash\nmake run-dev\n```\n\nThen access the running server at the URL printed in the console.\n\n## Build the docs\n\nRun Sphinx to build the HTML documentation.\n\n```bash\nmake docs\n```\n\nThis command actually issues `make requirements html` from the `docs` sub-directory.\n\n## Run the unit tests\n\nRun the unit test suite.\n\n```\nmake test\n```\n\nTo run a subset of tests, we support passing \"TEST\" argument to the make command as below\n\n```\nmake test TEST=\"test_gatewayapp.py\"\nmake test TEST=\"test_gatewayapp.py::TestGatewayAppConfig\"\nmake test TEST=\"test_gatewayapp.py::TestGatewayAppConfig::test_config_env_vars_bc\"\n```\n\n## Run the integration tests\n\nRun the integration tests suite.\n\nThese tests will bootstrap the [`elyra/enterprise-gateway-demo`](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) docker image with Apache Spark using YARN resource manager and\nJupyter Enterprise Gateway and perform various tests for each kernel in local, YARN client, and YARN cluster modes.\n\n```bash\nmake itest-yarn\n```\n\n## Build the docker images\n\nThe following can be used to build 
all docker images used within the project. See [docker images](docker.md) for specific details.\n\n```bash\nmake docker-images\n```\n\nIf you only want to build the kernel images, use\n\n```bash\nmake kernel-images\n```\n"
  },
  {
    "path": "docs/source/contributors/docker.md",
    "content": "# Docker Images\n\nAll docker images can be pulled from docker hub's [elyra organization](https://hub.docker.com/u/elyra/) and their docker files can be found in the github repository in the appropriate directory of [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker).\n\nLocal images can also be built via `make docker-images`.\n\n```{note}\nBase images and versions change over time. Check the Dockerfiles in [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker) for the current base images used in each build.\n```\n\nThe following sections describe the docker images used within Kubernetes and Docker Swarm environments.\n\n## elyra/enterprise-gateway\n\nThe primary image for Kubernetes and Docker Swarm support, [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) contains the Enterprise Gateway server software and default kernel specifications. For Kubernetes it is deployed using the [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway). For Docker Swarm, deployment can be accomplished using [docker-componse.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml).\n\nWe recommend that a persistent/mounted volume be used so that the kernel specifications can be accessed outside the container since we've found those to require post-deployment modifications from time to time.\n\n## elyra/kernel-py\n\nImage [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/) contains the IPython kernel. 
It is currently built on the [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) image with additional support necessary for remote operation.\n\n## elyra/kernel-spark-py\n\nImage [elyra/kernel-spark-py](https://hub.docker.com/r/elyra/kernel-spark-py/) is built on [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py) and includes the Spark 2.4 distribution for use in Kubernetes clusters. Please note that the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.\n\n## elyra/kernel-tf-py\n\nImage [elyra/kernel-tf-py](https://hub.docker.com/r/elyra/kernel-tf-py/) contains the IPython kernel. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image with additional support necessary for remote operation.\n\n## elyra/kernel-scala\n\nImage [elyra/kernel-scala](https://hub.docker.com/r/elyra/kernel-scala/) contains the Scala (Apache Toree) kernel and is built on [elyra/spark](https://hub.docker.com/r/elyra/spark) which is, itself, built using the scripts provided by the Spark 2.4 distribution for use in Kubernetes clusters. 
As a result, the ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.\n\nSince Apache Toree is currently tied to Spark, creation of a _vanilla_ mode Scala kernel is not high on our current set of priorities.\n\n## elyra/kernel-r\n\nImage [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/) contains the IRKernel and is currently built on the [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/) image.\n\n## elyra/kernel-spark-r\n\nImage [elyra/kernel-spark-r](https://hub.docker.com/r/elyra/kernel-spark-r/) also contains the IRKernel but is built on [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r) and includes the Spark 2.4 distribution for use in Kubernetes clusters.\n\n## Ancillary Docker Images\n\nThe project produces two docker images to make testing easier: `elyra/demo-base` and `elyra/enterprise-gateway-demo`.\n\n### elyra/demo-base\n\nThe [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) image is considered the base image upon which [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) is built. It consists of a Hadoop YARN installation that includes Spark, Java, miniconda, and various kernel installations.\n\nThe primary use of this image is to quickly build elyra/enterprise-gateway images for testing and development purposes. 
To build a local image, run `make demo-base`.\n\nThis image can be used to start a separate Hadoop YARN cluster that, when combined with another instance of elyra/enterprise-gateway can better demonstrate remote kernel functionality.\n\n### elyra/enterprise-gateway-demo\n\nBuilt on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) also includes the various example kernel specifications contained in the repository.\n\nBy default, this container will start with enterprise gateway running as a service user named `jovyan`. This user is enabled for `sudo` so that it can emulate other users where necessary. Other users included in this image are `elyra`, `bob` and `alice` (names commonly used in security-based examples).\n\nWe plan on producing one image per release to the [enterprise-gateway-demo docker repo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) where the image's tag reflects the corresponding release.\n\nTo build a local image, run `make enterprise-gateway-demo`. Because this is a development build, the tag for this image will not reflect the value of the VERSION variable in the root `Makefile` but will be 'dev'.\n"
  },
  {
    "path": "docs/source/contributors/index.rst",
    "content": "Contributors Guide\n==================\n\nThese pages target people who are interested in contributing directly to the Jupyter Enterprise Gateway Project.\n\n.. admonition:: Use cases\n\n    - *As a contributor, I want to learn more about kernel management within the Jupyter ecosystem.*\n    - *As a contributor, I want to make Enterprise Gateway a more stable service for my organization and the community as a whole.*\n    - *As a contributor, I'm interested in adding the ability for Enterprise Gateway to be highly available and fault tolerant.*\n\n.. note::\n   As a *contributor*, we encourage you to be familiar with all of the guides (Users, Developers, Operators) to best support Enterprise Gateway.  This guide provides an overview of Enterprise Gateway along with instructions on how to get set up.\n\n\n.. toctree::\n   :maxdepth: 1\n   :name: contributors\n\n   contrib\n   system-architecture\n   docker\n   devinstall\n   sequence-diagrams\n   debug\n   roadmap\n"
  },
  {
    "path": "docs/source/contributors/roadmap.md",
    "content": "# Project Roadmap\n\nWe have plenty to do, now and in the future. Here's where we're headed:\n\n## Completed in 3.x\n\n- Spark 3.0 support (including pod template files)\n- Spark Operator support via `SparkOperatorProcessProxy`\n- Custom Resource Definition support via `CustomResourceProcessProxy`\n- Session persistence (file-based and webhook-based)\n- `KERNEL_VOLUMES` and `KERNEL_VOLUME_MOUNTS` for Kubernetes and Spark Operator kernels\n- Authorizer class override support (`EG_AUTHORIZER_CLASS`)\n- SSTI prevention in `KERNEL_POD_NAME` template substitution\n- Python 3.9 and below dropped; Python 3.10+ required\n\n## Planned for 4.0\n\n- Kernel Provisioners\n  - Provisioners will replace process proxies and enable Enterprise Gateway to remove its cap on `jupyter_client < 7` and `jupyter_server < 2`.\n- Parameterized Kernels\n  - Enable the ability to prompt for parameters\n  - These will likely be based on kernel provisioners\n\n## Wish list\n\n- High Availability\n  - Session persistence using a shared location (NoSQL DB) (file-based persistence has been implemented)\n  - Active/active support\n- Multi-gateway support on client-side\n  - Enables the ability for a single Jupyter Server to be configured against multiple Gateway servers simultaneously. This work will primarily be in Jupyter Server.\n- Pluggable load-balancers into `DistributedProcessProxy` (currently uses simple round-robin)\n- Support for other resource managers\n  - Slurm?\n  - Mesos?\n- User Environments\n  - Improve the way user files are made available to remote kernels\n- Administration UI\n  - Dashboard with running kernels\n  - Lifecycle management\n  - Time running, stop/kill, Profile Management, etc\n\nWe'd love to hear any other use cases you might have and look forward to your contributions to Jupyter Enterprise Gateway!\n"
  },
  {
    "path": "docs/source/contributors/sequence-diagrams.md",
    "content": "# Sequence Diagrams\n\nThe following consists of various sequence diagrams you might find helpful. We plan to add\ndiagrams based on demand and contributions.\n\n## Kernel launch: Jupyter Lab to Enterprise Gateway\n\nThis diagram depicts the interactions between components when a kernel start request\nis submitted from Jupyter Lab running against [Jupyter Server configured to use\nEnterprise Gateway](../users/connecting-to-eg.md). The diagram also includes the\nretrieval of kernel specifications (kernelspecs) prior to the kernel's initialization.\n\n```{mermaid}\n    sequenceDiagram\n        participant JupyterLab\n        participant JupyterServer\n        participant EnterpriseGateway\n        participant ProcessProxy\n        participant Kernel\n        participant ResourceManager\n        Note left of JupyterLab: fetch kernelspecs\n        JupyterLab->>JupyterServer: https GET api/kernelspecs\n        JupyterServer->>EnterpriseGateway: https GET api/kernelspecs\n        EnterpriseGateway-->>JupyterServer: api/kernelspecs response\n        JupyterServer-->>JupyterLab: api/kernelspecs response\n\n        Note left of JupyterLab: kernel initialization\n        JupyterLab->>JupyterServer: https POST api/sessions\n        JupyterServer->>EnterpriseGateway: https POST api/kernels\n        EnterpriseGateway->>ProcessProxy: launch_process()\n        ProcessProxy->>Kernel: launch kernel\n        ProcessProxy->>ResourceManager: confirm startup\n        Kernel-->>ProcessProxy: connection info\n        ResourceManager-->>ProcessProxy: state & host info\n        ProcessProxy-->>EnterpriseGateway: complete connection info\n        EnterpriseGateway->>Kernel: TCP socket requests\n        Kernel-->>EnterpriseGateway: TCP socket handshakes\n        EnterpriseGateway-->>JupyterServer: api/kernels response\n        JupyterServer-->>JupyterLab: api/sessions response\n\n        JupyterLab->>JupyterServer: ws GET api/kernels\n        
JupyterServer->>EnterpriseGateway: ws GET api/kernels\n        EnterpriseGateway->>Kernel: kernel_info_request message\n        Kernel-->>EnterpriseGateway: kernel_info_reply message\n        EnterpriseGateway-->>JupyterServer: websocket upgrade response\n        JupyterServer-->>JupyterLab: websocket upgrade response\n```\n"
  },
  {
    "path": "docs/source/contributors/system-architecture.md",
    "content": "# System Architecture\n\nBelow are sections presenting details of the Enterprise Gateway internals and other related items. While we will attempt to maintain its consistency, the ultimate answers are in the code itself.\n\n## Enterprise Gateway Process Proxy Extensions\n\nEnterprise Gateway is follow-on project to Jupyter Kernel Gateway with additional abilities to support remote kernel sessions on behalf of multiple users within resource-managed frameworks such as [Apache Hadoop YARN](https://apache.github.io/hadoop/hadoop-yarn/hadoop-yarn-site/YARN.html) or [Kubernetes](https://kubernetes.io/). Enterprise Gateway introduces these capabilities by extending the existing class hierarchies for `AsyncKernelManager` and `AsyncMultiKernelManager` classes, along with an additional abstraction known as a _process proxy_.\n\n### Overview\n\nAt its basic level, a running kernel consists of two components for its communication - a set of ports and a process.\n\n### Kernel Ports\n\nThe first component is a set of five zero-MQ ports used to convey the Jupyter protocol between the Notebook\nand the underlying kernel. In addition to the 5 ports, is an IP address, a key, and a signature scheme\nindicator used to interpret the key. These eight pieces of information are conveyed to the kernel via a\njson file, known as the connection file.\n\nWithin the base framework, the IP address must be a local IP address meaning that the kernel cannot be\nremote from the library launching the kernel. The enforcement of this restriction is down in the `jupyter_client` module - two levels below Enterprise Gateway.\n\nThis component is the core communication mechanism between the Notebook and the kernel. All aspects, including\nlifecycle management, can occur via this component. 
The kernel process (below) comes into play only when\nport-based communication becomes unreliable or additional information is required.\n\n### Kernel Process\n\nWhen a kernel is launched, one of the fields of the kernel's associated kernel specification is used to\nidentify a command to invoke. In today's implementation, this command information, along with other\nenvironment variables (also described in the kernel specification), is passed to `popen()` which returns\na process class. This class supports four basic methods following its creation:\n\n1. `poll()` to determine if the process is still running\n1. `wait()` to block the caller until the process has terminated\n1. `send_signal(signum)` to send a signal to the process\n1. `kill()` to terminate the process\n\nAs you can see, other forms of process communication can be achieved by abstracting the launch mechanism.\n\n### Kernel Specifications\n\nThe primary vehicle for indicating a given kernel should be handled in a different manner is the kernel\nspecification, otherwise known as the _kernel spec_. Enterprise Gateway leverages the natively extensible `metadata` stanza within the kernel specification to introduce a new stanza named `process_proxy`.\n\nThe `process_proxy` stanza identifies the class that provides the kernel's process abstraction\n(while allowing for future extensions). 
This class then provides the kernel's lifecycle management operations relative to the managed resource or functional equivalent.\n\nHere's an example of a kernel specification that uses the `DistributedProcessProxy` class for its abstraction:\n\n```json\n{\n  \"language\": \"scala\",\n  \"display_name\": \"Spark - Scala (YARN Client Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"__TOREE_SPARK_OPTS__\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID}\",\n    \"__TOREE_OPTS__\": \"\",\n    \"LAUNCH_OPTS\": \"\",\n    \"DEFAULT_INTERPRETER\": \"Scala\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nSee the [Process Proxy](#process-proxy) section for more details on process proxies and those provided as part of the Enterprise Gateway release.\n\n## Remote Mapping Kernel Manager\n\n`RemoteMappingKernelManager` is a subclass of Jupyter Server's [`AsyncMappingKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/services/kernels/kernelmanager.py#L633) and provides two functions.\n\n1. It provides the vehicle for making the `RemoteKernelManager` class known and available.\n1. It overrides `start_kernel` to look at the target kernel's kernel spec to see if it contains a remote process proxy class entry. 
If so, it records the name of the class in its member variable to be made available to the kernel start logic.\n\n## Remote Kernel Manager\n\n`RemoteKernelManager` is a subclass of jupyter_client's [`AsyncIOLoopKernelManager` class](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/ioloop/manager.py#L62) and provides the\nprimary integration points for remote process proxy invocations. It implements a number of methods which allow\nEnterprise Gateway to circumvent functionality that might otherwise be prevented. As a result, some of these overrides may\nnot be necessary if lower layers of the Jupyter framework were modified. For example, some methods are required\nbecause Jupyter makes assumptions that the kernel process is local.\n\nIts primary functionality, however, is to override the `_launch_kernel` method (which is the method closest to\nthe process invocation) and instantiates the appropriate process proxy instance - which is then returned in\nplace of the process instance used in today's implementation. Any interaction with the process then takes\nplace via the process proxy.\n\nBoth `RemoteMappingKernelManager` and `RemoteKernelManager` class definitions can be found in\n[remotemanager.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/kernels/remotemanager.py)\n\n## Process Proxy\n\nProcess proxy classes derive from the abstract base class `BaseProcessProxyABC` - which defines the four basic\nprocess methods. There are two immediate subclasses of `BaseProcessProxyABC` - `LocalProcessProxy`\nand `RemoteProcessProxy`.\n\n`LocalProcessProxy` is essentially a pass-through to the current implementation. Kernel specifications that do not contain\na `process_proxy` stanza will use `LocalProcessProxy`.\n\n`RemoteProcessProxy` is an abstract base class representing remote kernel processes. 
Currently, there are seven\nbuilt-in subclasses of `RemoteProcessProxy` ...\n\n- `DistributedProcessProxy` - largely a proof of concept class, `DistributedProcessProxy` is responsible for the launch\n  and management of kernels distributed across an explicitly defined set of hosts using ssh. Hosts are determined\n  via a round-robin algorithm (that we should make pluggable someday).\n- `YarnClusterProcessProxy` - is responsible for the discovery and management of kernels hosted as Hadoop YARN applications\n  within a managed cluster.\n- `KubernetesProcessProxy` - is responsible for the discovery and management of kernels hosted\n  within a Kubernetes cluster.\n- `DockerSwarmProcessProxy` - is responsible for the discovery and management of kernels hosted\n  within a Docker Swarm cluster.\n- `DockerProcessProxy` - is responsible for the discovery and management of kernels hosted\n  within Docker configuration. Note: because these kernels will always run local to the corresponding Enterprise Gateway instance, these process proxies are of limited use.\n- `ConductorClusterProcessProxy` - is responsible for the discovery and management of kernels hosted\n  within an IBM Spectrum Conductor cluster.\n- `SparkOperatorProcessProxy` - is responsible for the discovery and management of kernels hosted\n  within a Kubernetes cluster but created as a `SparkApplication` instead of a Pod. 
The `SparkApplication` is a Kubernetes custom resource\n  defined inside the project [spark-on-k8s-operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator), which\n  makes all kinds of spark on k8s components better organized and easy to configure.\n\n```{note}\nBefore you run a kernel associated with `SparkOperatorProcessProxy`, ensure that the [Kubernetes Operator for Apache Spark is installed](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation) in your Kubernetes cluster.\n```\n\nYou might notice that the last six process proxies do not necessarily control the _launch_ of the kernel. This is\nbecause the native jupyter framework is utilized such that the script that is invoked by the framework is what\nlaunches the kernel against that particular resource manager. As a result, the _startup time_ actions of these process\nproxies is more about discovering where the kernel _landed_ within the cluster in order to establish a mechanism for\ndetermining lifetime. _Discovery_ typically consists of using the resource manager's API to locate the kernel whose name includes its kernel ID\nin some fashion.\n\nOn the other hand, the `DistributedProcessProxy` essentially wraps the kernel specification's argument vector (i.e., invocation\nstring) in a remote shell since the host is determined by Enterprise Gateway, eliminating the discovery step from\nits implementation.\n\nThese class definitions can be found in the\n[processproxies package](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies). 
However,\nEnterprise Gateway is architected such that additional process proxy implementations can be provided and are not\nrequired to be located within the Enterprise Gateway hierarchy - i.e., we embrace a _bring your own process proxy_ model.\n\n![Process Class Hierarchy](../images/process_proxy_hierarchy.png)\n\nThe complete process proxy class hierarchy is:\n\n```text\nBaseProcessProxyABC\n├── LocalProcessProxy\n└── RemoteProcessProxy\n    ├── DistributedProcessProxy\n    ├── YarnClusterProcessProxy\n    ├── ConductorClusterProcessProxy\n    └── ContainerProcessProxy\n        ├── DockerSwarmProcessProxy\n        ├── DockerProcessProxy\n        └── KubernetesProcessProxy\n            └── CustomResourceProcessProxy\n                └── SparkOperatorProcessProxy\n```\n\nThe process proxy constructor looks as follows:\n\n```python\ndef __init__(self, kernel_manager, proxy_config):\n```\n\nwhere\n\n- `kernel_manager` is an instance of a `RemoteKernelManager` class.\n- `proxy_config` is a dictionary of configuration values present in the `kernel.json` file. These\n  values can be used to override or amend various global configuration values on a per-kernel basis. See\n  [Process Proxy Configuration](#process-proxy-configuration) for more information.\n\n```python\n@abstractmethod\ndef launch_process(self, kernel_cmd, **kw):\n```\n\nwhere\n\n- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is\n  an artifact of the kernel manager `_launch_kernel()` method.\n- `**kw` is a set of keyword arguments which includes an `env` dictionary element consisting of the names\n  and values of which environment variables to set at launch time.\n\nThe `launch_process()` method is the primary method exposed on the Process Proxy classes. It's responsible for\nperforming the appropriate actions relative to the target type. 
The process must be in a running state prior\nto returning from this method - otherwise attempts to use the connections will not be successful since the\n(remote) kernel needs to have created the sockets.\n\nAll process proxy subclasses should ensure `BaseProcessProxyABC.launch_process()` is called - which will automatically\nplace a variable named `KERNEL_ID` (consisting of the kernel's unique ID) into the corresponding kernel's environment\nvariable list since `KERNEL_ID` is a primary mechanism for associating remote applications to a specific kernel instance.\n\n```python\ndef poll(self):\n```\n\nThe `poll()` method is used by the Jupyter framework to determine if the process is still alive. By default, the\nframework's heartbeat mechanism calls `poll()` every 3 seconds. This method returns `None` if the process is still running, `False` otherwise (per the `popen()` contract).\n\n```python\ndef wait(self):\n```\n\nThe `wait()` method is used by the Jupyter framework when terminating a kernel. Its purpose is to block return\nto the caller until the process has terminated. Since this could be awhile, it's best to return control in a\nreasonable amount of time since the kernel instance is destroyed anyway. This method does not return a value.\n\n```python\ndef send_signal(self, signum):\n```\n\nThe `send_signal()` method is used by the Jupyter framework to send a signal to the process. Currently, `SIGINT (2)`\n(to interrupt the kernel) is the signal sent.\n\nIt should be noted that for normal processes - both local and remote - `poll()` and `kill()` functionality can\nbe implemented via `send_signal` with `signum` values of `0` and `9`, respectively.\n\nThis method returns `None` if the process is still running, `False` otherwise.\n\n```python\ndef kill(self):\n```\n\nThe `kill()` method is used by the Jupyter framework to terminate the kernel process. 
This method is only necessary when the request to shutdown the kernel - sent via the control port of the zero-MQ ports - does not respond in an appropriate amount of time.\n\nThis method returns `None` if the process is killed successfully, `False` otherwise.\n\n### RemoteProcessProxy\n\nAs noted above, `RemoteProcessProxy` is an abstract base class that derives from `BaseProcessProxyABC`. Subclasses\nof `RemoteProcessProxy` must implement two methods - `confirm_remote_startup()` and `handle_timeout()`:\n\n```python\n@abstractmethod\ndef confirm_remote_startup(self, kernel_cmd, **kw):\n```\n\nwhere\n\n- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an\n  artifact of the kernel manager `_launch_kernel()` method.\n- `**kw` is a set of keyword arguments.\n\n`confirm_remote_startup()` is responsible for detecting that the remote kernel has been appropriately launched and is ready to receive requests. This can include gathering application status from the remote resource manager but is really a function of having received the connection information from the remote kernel launcher. (See [Kernel Launchers](#kernel-launchers))\n\n```python\n@abstractmethod\ndef handle_timeout(self):\n```\n\n`handle_timeout()` is responsible for detecting that the remote kernel has failed to startup in an acceptable time. It\nshould be called from `confirm_remote_startup()`. If the timeout expires, `handle_timeout()` should throw HTTP\nError 500 (`Internal Server Error`).\n\nKernel launch timeout expiration is expressed via the environment variable `KERNEL_LAUNCH_TIMEOUT`. If this\nvalue does not exist, it defaults to the Enterprise Gateway process environment variable `EG_KERNEL_LAUNCH_TIMEOUT` - which\ndefaults to 30 seconds if unspecified. 
Since all `KERNEL_` environment variables \"flow\" from the Notebook server, the launch\ntimeout can be specified as a client attribute of the Notebook session.\n\n#### YarnClusterProcessProxy\n\nAs part of its base offering, Enterprise Gateway provides an implementation of a process proxy that communicates with the Hadoop YARN resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources.\n\nDerived from `RemoteProcessProxy`, `YarnClusterProcessProxy` uses the `yarn-api-client` library to locate the kernel and monitor its lifecycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports.\n\nThis process proxy is reliant on the `--EnterpriseGatewayApp.yarn_endpoint` command line option or the `EG_YARN_ENDPOINT` environment variable to determine where the YARN resource manager is located. To accommodate increased flexibility, the endpoint definition can be defined within the process proxy stanza of the kernel specification, enabling the ability to direct specific kernels to different YARN clusters.\n\nIn cases where the YARN cluster is configured for high availability, then the `--EnterpriseGatewayApp.alt_yarn_endpoint` command line option or the `EG_ALT_YARN_ENDPOINT` environment variable should also be defined. When set, the underlying `yarn-api-client` library will choose the active Resource Manager between the two.\n\n```{note}\nIf Enterprise Gateway is running on an edge node of the cluster and has a valid `yarn-site.xml` file in HADOOP_CONF_DIR, neither of these values are required (default = None).  
In such cases, the `yarn-api-client` library will choose the active Resource Manager from the configuration files.\n```\n\n```{seealso}\n[Hadoop YARN deployments](../operators/deploy-yarn-cluster.md) in the Operators Guide for details.\n```\n\n#### DistributedProcessProxy\n\nLike `YarnClusterProcessProxy`, Enterprise Gateway also provides an implementation of a basic\nround-robin remoting mechanism that is part of the `DistributedProcessProxy` class. This class\nuses the `--EnterpriseGatewayApp.remote_hosts` command line option (or `EG_REMOTE_HOSTS`\nenvironment variable) to determine on which hosts a given kernel should be launched. It uses\na basic round-robin algorithm to index into the list of remote hosts for selecting the target\nhost. It then uses ssh to launch the kernel on the target host. As a result, all kernel specification\nfiles must reside on the remote hosts in the same directory structure as on the Enterprise\nGateway server.\n\nIt should be noted that kernels launched with this process proxy run in YARN _client_ mode - so their\nresources (within the kernel process itself) are not managed by the Hadoop YARN resource manager.\n\nLike the yarn endpoint parameter the `remote_hosts` parameter can be specified within the\nprocess proxy configuration to override the global value - enabling finer-grained kernel distributions.\n\n```{seealso}\n[Distributed deployments](../operators/deploy-distributed.md) in the Operators Guide for details.\n```\n\n#### KubernetesProcessProxy\n\nWith the popularity of Kubernetes within the enterprise, Enterprise Gateway provides an implementation\nof a process proxy that communicates with the Kubernetes resource manager via the Kubernetes API. Unlike\nthe other offerings, in the case of Kubernetes, Enterprise Gateway is itself deployed within the Kubernetes\ncluster as a _Service_ and _Deployment_. 
The primary vehicle by which this is accomplished is via [Helm](https://helm.sh/) and Enterprise Gateway provides a set of [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway) files to simplify deployment.\n\n```{seealso}\n[Kubernetes deployments](../operators/deploy-kubernetes.md) in the Operators Guide for details.\n```\n\n#### DockerSwarmProcessProxy\n\nEnterprise Gateway provides an implementation of a process proxy that communicates with the Docker Swarm resource manager via the Docker API. When used, the kernels are launched as swarm services and can reside anywhere in the managed cluster. To leverage kernels configured in this manner, Enterprise Gateway can be deployed\neither as a Docker Swarm _service_ or a traditional Docker container.\n\nA similar `DockerProcessProxy` implementation has also been provided. When used, the corresponding kernel will be launched as a traditional docker container that runs local to the launching Enterprise Gateway instance. As a result, its use has limited value.\n\n```{seealso}\n[Docker and Docker Swarm deployments](../operators/deploy-docker.md) in the Operators Guide for details.\n```\n\n#### ConductorClusterProcessProxy\n\nEnterprise Gateway also provides an implementation of a process proxy\nthat communicates with an IBM Spectrum Conductor resource manager that has been instructed to launch a kernel\non one of its worker nodes. The node on which the kernel is launched is up to the resource\nmanager - which enables an optimized distribution of kernel resources.\n\nDerived from `RemoteProcessProxy`, `ConductorClusterProcessProxy` uses Conductor's REST-ful API\nto locate the kernel and monitor its life-cycle. 
However, once the kernel has returned its\nconnection information, the primary kernel operations naturally take place over the ZeroMQ ports.\n\nThis process proxy is reliant on the `--EnterpriseGatewayApp.conductor_endpoint` command line\noption or the `EG_CONDUCTOR_ENDPOINT` environment variable to determine where the Conductor resource manager is\nlocated.\n\n```{seealso}\n[IBM Spectrum Conductor deployments](../operators/deploy-conductor.md) in the Operators Guide for details.\n```\n\n#### CustomResourceProcessProxy\n\nEnterprise Gateway also provides an implementation of a process proxy derived from `KubernetesProcessProxy`\ncalled `CustomResourceProcessProxy`.\n\nInstead of creating kernels based on a Kubernetes pod, `CustomResourceProcessProxy`\nmanages kernels via a custom resource definition (CRD). For example, `SparkApplication` is a CRD that includes\nmany components of a Spark-on-Kubernetes application.\n\nIf you are going to extend `CustomResourceProcessProxy`, just follow the steps below:\n\n- override custom resource related variables (i.e. 
`group`, `version` and `plural`)\n  and the `get_container_status` method, wrt [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py).\n\n- define a jinja template like\n  [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2).\n  As a generic design, the template file should be named as {crd_group}-{crd_version} so that you can reuse\n  [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py) in the kernelspec.\n\n- define a kernel specification like [spark_python_operator/kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_operator/kernel.json).\n\n### Process Proxy Configuration\n\nEach `kernel.json`'s `process-proxy` stanza can specify an optional `config` stanza that is converted\ninto a dictionary of name/value pairs and passed as an argument to each process-proxy constructor\nrelative to the class identified by the `class_name` entry.\n\nHow each dictionary entry is interpreted is completely a function of the constructor relative to that process-proxy\nclass or its superclass. For example, an alternate list of remote hosts has meaning to the `DistributedProcessProxy` but\nnot to its superclasses. As a result, the superclass constructors will not attempt to interpret that value.\n\nIn addition, certain dictionary entries can override or amend system-level configuration values set on the command-line, thereby\nallowing administrators to tune behaviors down to the kernel level. For example, an administrator might want to\nconstrain Python kernels configured to use specific resources to an entirely different set of hosts (and ports) that other\nremote kernels might be targeting in order to isolate valuable resources. 
Similarly, an administrator might want to\nonly authorize specific users to a given kernel.\n\nIn such situations, one might find the following `process-proxy` stanza:\n\n```json\n{\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\",\n      \"config\": {\n        \"remote_hosts\": \"priv_host1,priv_host2\",\n        \"port_range\": \"40000..41000\",\n        \"authorized_users\": \"bob,alice\"\n      }\n    }\n  }\n}\n```\n\nIn this example, the kernel associated with this `kernel.json` file is relegated to the hosts `priv_host1` and `priv_host2`\nwhere kernel ports will be restricted to a range between `40000` and `41000` and only users `bob` and `alice` can\nlaunch such kernels (provided neither appear in the global set of `unauthorized_users` since denial takes precedence).\n\nFor a current enumeration of which system-level configuration values can be overridden or amended on a per-kernel basis\nsee [Per-kernel overrides](../operators/config-kernel-override.md).\n\n## Kernel Launchers\n\nAs noted above, a kernel is considered started once the `launch_process()` method has conveyed its connection information back to the Enterprise Gateway server process. Conveyance of connection information from a remote kernel is the responsibility of the remote kernel _launcher_.\n\nKernel launchers provide a means of normalizing behaviors across kernels while avoiding kernel modifications. Besides providing a location where connection file creation can occur, they also provide a 'hook' for other kinds of behaviors - like establishing virtual environments or sandboxes, providing collaboration behavior, adhering to port range restrictions, etc.\n\nThere are four primary tasks of a kernel launcher:\n\n1. Creation of the connection file and ZMQ ports on the remote (target) system along with a _gateway listener_ socket\n1. 
Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process\n1. Invocation of the target kernel\n1. Listen for interrupt and shutdown requests from Enterprise Gateway and carry out the action when appropriate\n\nKernel launchers are minimally invoked with three parameters (all of which are conveyed by the `argv` stanza of the corresponding `kernel.json` file) - the kernel's ID as created by the server and conveyed via the placeholder `{kernel_id}`, a response address consisting of the Enterprise Gateway server IP and port on which to return the connection information similarly represented by the placeholder `{response_address}`, and a public-key used by the launcher to encrypt an AES key that encrypts the kernel's connection information back to the server and represented by the placeholder `{public_key}`.\n\nThe kernel's ID is identified by the parameter `--RemoteProcessProxy.kernel-id`. Its value (`{kernel_id}`) is essentially used to build a connection file to pass to the to-be-launched kernel, along with any other things - like log files, etc.\n\nThe response address is identified by the parameter `--RemoteProcessProxy.response-address`. Its value (`{response_address}`) consists of a string of the form `<IPV4:port>` where the IPV4 address points back to the Enterprise Gateway server - which is listening for a response on the provided port. The port's default value is `8877`, but can be specified via the environment variable `EG_RESPONSE_PORT`.\n\nThe public key is identified by the parameter `--RemoteProcessProxy.public-key`. Its value (`{public_key}`) is used to encrypt an AES key created by the launcher to encrypt the kernel's connection information. The server, upon receipt of the response, uses the corresponding private key to decrypt the AES key, which it then uses to decrypt the connection information. Both the public and private keys are ephemeral; created upon Enterprise Gateway's startup. 
They can be ephemeral because they are only needed during a kernel's startup and never again.\n\nHere's a [kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) file illustrating these parameters...\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nOther options supported by launchers include:\n\n- `--RemoteProcessProxy.port-range {port_range}` - passes configured port-range to launcher where launcher applies that range to kernel ports. The port-range may be configured globally or on a per-kernel specification basis, as previously described.\n\n- `--RemoteProcessProxy.spark-context-initialization-mode [lazy|eager|none]` - indicates the _timeframe_ in which the spark context will be created.\n\n  - `lazy` (default) attempts to defer initialization as late as possible - although this can vary depending on the\n    underlying kernel and launcher implementation.\n  - `eager` attempts to create the spark context as soon as possible.\n  - `none` skips spark context creation altogether.\n\n  Note that some launchers may not be able to support all modes. 
For example, the scala launcher uses the Apache Toree\n  kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply.\n  Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context\n  creation occurring immediately, but in the background thereby minimizing the kernel's startup time.\n\nKernel.json files also include a `LAUNCH_OPTS:` section in the `env` stanza to allow for custom\nparameters to be conveyed in the launcher's environment. `LAUNCH_OPTS` are then referenced in\nthe [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh)\nscript as the initial arguments to the launcher\n(see [launch_ipykernel.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py)) ...\n\n```bash\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\n```\n\n## Extending Enterprise Gateway\n\nTheoretically speaking, enabling a kernel for use in other frameworks amounts to the following:\n\n1. Build a kernel specification file that identifies the process proxy class to be used.\n1. Implement the process proxy class such that it supports the four primitive functions of\n   `poll()`, `wait()`, `send_signal(signum)` and `kill()` along with `launch_process()`.\n1. If the process proxy corresponds to a remote process, derive the process proxy class from\n   `RemoteProcessProxy` and implement `confirm_remote_startup()` and `handle_timeout()`.\n1. 
Insert invocation of a launcher (if necessary) which builds the connection file and\n   returns its contents on the `{response_address}` socket and following the encryption protocol set forth in the other launchers.\n\n```{seealso}\nThis topic is covered in the [Developers Guide](../developers/index.rst).\n```\n"
  },
  {
    "path": "docs/source/developers/custom-images.md",
    "content": "# Custom Kernel Images\n\nThis section presents information needed for how a custom kernel image could be built for your own uses with Enterprise Gateway. This is typically necessary if one desires to extend the existing image with additional supporting libraries or an image that encapsulates a different set of functionality altogether.\n\n## Extending Existing Kernel Images\n\nA common form of customization occurs when the existing kernel image is serving the fundamentals but the user wishes it be extended with additional libraries to prevent the need of their imports within the Notebook interactions. Since the image already meets the [basic requirements](#requirements-for-custom-kernel-images), this is really just a matter of referencing the existing image in the `FROM` statement and installing additional libraries. Because the EG kernel images do not run as the `root` user, you may need to switch users to perform the update.\n\n```dockerfile\nFROM elyra/kernel-py:VERSION\n\nUSER root  # switch to root user to perform installation (if necessary)\n\nRUN pip install my-libraries\n\nUSER $NB_UID  # switch back to the jovyan user\n```\n\n## Bringing Your Own Kernel Image\n\nUsers that do not wish to extend an existing kernel image must be cognizant of a couple of things.\n\n1. Requirements of a kernel-based image to be used by Enterprise Gateway.\n1. Is the base image one from [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks)?\n\n### Requirements for Custom Kernel Images\n\nCustom kernel images require some support files from the Enterprise Gateway repository. These are packaged into a tar file for each release starting in `2.5.0`. 
This tar file (named `jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz`) is composed of a few files - one bootstrap script and a kernel launcher (one per kernel type).\n\n#### Bootstrap-kernel.sh\n\nEnterprise Gateway provides a single [bootstrap-kernel.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh) script that handles the three kernel languages supported out of the box - Python, R, and Scala. When a kernel image is started by Enterprise Gateway, parameters used within the bootstrap-kernel.sh script are conveyed via environment variables. The bootstrap script is then responsible for validating and converting those parameters to meaningful arguments to the appropriate launcher.\n\n#### Kernel Launcher\n\nThe kernel launcher, as discussed [here](kernel-launcher.md) does a number of things. In particular, it creates the connection ports and conveys that connection information back to Enterprise Gateway via the socket identified by the response address parameter. Although not a requirement for container-based usage, it is recommended that the launcher be written in the same language as the kernel. (This is more of a requirement when used in applications like Hadoop YARN.)\n\n### About Jupyter Docker-stacks Images\n\nMost of what is presented assumes the base image for your custom image is derived from the [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks) repository. As a result, it's good to cover what makes up those assumptions so you can build your own image independently of the docker-stacks repository.\n\nAll images produced from the docker-stacks repository come with a certain user configured. 
This user is named `jovyan` and is mapped to a user id (UID) of `1000` and a group id (GID) of `100` - named `users`.\n\nThe various startup scripts and commands typically reside in `/usr/local/bin` and we recommend trying to adhere to that policy.\n\nThe base jupyter image, upon which most all images from docker-stacks are built, also contains a `fix-permissions` script that is responsible for _gracefully_ adjusting permissions based on its given parameters. By only changing the necessary permissions, use of this script minimizes the size of the docker layer in which that command is invoked during the build of the docker image.\n\n### Sample Dockerfiles for Custom Kernel Images\n\nBelow we provide two working Dockerfiles that produce custom kernel images. One based on an existing image from Jupyter docker-stacks, the other from an independent base image.\n\n#### Custom Kernel Image Built on Jupyter Image\n\nHere's an example Dockerfile that installs the minimally necessary items for a Python-based kernel image built on the docker-stack image `jupyter/scipy-notebook`. Note: the string `VERSION` must be replaced with the appropriate value.\n\n```dockerfile\n# Choose a base image.  Preferably one from https://github.com/jupyter/docker-stacks\nFROM jupyter/scipy-notebook:61d8aaedaeaf\n\n# Switch user to root since, if from docker-stacks, it's probably jovyan\nUSER root\n\n# Install any packages required for the kernel-wrapper.  If the image\n# does not contain the target kernel (i.e., IPython, IRkernel, etc.),\n# it should be installed as well.\nRUN pip install pycrypto\n\n# Download and extract the enterprise gateway kernel launchers and bootstrap\n# files and deploy to /usr/local/bin. 
Change permissions to NB_UID:NB_GID.\nRUN wget https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\\\n        tar -xvf jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz -C /usr/local/bin &&\\\n        rm -f jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\\\n        fix-permissions /usr/local/bin\n\n# Switch user back to jovyan and setup language and default CMD\nUSER $NB_UID\nENV KERNEL_LANGUAGE python\nCMD /usr/local/bin/bootstrap-kernel.sh\n```\n\n#### Independent Custom Kernel Image\n\nIf your base image is not from docker-stacks, it is recommended that you NOT run the image as USER `root` and create an _image user_ that is not UID 0. For this example, we will create the `jovyan` user with UID `1000` and a primary group of `users`, GID `100`. Note that Enterprise Gateway makes no assumption relative to the user in which the kernel image is running.\n\nAside from configuring the image user, all other aspects of customization are the same. In this case, we'll use the tensorflow-gpu image and convert it to be usable via Enterprise Gateway as a custom kernel image. Note that because this image didn't have `wget` we used `curl` to download the supporting kernel-image files.\n\n```dockerfile\nFROM tensorflow/tensorflow:2.5.0-gpu-jupyter\n\nUSER root\n\n# Install OS dependencies required for the kernel-wrapper. Missing\n# packages can be installed later only if container is running as\n# privileged user.\nRUN apt-get update && apt-get install -yq --no-install-recommends \\\n    build-essential \\\n    libsm6 \\\n    libxext-dev \\\n    libxrender1 \\\n    netcat \\\n    python3-dev \\\n    tzdata \\\n    unzip \\\n    && rm -rf /var/lib/apt/lists/*\n\n# Install any packages required for the kernel-wrapper.  
If the image\n# does not contain the target kernel (i.e., IPython, IRkernel, etc.),\n# it should be installed as well.\nRUN pip install pycrypto\n\n# Download and extract the enterprise gateway kernel launchers and bootstrap\n# files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID.\nRUN curl -L https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz | \\\n    tar -xz -C /usr/local/bin\n\nRUN adduser --system --uid 1000 --gid 100 jovyan && \\\n    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n    chown -R jovyan:users /usr/local/bin/kernel-launchers\n\nENV NB_UID 1000\nENV NB_GID 100\nUSER jovyan\nENV KERNEL_LANGUAGE python\nCMD /usr/local/bin/bootstrap-kernel.sh\n```\n\n## Deploying Your Custom Kernel Image\n\nThe final step in deploying a custom kernel image is creating a corresponding kernel specifications directory that is available to Enterprise Gateway. Since Enterprise Gateway is also running in a container, it's important that its kernel specifications directory either be mounted externally or a new Enterprise Gateway image is created with the appropriate directory in place. For the purposes of this discussion, we'll assume the kernel specifications directory, `/usr/local/share/jupyter/kernels`, is externally mounted.\n\n- Find a similar kernel specification directory from which to create your custom kernel specification. The most important aspect to this is matching the language of your kernel since it will use the same [kernel launcher](#kernel-launcher). Another important question is whether your custom kernel uses Spark, because those kernel specifications will vary significantly since many of the spark options reside in the `kernel.json`'s `env` stanza. 
Since our examples use _vanilla_ (non-Spark) python kernels we'll use the `python_kubernetes` kernel specification as our basis.\n\n```bash\ncd /usr/local/share/jupyter/kernels\ncp -r python_kubernetes python_myCustomKernel\n```\n\n- Edit the `kernel.json` file and change the `display_name:`, `image_name:` and path to `launch_kubernetes.py` script.\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"My Custom Kernel\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"myDockerHub/myCustomKernelImage:myTag\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_myCustomKernel/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\n- If using kernel filtering (`EG_ALLOWED_KERNELS`), be sure to update it with the new kernel specification directory name (e.g., `python_myCustomKernel`) and restart/redeploy Enterprise Gateway.\n- Launch or refresh your Notebook session and confirm `My Custom Kernel` appears in the _new kernel_ drop-down.\n- Create a new notebook using `My Custom Kernel`.\n"
  },
  {
    "path": "docs/source/developers/dev-process-proxy.md",
    "content": "# Implementing a process proxy\n\nA process proxy implementation is necessary if you want to interact with a resource manager that is not currently supported or extend some existing behaviors. For example, recently, we've had [contributions](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/crd.py#L18) that interact with [Kubernetes Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions), which is an example of _extending_ the `KubernetesProcessProxy` to accomplish a slightly different task.\n\nExamples of resource managers in which there's been some interest include [Slurm Workload Manager](https://slurm.schedmd.com/documentation.html) and [Apache Mesos](https://mesos.apache.org/), for example. In the end, it's really a matter of having access to an API and the ability to apply \"tags\" or \"labels\" in order to _discover_ where the kernel is running within the managed cluster. Once you have that information, then it becomes of matter of implementing the appropriate methods to control the kernel's lifecycle.\n\n```{admonition} Important!\n:class: error\n\nBefore continuing, it is important to consider timeframes here.  You may instead want to implement a [_Kernel Provisioner_](https://jupyter-client.readthedocs.io/en/latest/provisioning.html) rather an a Process Proxy since _provisioners_ are available to the general framework!\n\nThe [Enterprise Gateway 4.0 release is slated to adopt Kernel Provisioners](../contributors/roadmap.md) but must remain on a down-level `jupyter_client` release (< 7.x) until that time as Enterprise Gateway (and process proxies) are currently incompatible.\n\nThat said, if you and your organization plan to stay on Enterprise Gateway 2.x or 3.x for the next couple years, then implementing a process proxy may be in your best interest.  
Fortunately, the two constructs are nearly identical since Kernel Provisioners are essentially Process Proxies _properly_ integrated into the Jupyter framework thereby eliminating the need for various `KernelManager` hooks.\n```\n\n## General approach\n\nPlease refer to the [Process Proxy section](../contributors/system-architecture.md#process-proxy) in the System Architecture pages for descriptions and structure of existing process proxies. Here is the general guideline for the process of implementing a process proxy.\n\n1. Identify and understand how to _decorate_ your \"job\" within the resource manager. In Hadoop YARN, this is done by using the kernel's ID as the _application name_ by setting the [`--name` parameter to `${KERNEL_ID}`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json). In Kubernetes, we apply the kernel's ID to the [`kernel-id` label on the POD](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2).\n1. Today, all invocations of kernels into resource managers use a shell or python script mechanism configured into the `argv` stanza of the kernelspec. If you take this approach, you need to apply the necessary changes to integrate with your resource manager.\n1. Determine how to interact with the resource manager's API to _discover_ the kernel and determine on which host it's running. This interaction should occur immediately following Enterprise Gateway's receipt of the kernel's connection information in its response from the kernel launcher. This extra step, performed within `confirm_remote_startup()`, is necessary to get the appropriate host name as reflected in the resource manager's API.\n1. Determine how to monitor the \"job\" using the resource manager API. This will become part of the `poll()` implementation to determine if the kernel is still running. 
This should be as quick as possible since it occurs every 3 seconds. If this is an expensive call, you may need to make some adjustments like skip the call every so often.\n1. Determine how to terminate \"jobs\" using the resource manager API. This will become part of the termination sequence, but probably only necessary if the message-based shutdown does not work (i.e., a last resort).\n\n```{tip}\nBecause kernel IDs are globally unique, they serve as ideal identifiers for discovering where in the cluster the kernel is running.\n```\n\nYou will likely need to provide implementations for `launch_process()`, `poll()`, `wait()`, `send_signal()`, and `kill()`, although, depending on where your process proxy resides in the class hierarchy, some implementations may be reused.\n\nFor example, if your process proxy is going to service remote kernels, you should consider deriving your implementation from the [`RemoteProcessProxy` class](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/processproxy.py#L1070). If this is the case, then you'll need to implement `confirm_remote_startup()`.\n\nLikewise, if your process proxy is based on containers, you should consider deriving your implementation from the [`ContainerProcessProxy`](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/container.py#L39). If this is the case, then you'll need to implement `get_container_status()` and `terminate_container_resources()` rather than `confirm_remote_startup()`, etc.\n\nOnce the process proxy has been implemented, construct an appropriate kernel specification that references your process proxy and iterate until you are satisfied with how your remote kernels behave.\n"
  },
  {
    "path": "docs/source/developers/index.rst",
    "content": "Developers Guide\n================\n\nThese pages target *developers* writing applications against the REST API, authoring process proxies for other resource managers, or integrating applications with remote kernel functionality.\n\n.. admonition:: Use cases\n\n    - *As a developer, I want to explore supporting a different resource manager with Enterprise Gateway, by implementing a new `ProcessProxy` class such that I can easily take advantage of specific functionality provided by the resource manager.*\n    - *As a developer, I want to extend the `nbclient` application to use a `KernelManager` that can leverage remote kernels spawned from Enterprise Gateway.*\n    - *As a developer, I want to easily integrate the ability to launch remote kernels with existing platforms, so I can leverage my compute cluster in a customizable way.*\n    - *As a developer, I am currently using Golang and need to implement a kernel launcher to allow the Go kernel I use to run remotely in my Kubernetes cluster.*\n    - *As a developer, I'd like to extend some of the kernel container images and, eventually, create my own to better enable the data scientists I support.*\n    - *As a developer, I want to author my own Kernel-as-a-Service application.*\n\n.. toctree::\n   :maxdepth: 1\n   :name: developers\n\n   dev-process-proxy\n   kernel-launcher\n   kernel-specification\n   custom-images\n   kernel-library\n   kernel-manager\n   rest-api\n"
  },
  {
    "path": "docs/source/developers/kernel-launcher.md",
    "content": "# Implementing a kernel launcher\n\nA new implementation for a [_kernel launcher_](../contributors/system-architecture.md#kernel-launchers) becomes necessary when you want to introduce another kind of kernel to an existing configuration. Out of the box, Enterprise Gateway provides [kernel launchers](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers) that support the IPython kernel, the Apache Toree scala kernel, and the R kernel - IRKernel. There are other \"language-agnostic kernel launchers\" provided by Enterprise Gateway, but those are used in container environments to start the container or pod where the \"kernel image\" uses one of the three _language-based_ launchers to start the kernel within the container.\n\nIt's generally recommended that the launcher be written in the language of the kernel, but that is not a requirement so long as the launcher can start and manage the kernel's lifecycle and issue interrupts (if the kernel does not support message-based interrupts itself).\n\nTo reiterate, the four tasks of a kernel launcher are:\n\n1. Create the necessary connection information based on the 5 zero-mq ports, a signature key and algorithm specifier, along with a _gateway listener_ socket.\n1. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process after encrypting the information using AES, then encrypting the AES key using the provided public key.\n1. Invocation of the target kernel.\n1. Listen for interrupt and shutdown requests from Enterprise Gateway on the communication socket and carry out the action when appropriate.\n\n## Creating the connection information\n\nIf your target kernel exists, then there is probably support for creating ZeroMQ ports. 
If this proves difficult, you may be able to take a _hybrid approach_ where the connection information, encryption and listener portion of things is implemented in Python, while invocation takes place in the native language. This is how the [R kernel-launcher](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers/R/scripts) support is implemented.\n\nWhen creating the connection information, your kernel launcher should handle the possibility that the `--port-range` option has been specified such that each port should reside within the specified range.\n\nThe port used between Enterprise Gateway and the launcher, known as the _communication port_ should also adhere to the port range. It is not required that this port be ZeroMQ (and is not a ZMQ port in existing implementations).\n\n## Encrypting the connection information\n\nThe next task of the kernel launcher is sending the connection information back to the Enterprise Gateway server. Prior to doing this, the connection information, including the communication port, is encrypted using AES encryption and a 16-byte key. The AES key is then encrypted using the public key specified in the `public_key` parameter. These two fields (the AES-encrypted payload and the public-key-encrypted AES key) are then included into a JSON structure that also includes the launcher's version information and base64 encoded. 
Here's such an example from the [Python kernel launcher](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L207).\n\nThe payload is then [sent back on a socket](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L235) identified by the `--response-address` option.\n\n## Invoking the target kernel\n\nFor the R kernel launcher, the kernel is started using [`IRKernel::main()`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/R/scripts/launch_IRkernel.R#L256) after the `SparkContext` is initialized based on the `spark-context-initialization-mode` parameter.\n\nThe scala kernel launcher works similarly in that the Apache Toree kernel provides an [\"entrypoint\" to start the kernel](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/ToreeLauncher.scala#L315), however, because the Toree kernel initializes a `SparkContext` itself, the need to do so is conveyed directly to the kernel.\n\nFor the Python kernel launcher, it creates a namespace instance that contains the `SparkContext` information, if requested to do so via the `spark-context-initialization-mode` parameter, instantiates an `IPKernelApp` instance using the configured namespace, then calls the [`start()`](https://github.com/ipython/ipykernel/blob/6f448d280dadbff7245f4b28b5e210c899d79342/ipykernel/kernelapp.py#L694) method.\n\n### Invoking subclasses of `ipykernel.kernelbase.Kernel`\n\nBecause the python kernel launcher uses `IPKernelApp`, support for any subclass of `ipykernel.kernelbase.Kernel` can be launched by EG's Python kernel launcher.\n\nTo specify an alternate subclass, add `--kernel-class-name` (along with the specified dotted class string) to the `kernel.json` file's `argv` stanza. 
EG's Python launcher will import that class and pass it as a parameter to `IPKernelApp.initialize()`.\n\nHere's an example `kernel.json` file that launches the \"echo\" kernel using the `DistributedProcessProxy`:\n\n```JSON\n{\n  \"display_name\": \"Echo\",\n  \"language\": \"text\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/echo/scripts/launch_ipykernel.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"none\",\n    \"--kernel-class-name\",\n    \"echo_kernel.kernel.EchoKernel\"\n  ]\n}\n```\n\n```{admonition} Important!\nThe referenced `kernel-class-name` package must first be properly installed on all nodes where the associated process-proxy will run.\n```\n\n## Listening for interrupt and shutdown requests\n\nThe last task that must be performed by a kernel launcher is to listen on the communication port for work. There are currently two requests sent on the port, a signal event and a shutdown request.\n\nThe signal event is of the form `{\"signum\": n}` where the string `'signum'` indicates a signal event and `'n'` is an integer specifying the signal number to send to the kernel. Typically, the value of 'n' is `2` representing `SIGINT` and used to interrupt any current processing. As more kernels adopt a message-based interrupt approach, this will not be as common. Enterprise Gateway also uses this event to perform its `poll()` implementation by sending `{\"signum\": 0}`. Raising a signal of 0 to a process is a common way to determine the process is still alive.\n\nThe other event is a shutdown request. 
This is sent when the process proxy has typically terminated the kernel and it's just performing its final cleanup. The form of this request is `{\"shutdown\": 1}`. This is what instructs the launcher to abandon listening on the communication socket and to exit.\n\n## Other parameters\n\nBesides `--port-range`, `--public-key`, and `--response-address`, the kernel launcher needs to support `--kernel-id` that indicates the kernel's ID as known to the Gateway server. It should also tolerate the existence of `--spark-context-initialization-mode` but, unless applicable for Spark environments, should only support values of `\"none\"` for this option.\n"
  },
  {
    "path": "docs/source/developers/kernel-library.md",
    "content": "# Standalone Remote Kernel Execution\n\nRemote kernels can be executed by using the `RemoteKernelManager` class directly. This enables running kernels using `ProcessProxy`s without requiring deployment of the Enterprise Gateway web application. This approach is also known as _Library Mode_.\n\nThis can be useful in niche situations, for example, using [nbconvert](https://nbconvert.readthedocs.io/) or [nbclient](https://nbclient.readthedocs.io/) to execute a kernel on a remote cluster.\n\nSample code using nbclient 0.2.0:\n\n```python\nimport nbformat\nfrom nbclient import NotebookClient\nfrom enterprise_gateway.services.kernels.remotemanager import RemoteKernelManager\n\nwith open(\"my_notebook.ipynb\") as fp:\n    test_notebook = nbformat.read(fp, as_version=4)\n\nclient = NotebookClient(nb=test_notebook, kernel_manager_class=RemoteKernelManager)\nclient.execute(kernel_name='my_remote_kernel')\n```\n\nThe above code will execute the notebook on a kernel named `my_remote_kernel` using its configured `ProcessProxy`.\n\nDepending on the process proxy, the _hosting application_ (e.g., `nbclient`) will likely need to be configured to run on the same network as the remote kernel. So, for example, with Kubernetes, `nbclient` would need to be configured as a Kubernetes POD.\n"
  },
  {
    "path": "docs/source/developers/kernel-manager.md",
    "content": "# Using Jupyter Server's `GatewayKernelManager`\n\nAnother way to expose other Jupyter applications like `nbclient` or `papermill` to remote kernels is to use the [`GatewayKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L317) (and, implicitly, [`GatewayKernelClient`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L562)) classes that are embedded in Jupyter Server.\n\nThese classes essentially emulate the lower level [`KernelManager`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/manager.py#L84) and [`KernelClient`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/client.py#L75) classes but _forward_ their requests to/from a configured gateway server. Their necessary configuration for interacting with the gateway server is set on the [`GatewayClient` configurable](../users/client-config.md#gateway-client-configuration).\n\nThis allows for the _hosting application_ to remain **outside** the resource-managed cluster since the kernel is actually being managed by the target gateway server.\n\nSo, using the previous example, one may have...\n\n```python\nimport nbformat\nfrom nbclient import NotebookClient\nfrom jupyter_server.gateway.gateway_client import GatewayClient\nfrom jupyter_server.gateway.managers import GatewayKernelManager\n\nwith open(\"my_notebook.ipynb\") as fp:\n    test_notebook = nbformat.read(fp, as_version=4)\n\n# Set any other gateway-specific parameters on the GatewayClient (singleton) instance\ngw_client = GatewayClient.instance()\ngw_client.url = \"http://my-gateway-server.com:8888\"\n\nclient = NotebookClient(nb=test_notebook, kernel_manager_class=GatewayKernelManager)\nclient.execute(kernel_name='my_remote_kernel')\n```\n\nIn this case, 
`my_remote_kernel`'s kernel specification file actually resides on the Gateway server. `NotebookClient` will _think_ it's talking to local `KernelManager` and `KernelClient` instances, when, in actuality, they are forwarding requests to (and getting responses from) the Gateway server at `http://my-gateway-server.com:8888`.\n"
  },
  {
    "path": "docs/source/developers/kernel-specification.md",
    "content": "# Implementing a kernel specification\n\nIf you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll need a way to make that kernel and kernel launcher available to applications. This is accomplished via the _kernel specification_ or _kernelspec_.\n\nKernelspecs reside in well-known directories. For Enterprise Gateway, we generally recommend they reside in `/usr/local/share/jupyter/kernels` where each entry in this directory is a directory representing the name of the kernel. The kernel specification is represented by the file `kernel.json`, the contents of which essentially indicate what environment variables should be present in the kernel process (via the `env` _stanza_) and which command (and arguments) should be issued to start the kernel process (via the `argv` _stanza_). The JSON also includes a `metadata` stanza that contains the process_proxy configuration, along with which process proxy class to instantiate to help manage the kernel process's lifecycle.\n\nOne approach the sample Enterprise Gateway kernel specifications take is to include a shell script that actually issues the `spark-submit` request. 
It is this shell script (typically named `run.sh`) that is referenced in the `argv` stanza.\n\nHere's an example from the [`spark_python_yarn_cluster`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) kernel specification:\n\n```JSON\n{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n```\n\nwhere [`run.sh`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) 
issues `spark-submit` specifying the kernel launcher as the \"application\":\n\n```bash\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\n```\n\nFor container-based environments, the `argv` may instead reference a script that is meant to create the container pod (for Kubernetes). For these, we use a [template file](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) that operators can adjust to meet the needs of their environment. Here's how that `kernel.json` looks:\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nWhen using the `launch_ipykernel` launcher (aka the Python kernel launcher), subclasses of `ipykernel.kernelbase.Kernel` can be launched. By default, this launcher uses the classname `\"ipykernel.ipkernel.IPythonKernel\"`, but other subclasses of `ipykernel.kernelbase.Kernel` can be specified by adding a `--kernel-class-name` parameter to the `argv` stanza. 
See [Invoking subclasses of `ipykernel.kernelbase.Kernel`](kernel-launcher.md#invoking-subclasses-of-ipykernelkernelbasekernel) for more information.\n\nAs should be evident, kernel specifications are highly tuned to the runtime environment so your needs may be different, but _should_ resemble the approaches we've taken so far.\n"
  },
  {
    "path": "docs/source/developers/rest-api.rst",
    "content": "Using the REST API\n===============================\n\nThe REST API is used to author new applications that need to interact with\nEnterprise Gateway.  Generally speaking, only the ``/api/kernels`` and\n``/api/kernelspecs`` endpoints are used.  The ``/api/sessions`` endpoint *can*\nbe used to manage a kernel's lifecycle, but it is not necessary.  For example,\nwhile the Jupyter Notebook and JupyterLab applications start kernels using\n``/api/sessions``, the only interaction they perform with Enterprise Gateway is\nvia the ``/api/kernelspecs`` to retrieve a list of available kernel\nspecifications, and ``/api/kernels`` to start, stop, interrupt and restart a\nkernel.  The \"session\" remains on the client.\n\nGeneral sequence\n----------------\nHere's the general sequence of events to implement a REST-based application to *discover*, *start*, *execute code*, *interrupt*, and *shutdown* a kernel.  To demonstrate each call, we'll use `curl` against a running Enterprise Gateway server at ``http://my-gateway-server.com:8888``.\n\nKernel discovery\n~~~~~~~~~~~~~~~~\nIssue a `GET` request against the ``/api/kernelspecs`` endpoint to discover\navailable kernel specifications. Each entry corresponds to a ``kernel.json``\nfile located in a directory that corresponds to the kernel's name.  This *name*\nis what will be used in the subsequent start request.\n\nThe response is a JSON object where the ``default`` is a string specifying the\nname of the default kernel.  This kernel specification will be used if the\nstart request (e.g., ``POST /api/kernels``) does not specify a kernel name in\nits JSON body.\n\nThe other key in the response is `kernelspecs` and consists of a JSON indexed\nby kernel name with a value corresponding to the corresponding ``kernel.json``\nin addition to any *resources* associated with the kernel.  These are typically\nthe icon filenames to be used by the front-end application.\n\n.. 
code-block:: console\n\n    curl http://my-gateway-server.com:8888/api/kernelspecs\n\n.. raw:: html\n\n   <details>\n   <summary><a><span style=\"font-family:'Courier New'\">GET /api/kernelspecs</span> response</a></summary>\n\n.. code-block:: json\n\n    {\n      \"default\": \"python3\",\n      \"kernelspecs\": {\n        \"python3\": {\n          \"name\": \"python3\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/bin/env\",\n              \"/opt/anaconda2/envs/py3/bin/python\",\n              \"-m\",\n              \"ipykernel_launcher\",\n              \"-f\",\n              \"{connection_file}\"\n            ],\n            \"display_name\": \"Python 3\",\n            \"language\": \"python\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {}\n          },\n          \"resources\": {\n            \"logo-32x32\": \"/kernelspecs/python3/logo-32x32.png\",\n            \"logo-64x64\": \"/kernelspecs/python3/logo-64x64.png\"\n          }\n        },\n        \"ir\": {\n          \"name\": \"ir\",\n          \"spec\": {\n            \"argv\": [\n              \"R\",\n              \"--slave\",\n              \"-e\",\n              \"IRkernel::main()\",\n              \"--args\",\n              \"{connection_file}\"\n            ],\n            \"env\": {},\n            \"display_name\": \"R\",\n            \"language\": \"R\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {}\n          },\n          \"resources\": {\n            \"kernel.js\": \"/kernelspecs/ir/kernel.js\",\n            \"logo-64x64\": \"/kernelspecs/ir/logo-64x64.png\"\n          }\n        },\n        \"spark_r_yarn_client\": {\n          \"name\": \"spark_r_yarn_client\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_R_yarn_client/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              
\"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"lazy\"\n            ],\n            \"env\": {\n              \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"SPARK_OPTS\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}\",\n              \"LAUNCH_OPTS\": \"\"\n            },\n            \"display_name\": \"Spark - R (YARN Client Mode)\",\n            \"language\": \"R\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n              }\n            }\n          },\n          \"resources\": {\n            \"kernel.js\": \"/kernelspecs/spark_r_yarn_client/kernel.js\",\n            \"logo-64x64\": \"/kernelspecs/spark_r_yarn_client/logo-64x64.png\"\n          }\n        },\n        \"spark_r_yarn_cluster\": {\n          \"name\": \"spark_r_yarn_cluster\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              \"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"eager\"\n            ],\n            \"env\": {\n 
             \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}\",\n              \"LAUNCH_OPTS\": \"\"\n            },\n            \"display_name\": \"Spark - R (YARN Cluster Mode)\",\n            \"language\": \"R\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n              }\n            }\n          },\n          \"resources\": {\n            \"kernel.js\": \"/kernelspecs/spark_r_yarn_cluster/kernel.js\",\n            \"logo-64x64\": \"/kernelspecs/spark_r_yarn_cluster/logo-64x64.png\"\n          }\n        },\n        \"spark_python_yarn_client\": {\n          \"name\": \"spark_python_yarn_client\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              \"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"lazy\"\n            ],\n            \"env\": {\n              \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n              \"PYTHONPATH\": 
\"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n              \"SPARK_OPTS\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}\",\n              \"LAUNCH_OPTS\": \"\"\n            },\n            \"display_name\": \"Spark - Python (YARN Client Mode)\",\n            \"language\": \"python\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n              },\n              \"debugger\": true\n            }\n          },\n          \"resources\": {\n            \"logo-64x64\": \"/kernelspecs/spark_python_yarn_client/logo-64x64.png\"\n          }\n        },\n        \"spark_python_yarn_cluster\": {\n          \"name\": \"spark_python_yarn_cluster\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              \"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"lazy\"\n            ],\n            \"env\": {\n              \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n              \"PYTHONPATH\": \"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n              \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster 
--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}\",\n              \"LAUNCH_OPTS\": \"\"\n            },\n            \"display_name\": \"Spark - Python (YARN Cluster Mode)\",\n            \"language\": \"python\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n              },\n              \"debugger\": true\n            }\n          },\n          \"resources\": {\n            \"logo-64x64\": \"/kernelspecs/spark_python_yarn_cluster/logo-64x64.png\"\n          }\n        },\n        \"spark_scala_yarn_client\": {\n          \"name\": \"spark_scala_yarn_client\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              \"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"lazy\"\n            ],\n            \"env\": {\n              \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"__TOREE_SPARK_OPTS__\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}\",\n              
\"__TOREE_OPTS__\": \"--alternate-sigint USR2\",\n              \"LAUNCH_OPTS\": \"\",\n              \"DEFAULT_INTERPRETER\": \"Scala\"\n            },\n            \"display_name\": \"Spark - Scala (YARN Client Mode)\",\n            \"language\": \"scala\",\n            \"interrupt_mode\": \"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n              }\n            }\n          },\n          \"resources\": {\n            \"logo-64x64\": \"/kernelspecs/spark_scala_yarn_client/logo-64x64.png\"\n          }\n        },\n        \"spark_scala_yarn_cluster\": {\n          \"name\": \"spark_scala_yarn_cluster\",\n          \"spec\": {\n            \"argv\": [\n              \"/usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/bin/run.sh\",\n              \"--RemoteProcessProxy.kernel-id\",\n              \"{kernel_id}\",\n              \"--RemoteProcessProxy.response-address\",\n              \"{response_address}\",\n              \"--RemoteProcessProxy.public-key\",\n              \"{public_key}\",\n              \"--RemoteProcessProxy.port-range\",\n              \"{port_range}\",\n              \"--RemoteProcessProxy.spark-context-initialization-mode\",\n              \"lazy\"\n            ],\n            \"env\": {\n              \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n              \"__TOREE_SPARK_OPTS__\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d ${KERNEL_EXTRA_SPARK_OPTS}\",\n              \"__TOREE_OPTS__\": \"--alternate-sigint USR2\",\n              \"LAUNCH_OPTS\": \"\",\n              \"DEFAULT_INTERPRETER\": \"Scala\"\n            },\n            \"display_name\": \"Spark - Scala (YARN Cluster Mode)\",\n            \"language\": \"scala\",\n            \"interrupt_mode\": 
\"signal\",\n            \"metadata\": {\n              \"process_proxy\": {\n                \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n              }\n            }\n          },\n          \"resources\": {\n            \"logo-64x64\": \"/kernelspecs/spark_scala_yarn_cluster/logo-64x64.png\"\n          }\n        }\n      }\n    }\n\n.. raw:: html\n\n   </details>\n\nKernel start\n~~~~~~~~~~~~~~~~\nA kernel is started by issuing a ``POST`` request against the ``/api/kernels``\nendpoint.  The JSON body can take a ``name``, indicating the kernel to start,\nand an ``env`` JSON, corresponding to environment variables to set in the\nkernel's environment.\n\nIn this example, we will start the ``spark_python_yarn_cluster`` kernel with a ``KERNEL_USERNAME`` environment variable of ``jovyan``.\n\n.. code-block:: console\n\n    curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels' --data '{ \"name\": \"spark_python_yarn_cluster\", \"env\": { \"KERNEL_USERNAME\": \"jovyan\" }}'\n\n.. raw:: html\n\n   <details>\n   <summary><a><span style=\"font-family:'Courier New'\">POST /api/kernels</span> response</a></summary>\n\n.. code-block:: json\n\n    {\n      \"id\": \"f88bdc84-04c6-4021-963d-6811a61eca18\",\n      \"name\": \"spark_python_yarn_cluster\",\n      \"last_activity\": \"2022-02-12T00:40:45.080107Z\",\n      \"execution_state\": \"starting\",\n      \"connections\": 0\n    }\n\n.. raw:: html\n\n   </details>\n\nKernel code execution\n~~~~~~~~~~~~~~~~~~~~~\nUpgrading the connection to a websocket and issuing code against that websocket is currently beyond the knowledge of our maintainers.  For this aspect of this discussion we will refer you to our Python `GatewayClient class <https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/client/gateway_client.py#L20>`_ that we use in our integration tests.\n\n.. 
note::\n\n   The name ``GatewayClient`` in our ``enterprise_gateway/client`` subdirectory is not to be confused with the ``GatewayClient`` class defined in the client applications in Jupyter Server and Notebook.  In addition, the internal test class ``KernelClient`` is not to be confused with the ``KernelClient`` that lives in the ``jupyter_client`` package.\n\nKernel interrupt\n~~~~~~~~~~~~~~~~\nA kernel is interrupted by issuing a ``POST`` request against the ``/api/kernels/<kernel_id>/interrupt`` endpoint.\n\nIn this example, we will interrupt the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.\n\n.. note::\n\n   Restarting a kernel is nearly identical to interrupting a kernel; just replace ``interrupt`` in the endpoint with ``restart``.\n\n.. code-block:: console\n\n    curl -X POST -i 'http://ymy-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18/interrupt'\n\nAn expected response of ``Status Code`` equal ``204`` (No Content) is returned.  (The expected response for ``restart`` is ``200`` (OK).)\n\n\nKernel shutdown\n~~~~~~~~~~~~~~~~\nA kernel is shutdown by issuing a ``DELETE`` request against the ``/api/kernels/<kernel_id>`` endpoint.\n\nIn this example, we will shutdown the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.\n\n.. code-block:: console\n\n    curl -X DELETE -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18'\n\nAn expected response of ``Status Code`` equal ``204`` (No Content) is returned.\n\nOpenAPI Specification\n~~~~~~~~~~~~~~~~~~~~~\nHere's the current `OpenAPI <https://www.openapis.org/>`_ specification available from Enterprise Gateway.  An interactive version is available `here <https://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyter-server/enterprise_gateway/main/enterprise_gateway/services/api/swagger.yaml>`_.\n\n.. 
openapi:: ../../../enterprise_gateway/services/api/swagger.yaml\n"
  },
  {
    "path": "docs/source/index.rst",
    "content": "Welcome to Jupyter Enterprise Gateway!\n======================================\nJupyter Enterprise Gateway is a headless web server with a pluggable framework\nfor anyone supporting multiple notebook users in a managed-cluster environment.\nSome of the core functionality it provides is better optimization of compute\nresources, improved multi-user support, and more granular security for your\nJupyter notebook environment - making it suitable for enterprise, scientific,\nand academic implementations.\n\nFrom a technical perspective, Jupyter Enterprise Gateway is a web server that enables the ability to\nlaunch kernels on behalf of remote notebooks. This leads to better resource\nmanagement, as the web server is no longer the single location for kernel activity.  It essentially exposes a *Kernel as a Service* model.\n\nBy default, the Jupyter framework runs kernels locally - potentially exhausting the server of resources. By leveraging the functionality of the\nunderlying resource management applications like Hadoop YARN, Kubernetes, and others, Jupyter Enterprise Gateway\ndistributes kernels across the compute cluster, dramatically increasing the number of simultaneously active kernels while leveraging the available compute resources.\n\n.. figure:: images/Scalability-After-JEG.gif\n   :align: center\n\nKernel Gateway vs. Enterprise Gateway\n-------------------------------------\nJupyter Enterprise Gateway was formerly built directly on Jupyter Kernel\nGateway.  At that time, it had complete feature parity with Kernel Gateway.\nHowever, in order to address various roadmap items, Enterprise Gateway removed\nits dependency on Kernel Gateway, so now the question arises, when does one\nchoose Enterprise Gateway over Kernel Gateway?\n\nUse Enterprise Gateway if...\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n1. You have a large compute cluster consisting of limited resources (GPUs, large memory, etc) and users require those resources from notebooks\n2. 
You have large numbers of users requiring access to a shared compute cluster\n3. You require some amount of High Availability/Disaster Recovery such that another Gateway server can be spun up to service existing (and remote) kernels\n\nUse Kernel Gateway if...\n~~~~~~~~~~~~~~~~~~~~~~~~\n1. You have a small user pool where the resources of the Gateway server can be shared amongst those users (no remote kernel support)\n2. You wish to configured the `notebook-http mode <https://jupyter-kernel-gateway.readthedocs.io/en/latest/http-mode.html>`_ functionality where a specific Notebook provides HTTP endpoints\n\nWho's this for?\n---------------\nJupyter Enterprise Gateway is a highly technical piece of the Jupyter Stack, so we've separated documentation to help specific personas:\n\n1. `Users <users/index.html>`_: people using Jupyter web applications that wish to connect to an Enterprise Gateway instance.\n2. `Operators <operators/index.html>`_: people deploying or serving Jupyter Enterprise Gateway to others.\n3. `Developers <developers/index.html>`_: people writing applications or deploying kernels for other resource managers.\n4. `Contributors <contributors/index.html>`_: people contributing directly to the Jupyter Enterprise Gateway project.\n\nIf you find gaps in our documentation, please open an issue (or better yet, a pull request) on the Jupyter Enterprise Gateway `Github repo <https://github.com/jupyter-server/enterprise_gateway>`_.\n\nTable of Contents\n-----------------\n\n.. toctree::\n   :maxdepth: 2\n\n   Users <users/index>\n   Operators <operators/index>\n   Developers <developers/index>\n   Contributors <contributors/index>\n   Other <other/index>\n"
  },
  {
    "path": "docs/source/operators/config-add-env.md",
    "content": "# Additional environment variables\n\nBesides those environment variables associated with configurable options, the following environment variables can also be used to influence functionality:\n\n```text\n  EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME=default\n    Kubernetes only.  This value indicates the default service account name to use for\n    kernel namespaces when the Enterprise Gateway needs to create the kernel's namespace\n    and KERNEL_SERVICE_ACCOUNT_NAME has not been provided.\n\n  EG_DOCKER_NETWORK=enterprise-gateway or bridge\n    Docker only. Used by the docker deployment and launch scripts, this indicates the\n    name of the docker network docker network to use.  The start scripts default this\n    value to 'enterprise-gateway' because they create the network.  The docker kernel\n    launcher (launch_docker.py) defaults this value to 'bridge' only in cases where it\n    wasn't previously set by the deployment script.\n\n  EG_ENABLE_TUNNELING=False\n    Indicates whether tunneling (via ssh) of the kernel and communication ports\n    is enabled (True) or not (False).\n\n  EG_KERNEL_CLUSTER_ROLE=kernel-controller or cluster-admin\n    Kubernetes only.  The role to use when binding with the kernel service account.\n    The eg-clusterrole.yaml file creates the cluster role 'kernel-controller'\n    and conveys that name via EG_KERNEL_CLUSTER_ROLE.  Should the deployment script\n    not set this valuem, Enterprise Gateway will then use 'cluster-admin'.  It is\n    recommended this value be set to something other than 'cluster-admin'.\n\n  EG_KERNEL_LAUNCH_TIMEOUT=30\n    The time (in seconds) Enterprise Gateway will wait for a kernel's startup\n    completion status before deeming the startup a failure, at which time a second\n    startup attempt will take place.  
If a second timeout occurs, Enterprise\n    Gateway will report a failure to the client.\n\n  EG_KERNEL_INFO_TIMEOUT=60\n    The time (in seconds) Enterprise Gateway will wait for kernel info response\n    before deeming the request a failure.\n\n  EG_SENSITIVE_ENV_KEYS=\"\"\n    A comma separated list (e.g. \"secret,pwd,auth\") of sensitive environment\n    variables. Any environment variables that contain any of the words from this\n    list will have their values as EG_REDACTION_MASK whenever logged.\n\n  EG_REDACTION_MASK=********\n    The redaction mask used if EG_SENSITIVE_ENV_KEYS is set. Sensitive environment\n    variables will be logged as this redaction mask instead.\n\n  EG_KERNEL_LOG_DIR=/tmp\n    The directory used during remote kernel launches of DistributedProcessProxy\n    kernels.  Files in this directory will be of the form kernel-<kernel_id>.log.\n\n  EG_KERNEL_SESSION_PERSISTENCE=False\n    **Experimental** Enables kernel session persistence.  Currently, this is purely\n    experiemental and writes kernel session information to a local file.  Should\n    Enterprise Gateway terminate with running kernels, a subsequent restart of\n    Enterprise Gateway will attempt to reconnect to the persisted kernels.  See\n    also EG_KERNEL_SESSION_LOCATION and --KernelSessionManager.enable_persistence.\n\n  EG_KERNEL_SESSION_LOCATION=<JupyterDataDir>\n    **Experimental** The location in which the kernel session information is persisted.\n    By default, this is located in the configured JupyterDataDir.  See also\n    EG_KERNEL_SESSION_PERSISTENCE.\n\n  EG_MAX_PORT_RANGE_RETRIES=5\n    The number of attempts made to locate an available port within the specified\n    port range.  
Only applies when --EnterpriseGatewayApp.port_range\n    (or EG_PORT_RANGE) has been specified or is in use for the given kernel.\n\n  EG_MIN_PORT_RANGE_SIZE=1000\n    The minimum port range size permitted when --EnterpriseGatewayApp.port_range\n    (or EG_PORT_RANGE) is specified or is in use for the given kernel.  Port ranges\n    reflecting smaller sizes will result in a failure to launch the corresponding\n    kernel (since port-range can be specified within individual kernel specifications).\n\n  EG_MIRROR_WORKING_DIRS=False\n    Containers only.  If True, kernel creation requests that specify KERNEL_WORKING_DIR\n    will set the kernel container's working directory to that value.  See also\n    KERNEL_WORKING_DIR.\n\n  EG_NAMESPACE=enterprise-gateway or default\n    Kubernetes only.  Used during Kubernetes deployment, this indicates the name of\n    the namespace in which the Enterprise Gateway service is deployed.  The\n    namespace is created prior to deployment, and is set into the EG_NAMESPACE env via\n    deployment.yaml script. This value is then used within Enterprise Gateway to coordinate\n    kernel configurations. Should this value not be set during deployment, Enterprise\n    Gateway will default its value to namespace 'default'.\n\n  EG_PROHIBITED_GIDS=0\n    Containers only.  A comma-separated list of group ids (GID) whose values are not\n    allowed to be referenced by KERNEL_GID.  This defaults to the root group id (0).\n    Attempts to launch a kernel where KERNEL_GID's value is in this list will result\n    in an exception indicating error 403 (Forbidden).  See also EG_PROHIBITED_UIDS.\n\n  EG_PROHIBITED_LOCAL_IPS=''\n    A comma-separated list of local IPv4 addresses (or regular expressions) that\n    should not be used when determining the response address used to convey connection\n    information back to Enterprise Gateway from a remote kernel.  
In some cases, other\n    network interfaces (e.g., docker with 172.17.0.*) can interfere - leading to\n    connection failures during kernel startup.\n    Example: EG_PROHIBITED_LOCAL_IPS=172.17.0.*,192.168.0.27 will eliminate the use of\n    all addresses in 172.17.0 as well as 192.168.0.27\n\n  EG_PROHIBITED_UIDS=0\n    Containers only.  A comma-separated list of user ids (UID) whose values are not\n    allowed to be referenced by KERNEL_UID.  This defaults to the root user id (0).\n    Attempts to launch a kernel where KERNEL_UID's value is in this list will result\n    in an exception indicating error 403 (Forbidden).  See also EG_PROHIBITED_GIDS.\n\n  EG_RESPONSE_IP=None\n    Experimental.  The IP address to use to formulate the response address (with\n    `EG_RESPONSE_PORT`).  By default, the server's IP is used.  However, we may find\n    it necessary to use a different IP in cases where the target kernels are external\n    to the Enterprise Gateway server (for example).  It's value may also need to be\n    set in cases where the computed (default) is not correct for the current topology.\n\n  EG_RESPONSE_PORT=8877\n    The single response port used to receive connection information\n    from launched kernels.\n\n  EG_RESPONSE_PORT_RETRIES=10\n    The number of retries to attempt when the original response port\n    (EG_RESPONSE_PORT) is found to be in-use.  This value should be\n    set to 0 (zero) if no port retries are desired.\n\n  EG_SHARED_NAMESPACE=False\n    Kubernetes only. This value indicates whether (True) or not (False) all kernel pods\n    should reside in the same namespace as Enterprise Gateway.  
This is not a recommended\n    configuration.\n\n  EG_SSH_PORT=22\n    The port number used for ssh operations for installations choosing to\n    configure the ssh server on a port other than the default 22.\n\n  EG_REMOTE_PWD=None\n    The password to use to ssh to remote hosts\n\n  EG_REMOTE_USER=None\n    The username to use when connecting to remote hosts (default to `getpass.getuser()`\n    when not set).\n\n  EG_REMOTE_GSS_SSH=False\n    Use gss instead of EG_REMOTE_USER and EG_REMOTE_PWD to connect to remote host via SSH.\n    Case insensitive. 'True' to enable, 'False', '' or unset to disable.\n    Any other value will error.\n\n  EG_YARN_CERT_BUNDLE=<custom_truststore_path>\n    The path to a .pem or any other custom truststore used as a CA bundle in\n    yarn-api-client.\n\n  EG_ZMQ_IO_THREADS=1\n    The size of the ZMQ thread pool used to handle I/O operations.  Applies only to shared\n    contexts which are enabled by default but can be specified via\n    `RemoteMappingKernelManager.shared_context = True`.\n\n  EG_ZMQ_MAX_SOCKETS=1023\n    Specifies the maximum number of sockets to allow on the ZMQ context.  Applies only to\n    shared contexts which are enabled by default but can be specified via\n    `RemoteMappingKernelManager.shared_context = True`.\n```\n"
  },
  {
    "path": "docs/source/operators/config-availability.md",
    "content": "# Availability modes\n\nEnterprise Gateway can be optionally configured in one of two \"availability modes\": _standalone_ or _replication_. When configured, Enterprise Gateway can recover from failures and reconnect to any active remote kernels that were previously managed by the terminated EG instance. As such, both modes require that kernel session persistence also be enabled via `KernelSessionManager.enable_persistence=True`.\n\n```{note}\nKernel session persistence will be automtically enabled whenever availability mode is configured.\n```\n\n```{caution}\n**Availability modes and kernel session persistence should be considered experimental!**\n\nKnown issues include:\n1. Culling configurations do not account for different nodes and therefore could result in the incorrect culling of kernels.\n2. Each \"node switch\" requires a manual reconnect to the kernel.\n\nWe hope to address these in future releaases (depending on demand).\n```\n\n## Standalone availability\n\n_Standalone availability_ assumes that, upon failure of the original EG instance, another EG instance will be started. Upon startup of the second instance (following the termination of the first), EG will attempt to load and reconnect to all kernels that were deemed active when the previous instance terminated. 
This mode is somewhat analogous to the classic HA/DR mode of _active-passive_ and is typically used when node resources are at a premium or the number of replicas (in the Kubernetes sense) must remain at 1.\n\nTo enable Enterprise Gateway for 'standalone' availability, configure `EnterpiseGatewayApp.availability_mode=standalone` or set env `EG_AVAILABILITY_MODE=standalone`.\n\nHere's an example for starting Enterprise Gateway with standalone availability:\n\n```bash\n#!/bin/bash\n\nLOG=/var/log/enterprise_gateway.log\nPIDFILE=/var/run/enterprise_gateway.pid\n\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \\\n   --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 &\n\nif [ \"$?\" -eq 0 ]; then\n  echo $! > $PIDFILE\nelse\n  exit 1\nfi\n```\n\n## Replication availability\n\nWith _replication availability_, multiple EG instances (or replicas) are operating at the same time, and fronted with some kind of reverse proxy or load balancer. Because state still resides within each `KernelManager` instance executing within a given EG instance, we strongly suggest configuring some form of _client affinity_ (a.k.a, \"sticky session\") to avoid node switches wherever possible since each node switch requires manual reconnection of the front-end (today).\n\n```{tip}\nConfiguring client affinity is **strongly recommended**, otherwise functionality that relies on state within the servicing node (e.g., culling) can be affected upon node switches, resulting in incorrect behavior.\n```\n\nIn this mode, when one node goes down, the subsequent request will be routed to a different node that doesn't know about the kernel. Prior to returning a `404` (not found) status code, EG will check its persisted store to determine if the kernel was managed and, if so, attempt to \"hydrate\" a `KernelManager` instance associated with the remote kernel. (Of course, if the kernel was running local to the downed server, chances are it cannot be _revived_.) 
Upon successful \"hydration\" the request continues as if on the originating node. Because _client affinity_ is in place, subsequent requests should continue to be routed to the \"servicing node\".\n\nTo enable Enterprise Gateway for 'replication' availability, configure `EnterpiseGatewayApp.availability_mode=replication` or set env `EG_AVAILABILITY_MODE=replication`.\n\n```{attention}\nTo preserve backwards compatibility, if only kernel session persistence is enabled via `KernelSessionManager.enable_persistence=True`, the availability mode will be automatically configured to 'replication' if `EnterpiseGatewayApp.availability_mode` is not configured.\n```\n\nHere's an example for starting Enterprise Gateway with replication availability:\n\n```bash\n#!/bin/bash\n\nLOG=/var/log/enterprise_gateway.log\nPIDFILE=/var/run/enterprise_gateway.pid\n\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \\\n   --EnterpriseGatewayApp.availability_mode=replication > $LOG 2>&1 &\n\nif [ \"$?\" -eq 0 ]; then\n  echo $! > $PIDFILE\nelse\n  exit 1\nfi\n```\n\n# Kernel Session Persistence\n\nEnabling kernel session persistence allows Jupyter Notebooks to reconnect to kernels when Enterprise Gateway is restarted and forms the basis for the _availability modes_ described above. Enterprise Gateway provides two ways of persisting kernel sessions: _File Kernel Session Persistence_ and _Webhook Kernel Session Persistence_, although others can be provided by subclassing `KernelSessionManager` (see below).\n\n```{attention}\nDue to its experimental nature, kernel session persistence is disabled by default. To enable this functionality, you must configure `KernelSessionManger.enable_persistence=True` or configure `EnterpriseGatewayApp.availability_mode` to either `standalone` or `replication`.\n```\n\nAs noted above, the availability modes rely on the persisted information relative to the kernel. 
This information consists of the arguments and options used to launch the kernel, along with its connection information. In essence, it consists of any information necessary to re-establish communication with the kernel.\n\n## File Kernel Session Persistence\n\nFile Kernel Session Persistence stores kernel sessions as files in a specified directory. To enable this form of persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `FileKernelSessionManager.enable_persistence=True`. To change the directory in which the kernel session file is being saved, either set the environment variable `EG_PERSISTENCE_ROOT` or configure `FileKernelSessionManager.persistence_root` to the directory. By default, the directory used to store a given kernel's session information is the `JUPYTER_DATA_DIR`.\n\n```{note}\nEnterprise Gateway handles corrupted or invalid session files gracefully. If a persisted session file contains invalid JSON or cannot be read, the error is logged and that session is skipped rather than preventing Enterprise Gateway from starting.\n```\n\n```{note}\nBecause `FileKernelSessionManager` is the default class for kernel session persistence, configuring `EnterpriseGatewayApp.kernel_session_manager_class` to `enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager` is not necessary.\n```\n\n## Webhook Kernel Session Persistence\n\nWebhook Kernel Session Persistence stores all kernel sessions to any database. In order for this to work, an API must be created. 
The API must include four endpoints:\n\n- A `GET` that will retrieve a list of all kernel sessions from a database\n- A `GET` that will take the kernel id as a path variable and retrieve that information from a database\n- A `DELETE` that will delete all kernel sessions, where the body of the request is a list of kernel ids\n- A `POST` that will take kernel id as a path variable and kernel session in the body of the request and save it to a database where the object being saved is:\n\n```\n    {\n      kernel_id: UUID string,\n      kernel_session: JSON\n    }\n```\n\nTo enable the webhook kernel session persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `WebhookKernelSessionManager.enable_persistence=True`. To connect the API, set the environment variable `EG_WEBHOOK_URL` or configure `WebhookKernelSessionManager.webhook_url` to the API endpoint.\n\nBecause `WebhookKernelSessionManager` is not the default kernel session persistence class, an additional configuration step must be taken to instruct EG to use this class: `EnterpriseGatewayApp.kernel_session_manager_class = enterprise_gateway.services.sessions.kernelsessionmanager.WebhookKernelSessionManager`.\n\n### Enabling Authentication\n\nEnabling authentication is an option if the API requires it for requests. Set the environment variable `EG_AUTH_TYPE` or configure `WebhookKernelSessionManager.auth_type` to be either `Basic` or `Digest`. If it is set to an empty string authentication won't be enabled.\n\nThen set the environment variables `EG_WEBHOOK_USERNAME` and `EG_WEBHOOK_PASSWORD` or configure `WebhookKernelSessionManager.webhook_username` and `WebhookKernelSessionManager.webhook_password` to provide the username and password for authentication.\n\n## Bring Your Own Kernel Session Persistence\n\nTo introduce a different implementation, you must configure the kernel session manager class. 
Here's an example for starting Enterprise Gateway using a custom `KernelSessionManager` and 'standalone' availability. Note that setting `--MyCustomKernelSessionManager.enable_persistence=True` is not necessary because an availability mode is specified, but displayed here for completeness:\n\n```bash\n#!/bin/bash\n\nLOG=/var/log/enterprise_gateway.log\nPIDFILE=/var/run/enterprise_gateway.pid\n\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \\\n   --EnterpriseGatewayApp.kernel_session_manager_class=custom.package.MyCustomKernelSessionManager \\\n   --MyCustomKernelSessionManager.enable_persistence=True \\\n   --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 &\n\nif [ \"$?\" -eq 0 ]; then\n  echo $! > $PIDFILE\nelse\n  exit 1\nfi\n```\n\nAlternative persistence implementations using SQL and NoSQL databases would be ideal and, as always, contributions are welcome!\n\n## Testing Kernel Session Persistence\n\nOnce kernel session persistence has been enabled and configured, create a kernel by opening up a Jupyter Notebook. Save some variable in that notebook and shutdown Enterprise Gateway using `kill -9 PID`, where `PID` is the PID of gateway. Restart Enterprise Gateway and refresh you notebook tab. If all worked correctly, the variable should be loaded without the need to rerun the cell.\n\nIf you are using docker, ensure the container isn't tied to the PID of Enterprise Gateway. The container should still run after killing that PID.\n"
  },
  {
    "path": "docs/source/operators/config-cli.md",
    "content": "# Command-line options\n\nIn some cases, it may be easier to use command line options. These can also be used for _static_ values that should not be the targeted for [_dynamic configurables_](config-dynamic.md/#dynamic-configurables).\n\nTo see the same configuration options at the command line, run the following:\n\n```bash\njupyter enterprisegateway --help-all\n```\n\nA snapshot of this help appears below for ease of reference. The options for the superclass `EnterpriseGatewayConfigMixin` have been omitted. As with the `--generate-config` option, each option includes its corresponding environment variable, if applicable.\n\n```text\nJupyter Enterprise Gateway\n\nProvisions remote Jupyter kernels and proxies HTTP/Websocket traffic to them.\n\nOptions\n-------\n\nArguments that take values are actually convenience aliases to full\nConfigurables, whose aliases are listed on the help line. For more information\non full configurables, see '--help-all'.\n\n--debug\n    set log level to logging.DEBUG (maximize logging output)\n--generate-config\n    generate default config file\n-y\n    Answer yes to any questions instead of prompting.\n--log-level=<Enum> (Application.log_level)\n    Default: 30\n    Choices: (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')\n    Set the log level by value or name.\n--config=<Unicode> (JupyterApp.config_file)\n    Default: ''\n    Full path of a config file.\n--ip=<Unicode> (EnterpriseGatewayApp.ip)\n    Default: '127.0.0.1'\n    IP address on which to listen (EG_IP env var)\n--port=<Int> (EnterpriseGatewayApp.port)\n    Default: 8888\n    Port on which to listen (EG_PORT env var)\n--port_retries=<Int> (EnterpriseGatewayApp.port_retries)\n    Default: 50\n    Number of ports to try if the specified port is not available\n    (EG_PORT_RETRIES env var)\n--keyfile=<Unicode> (EnterpriseGatewayApp.keyfile)\n    Default: None\n    The full path to a private key file for usage with SSL/TLS. 
(EG_KEYFILE env\n    var)\n--certfile=<Unicode> (EnterpriseGatewayApp.certfile)\n    Default: None\n    The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)\n--client-ca=<Unicode> (EnterpriseGatewayApp.client_ca)\n    Default: None\n    The full path to a certificate authority certificate for SSL/TLS client\n    authentication. (EG_CLIENT_CA env var)\n\nClass parameters\n----------------\n\nParameters are set from command-line arguments of the form:\n`--Class.trait=value`. This line is evaluated in Python, so simple expressions\nare allowed, e.g.:: `--C.a='range(3)'` For setting C.a=[0,1,2].\n\nEnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp) options\n----------------------------------------------------------------------\n--EnterpriseGatewayApp.allow_credentials=<Unicode>\n    Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env\n    var)\n    Default: ''\n--EnterpriseGatewayApp.allow_headers=<Unicode>\n    Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)\n    Default: ''\n--EnterpriseGatewayApp.allow_methods=<Unicode>\n    Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)\n    Default: ''\n--EnterpriseGatewayApp.allow_origin=<Unicode>\n    Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)\n    Default: ''\n--EnterpriseGatewayApp.alt_yarn_endpoint=<Unicode>\n    The http url specifying the alternate YARN Resource Manager.  This value\n    should be set when YARN Resource Managers are configured for high\n    availability.  Note: If both YARN endpoints are NOT set, the YARN library\n    will use the files within the local HADOOP_CONFIG_DIR to determine the\n    active resource manager. 
(EG_ALT_YARN_ENDPOINT env var)\n    Default: None\n--EnterpriseGatewayApp.answer_yes=<Bool>\n    Answer yes to any prompts.\n    Default: False\n--EnterpriseGatewayApp.auth_token=<Unicode>\n    Authorization token required for all requests (EG_AUTH_TOKEN env var)\n    Default: ''\n--EnterpriseGatewayApp.authorized_users=<set-item-1>...\n    Comma-separated list of user names (e.g., ['bob','alice']) against which\n    KERNEL_USERNAME will be compared.  Any match (case-sensitive) will allow the\n    kernel's launch, otherwise an HTTP 403 (Forbidden) error will be raised.\n    The set of unauthorized users takes precedence. This option should be used\n    carefully as it can dramatically limit who can launch kernels.\n    (EG_AUTHORIZED_USERS env var - non-bracketed, just comma-separated)\n    Default: set()\n--EnterpriseGatewayApp.authorized_origin=<Unicode>\n    Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will\n    match against the request's SSL certificate.  An HTTP 403 (Forbidden) error\n    will be raised on a failed match.  This option requires TLS to be enabled.\n    It does not support IP addresses. (EG_AUTHORIZED_ORIGIN env var)\n    Default: ''\n--EnterpriseGatewayApp.availability_mode=<CaselessStrEnum>\n    Specifies the type of availability.  Values must be one of \"standalone\"\n    or \"replication\".  (EG_AVAILABILITY_MODE env var)\n    Choices: any of ['standalone', 'replication'] (case-insensitive) or None\n    Default: None\n--EnterpriseGatewayApp.base_url=<Unicode>\n    The base path for mounting all API resources (EG_BASE_URL env var)\n    Default: '/'\n--EnterpriseGatewayApp.certfile=<Unicode>\n    The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)\n    Default: None\n--EnterpriseGatewayApp.client_ca=<Unicode>\n    The full path to a certificate authority certificate for SSL/TLS client\n    authentication. 
(EG_CLIENT_CA env var)\n    Default: None\n--EnterpriseGatewayApp.client_envs=<list-item-1>...\n    Environment variables allowed to be set when a client requests a\n    new kernel. (EG_CLIENT_ENVS env var)\n    Default: []\n--EnterpriseGatewayApp.conductor_endpoint=<Unicode>\n    The http url for accessing the Conductor REST API. (EG_CONDUCTOR_ENDPOINT\n    env var)\n    Default: None\n--EnterpriseGatewayApp.config_file=<Unicode>\n    Full path of a config file.\n    Default: ''\n--EnterpriseGatewayApp.config_file_name=<Unicode>\n    Specify a config file to load.\n    Default: ''\n--EnterpriseGatewayApp.default_kernel_name=<Unicode>\n    Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)\n    Default: ''\n--EnterpriseGatewayApp.dynamic_config_interval=<Int>\n    Specifies the number of seconds configuration files are polled for changes.\n    A value of 0 or less disables dynamic config updates.\n    (EG_DYNAMIC_CONFIG_INTERVAL env var)\n    Default: 0\n--EnterpriseGatewayApp.env_process_whitelist=<list-item-1>...\n    DEPRECATED, use inherited_envs\n    Default: []\n--EnterpriseGatewayApp.env_whitelist=<list-item-1>...\n    DEPRECATED, use client_envs.\n    Default: []\n--EnterpriseGatewayApp.expose_headers=<Unicode>\n    Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)\n    Default: ''\n--EnterpriseGatewayApp.generate_config=<Bool>\n    Generate default config file.\n    Default: False\n--EnterpriseGatewayApp.impersonation_enabled=<Bool>\n    Indicates whether impersonation will be performed during kernel launch.\n    (EG_IMPERSONATION_ENABLED env var)\n    Default: False\n--EnterpriseGatewayApp.inherited_envs=<list-item-1>...\n    Environment variables allowed to be inherited\n    from the spawning process by the kernel. 
(EG_INHERITED_ENVS env var)\n    Default: []\n--EnterpriseGatewayApp.ip=<Unicode>\n    IP address on which to listen (EG_IP env var)\n    Default: '127.0.0.1'\n--EnterpriseGatewayApp.kernel_headers=<list-item-1>...\n    Request headers to make available to kernel launch framework.\n    (EG_KERNEL_HEADERS env var)\n    Default: []\n--EnterpriseGatewayApp.kernel_manager_class=<Type>\n    The kernel manager class to use. Must be a subclass of\n    `enterprise_gateway.services.kernels.RemoteMappingKernelManager`.\n    Default: 'enterprise_gateway.services.kernels.remotemanager.RemoteMapp...\n--EnterpriseGatewayApp.kernel_session_manager_class=<Type>\n    The kernel session manager class to use. Must be a subclass of\n    `enterprise_gateway.services.sessions.KernelSessionManager`.\n    Default: 'enterprise_gateway.services.sessions.kernelsessionmanager.Fi...\n--EnterpriseGatewayApp.kernel_spec_cache_class=<Type>\n    The kernel spec cache class to use. Must be a subclass of\n    `enterprise_gateway.services.kernelspecs.KernelSpecCache`.\n    Default: 'enterprise_gateway.services.kernelspecs.kernelspec_cache.Ker...\n--EnterpriseGatewayApp.kernel_spec_manager_class=<Type>\n    The kernel spec manager class to use. Must be a subclass of\n    `jupyter_client.kernelspec.KernelSpecManager`.\n    Default: 'jupyter_client.kernelspec.KernelSpecManager'\n--EnterpriseGatewayApp.keyfile=<Unicode>\n    The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env\n    var)\n    Default: None\n--EnterpriseGatewayApp.list_kernels=<Bool>\n    Permits listing of the running kernels using API endpoints /api/kernels and\n    /api/sessions. 
(EG_LIST_KERNELS env var) Note: Jupyter Notebook allows this\n    by default but Jupyter Enterprise Gateway does not.\n    Default: False\n--EnterpriseGatewayApp.load_balancing_algorithm=<Unicode>\n    Specifies which load balancing algorithm DistributedProcessProxy should use.\n    Must be one of \"round-robin\" or \"least-connection\".\n    (EG_LOAD_BALANCING_ALGORITHM env var)\n    Default: 'round-robin'\n--EnterpriseGatewayApp.log_datefmt=<Unicode>\n    The date format used by logging formatters for %(asctime)s\n    Default: '%Y-%m-%d %H:%M:%S'\n--EnterpriseGatewayApp.log_format=<Unicode>\n    The Logging format template\n    Default: '[%(name)s]%(highlevel)s %(message)s'\n--EnterpriseGatewayApp.log_level=<Enum>\n    Set the log level by value or name.\n    Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']\n    Default: 30\n--EnterpriseGatewayApp.max_age=<Unicode>\n    Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)\n    Default: ''\n--EnterpriseGatewayApp.max_kernels=<Int>\n    Limits the number of kernel instances allowed to run by this gateway.\n    Unbounded by default. (EG_MAX_KERNELS env var)\n    Default: None\n--EnterpriseGatewayApp.max_kernels_per_user=<Int>\n    Specifies the maximum number of kernels a user can have active\n    simultaneously.  A value of -1 disables enforcement.\n    (EG_MAX_KERNELS_PER_USER env var)\n    Default: -1\n--EnterpriseGatewayApp.port=<Int>\n    Port on which to listen (EG_PORT env var)\n    Default: 8888\n--EnterpriseGatewayApp.port_range=<Unicode>\n    Specifies the lower and upper port numbers from which ports are created. The\n    bounded values are separated by '..' (e.g., 33245..34245 specifies a range\n    of 1000 ports to be randomly selected). A range of zero (e.g., 33245..33245\n    or 0..0) disables port-range enforcement.  
(EG_PORT_RANGE env var)\n    Default: '0..0'\n--EnterpriseGatewayApp.port_retries=<Int>\n    Number of ports to try if the specified port is not available\n    (EG_PORT_RETRIES env var)\n    Default: 50\n--EnterpriseGatewayApp.remote_hosts=<list-item-1>...\n    Bracketed comma-separated list of hosts on which DistributedProcessProxy\n    kernels will be launched e.g., ['host1','host2']. (EG_REMOTE_HOSTS env var -\n    non-bracketed, just comma-separated)\n    Default: ['localhost']\n--EnterpriseGatewayApp.show_config=<Bool>\n    Instead of starting the Application, dump configuration to stdout\n    Default: False\n--EnterpriseGatewayApp.show_config_json=<Bool>\n    Instead of starting the Application, dump configuration to stdout (as JSON)\n    Default: False\n--EnterpriseGatewayApp.ssl_version=<Int>\n    Sets the SSL version to use for the web socket connection. (EG_SSL_VERSION\n    env var)\n    Default: None\n--EnterpriseGatewayApp.trust_xheaders=<CBool>\n    Use x-* header values for overriding the remote-ip, useful when application\n    is behind a proxy. (EG_TRUST_XHEADERS env var)\n    Default: False\n--EnterpriseGatewayApp.unauthorized_users=<set-item-1>...\n    Comma-separated list of user names (e.g., ['root','admin']) against which\n    KERNEL_USERNAME will be compared.  Any match (case-sensitive) will prevent\n    the kernel's launch and result in an HTTP 403 (Forbidden) error.\n    (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)\n    Default: {'root'}\n--EnterpriseGatewayApp.ws_ping_interval=<Int>\n    Specifies the ping interval(in seconds) that should be used by zmq port\n     associated with spawned kernels.Set this variable to 0 to disable ping mechanism.\n    (EG_WS_PING_INTERVAL_SECS env var)\n    Default: 30\n--EnterpriseGatewayApp.yarn_endpoint=<Unicode>\n    The http url specifying the YARN Resource Manager. 
Note: If this value is\n    NOT set, the YARN library will use the files within the local\n    HADOOP_CONFIG_DIR to determine the active resource manager.\n    (EG_YARN_ENDPOINT env var)\n    Default: None\n--EnterpriseGatewayApp.yarn_endpoint_security_enabled=<Bool>\n    Is YARN Kerberos/SPNEGO Security enabled (True/False).\n    (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)\n    Default: False\n\nKernelSpecCache(SingletonConfigurable) options\n----------------------------------------------\n--KernelSpecCache.cache_enabled=<CBool>\n    Enable Kernel Specification caching. (EG_KERNELSPEC_CACHE_ENABLED env var)\n    Default: False\n\nFileKernelSessionManager(KernelSessionManager) options\n------------------------------------------------------\n--FileKernelSessionManager.enable_persistence=<Bool>\n    Enable kernel session persistence (True or False). Default = False\n    (EG_KERNEL_SESSION_PERSISTENCE env var)\n    Default: False\n--FileKernelSessionManager.persistence_root=<Unicode>\n    Identifies the root 'directory' under which the 'kernel_sessions' node will\n    reside.  This directory should exist.  (EG_PERSISTENCE_ROOT env var)\n    Default: ''\n\nWebhookKernelSessionManager(KernelSessionManager) options\n---------------------------------------------------------\n--WebhookKernelSessionManager.enable_persistence=<Bool>\n    Enable kernel session persistence (True or False). Default = False\n    (EG_KERNEL_SESSION_PERSISTENCE env var)\n    Default: False\n--WebhookKernelSessionManager.persistence_root=<Unicode>\n    Identifies the root 'directory' under which the 'kernel_sessions' node will\n    reside.  This directory should exist.  (EG_PERSISTENCE_ROOT env var)\n    Default: None\n--WebhookKernelSessionManager.webhook_url=<Unicode>\n    URL endpoint for webhook kernel session manager\n    Default: None\n--WebhookKernelSessionManager.auth_type=<Unicode>\n    Authentication type for webhook kernel session manager API. 
Either basic, digest or None\n    Default: None\n--WebhookKernelSessionManager.webhook_username=<Unicode>\n    Username for webhook kernel session manager API auth\n    Default: None\n--WebhookKernelSessionManager.webhook_password=<Unicode>\n    Password for webhook kernel session manager API auth\n    Default: None\n\nRemoteMappingKernelManager(AsyncMappingKernelManager) options\n-------------------------------------------------------------\n--RemoteMappingKernelManager.allowed_message_types=<list-item-1>...\n    White list of allowed kernel message types. When the list is empty, all\n    message types are allowed.\n    Default: []\n--RemoteMappingKernelManager.buffer_offline_messages=<Bool>\n    Whether messages from kernels whose frontends have disconnected should be\n    buffered in-memory. When True (default), messages are buffered and replayed\n    on reconnect, avoiding lost messages due to interrupted connectivity.\n    Disable if long-running kernels will produce too much output while no\n    frontends are connected.\n    Default: True\n--RemoteMappingKernelManager.cull_busy=<Bool>\n    Whether to consider culling kernels which are busy. Only effective if\n    cull_idle_timeout > 0.\n    Default: False\n--RemoteMappingKernelManager.cull_connected=<Bool>\n    Whether to consider culling kernels which have one or more connections. Only\n    effective if cull_idle_timeout > 0.\n    Default: False\n--RemoteMappingKernelManager.cull_idle_timeout=<Int>\n    Timeout (in seconds) after which a kernel is considered idle and ready to be\n    culled. Values of 0 or lower disable culling. 
Very short timeouts may result\n    in kernels being culled for users with poor network connections.\n    Default: 0\n--RemoteMappingKernelManager.cull_interval=<Int>\n    The interval (in seconds) on which to check for idle kernels exceeding the\n    cull timeout value.\n    Default: 300\n--RemoteMappingKernelManager.default_kernel_name=<Unicode>\n    The name of the default kernel to start\n    Default: 'python3'\n--RemoteMappingKernelManager.kernel_info_timeout=<Float>\n    Timeout for giving up on a kernel (in seconds). On starting and restarting\n    kernels, we check whether the kernel is running and responsive by sending\n    kernel_info_requests. This sets the timeout in seconds for how long the\n    kernel can take before being presumed dead. This affects the\n    MappingKernelManager (which handles kernel restarts) and the\n    ZMQChannelsHandler (which handles the startup).\n    Default: 60\n--RemoteMappingKernelManager.kernel_manager_class=<DottedObjectName>\n    The kernel manager class.  This is configurable to allow subclassing of the\n    AsyncKernelManager for customized behavior.\n    Default: 'jupyter_client.ioloop.AsyncIOLoopKernelManager'\n--RemoteMappingKernelManager.root_dir=<Unicode>\n    Default: ''\n--RemoteMappingKernelManager.shared_context=<Bool>\n    Share a single zmq.Context to talk to all my kernels\n    Default: True\n```\n"
  },
  {
    "path": "docs/source/operators/config-culling.md",
    "content": "# Culling idle kernels\n\nWith the adoption of notebooks and interactive development for data science, a new \"resource utilization\" pattern has arisen, where kernel resources are locked for a given notebook, but due to interactive development processes it might be idle for a long period of time causing the cluster resources to starve. One way to workaround this problem is to enable the culling of idle kernels after a specific timeout period.\n\nIdle kernel culling is set to “off” by default. It’s enabled by setting `--RemoteKernelManager.cull_idle_timeout` to a positive value representing the number of seconds a kernel must remain idle to be culled (default: 0, recommended: 43200, 12 hours).\n\n```{tip}\nWhen managing large clusters with limited resources, we recommend enabling the culling of idle kernels.\n```\n\nYou can also configure the interval that the kernels are checked for their idle timeouts by adjusting the setting `--RemoteKernelManager.cull_interval` to a positive value. If the interval is not set or set to a non-positive value, the system uses 300 seconds as the default value: (default: 300 seconds).\n\nThere are use-cases where we would like to enable only culling of idle kernels that have no connections (e.g. the notebook browser was closed without stopping the kernel first), this can be configured by adjusting the setting `--RemoteKernelManager.cull_connected` (default: False).\n\nHere's an updated start script that provides some default configuration to enable the culling of idle kernels:\n\n```bash\n#!/bin/bash\n\nLOG=/var/log/enterprise_gateway.log\nPIDFILE=/var/run/enterprise_gateway.pid\n\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \\\n   --RemoteKernelManager.cull_idle_timeout=43200 --MappingKernelManager.cull_interval=60 > $LOG 2>&1 &\n\nif [ \"$?\" -eq 0 ]; then\n  echo $! > $PIDFILE\nelse\n  exit 1\nfi\n```\n"
  },
  {
    "path": "docs/source/operators/config-dynamic.md",
    "content": "# Dynamic configurables\n\nEnterprise Gateway also supports the ability to update configuration variables without having to\nrestart Enterprise Gateway. This enables the ability to do things like enable debug logging or\nadjust the maximum number of kernels per user, all without having to restart Enterprise Gateway.\n\nTo enable dynamic configurables configure `EnterpriseGatewayApp.dynamic_config_interval` to a\npositive value (default is 0 or disabled). Since this is the number of seconds to poll Enterprise Gateway's configuration files,\na value greater than 60 (1 minute) is recommended. This functionality works for most configuration\nvalues, but does have the following caveats:\n\n1. Any configuration variables set on the command line (CLI) or via environment variables are\n   NOT eligible for dynamic updates. This is because Jupyter gives those values priority over\n   file-based configuration variables.\n1. Any configuration variables tied to background processing may not reflect their update if\n   the variable is not _observed_ for changes. For example, the code behind\n   `RemoteKernelManager.cull_idle_timeout` may not reflect changes to the timeout period if\n   that variable is not monitored (i.e., observed) for changes.\n1. 
Only `Configurables` registered by Enterprise Gateway are eligible for dynamic updates.\n   Currently, that list consists of the following (and their subclasses): EnterpriseGatewayApp,\n   RemoteKernelManager, KernelSpecManager, and KernelSessionManager.\n\nAs a result, operators and administrators are encouraged to configure Enterprise Gateway via configuration files with only static values configured via the command line or environment.\n\nNote that if `EnterpriseGatewayApp.dynamic_config_interval` is configured with a positive value\nvia the configuration file (i.e., is eligible for updates) and is subsequently set to 0, then\ndynamic configuration updates will be disabled until Enterprise Gateway is restarted with a\npositive value. Therefore, we recommend `EnterpriseGatewayApp.dynamic_config_interval` be\nconfigured via the command line or environment.\n"
  },
  {
    "path": "docs/source/operators/config-env-debug.md",
    "content": "# Environment variables that assist in troubleshooting\n\nThe following environment variables may be useful for troubleshooting:\n\n```text\n  EG_DOCKER_LOG_LEVEL=WARNING\n    By default, the docker client library is too verbose for its logging.  This\n    value can be adjusted in situations where docker troubleshooting may be warranted.\n\n  EG_KUBERNETES_LOG_LEVEL=WARNING\n    By default, the kubernetes client library is too verbose for its logging.  This\n    value can be adjusted in situations where kubernetes troubleshooting may be\n    warranted.\n\n  EG_LOG_LEVEL=10\n    Used by remote launchers and gateway listeners (where the kernel runs), this\n    indicates the level of logging used by those entities.  Level 10 (DEBUG) is\n    recommended since they don't do verbose logging.\n\n  EG_MAX_POLL_ATTEMPTS=10\n    Polling is used in various places during life-cycle management operations - like\n    determining if a kernel process is still alive, stopping the process, waiting\n    for the process to terminate, etc.  As a result, it may be useful to adjust\n    this value during those kinds of troubleshooting scenarios, although that\n    should rarely be necessary.\n\n  EG_POLL_INTERVAL=0.5\n    The interval (in seconds) to wait before checking poll results again.\n\n  EG_RESTART_STATUS_POLL_INTERVAL=1.0\n    The interval (in seconds) to wait before polling for the restart status again when\n    duplicate restart request for the same kernel is received or when a shutdown request\n    is received while kernel is still restarting.\n\n  EG_REMOVE_CONTAINER=True\n    Used by launch_docker.py, indicates whether the kernel's docker container should be\n    removed following its shutdown.  Set this value to 'False' if you want the container\n    to be left around in order to troubleshoot issues.  
Remember to set back to 'True'\n    to restore normal operation.\n\n  EG_SOCKET_TIMEOUT=5.0\n    The time (in seconds) the enterprise gateway will wait on its connection\n    file socket waiting on return from a remote kernel launcher.  Upon timeout, the\n    operation will be retried immediately, until the overall time limit has been\n    exceeded.\n\n  EG_SSH_LOG_LEVEL=WARNING\n    By default, the paramiko ssh library is too verbose for its logging.  This\n    value can be adjusted in situations where ssh troubleshooting may be warranted.\n\n  EG_YARN_LOG_LEVEL=WARNING\n    By default, the yarn-api-client library is too verbose for its logging.  This\n    value can be adjusted in situations where YARN troubleshooting may be warranted.\n```\n"
  },
  {
    "path": "docs/source/operators/config-file.md",
    "content": "# Configuration file options\n\nPlacing configuration options into the configuration file `jupyter_enterprise_gateway_config.py` is recommended because this will enable the use of the [_dynamic configurables_](config-dynamic.md/#dynamic-configurables) functionality. To generate a template configuration file, run the following:\n\n```bash\njupyter enterprisegateway --generate-config\n```\n\nThis command will produce a `jupyter_enterprise_gateway_config.py` file, typically located in the invoking user's `$HOME/.jupyter` directory. The file contains python code, including comments, relative to each available configuration option. The actual option itself will also be commented out. To enable that option, set its value and uncomment the code.\n\n```{Note}\nSome options may appear duplicated.  For example, the `remote_hosts` trait appears on both `c.EnterpriseGatewayConfigMixin` and `c.EnterpriseGatewayApp`.  This is due to how configurable traits appear in the class hierarchy. Since `EnterpriseGatewayApp` derives from `EnterpriseGatewayConfigMixin` and both are configurable classes, the output contains duplicated values.  If both values are set, the value _closest_ to the derived class will be used (in this case, `EnterpriseGatewayApp`).\n```\n\nHere's an example entry. Note that its default value, when defined, is also displayed, along with the corresponding environment variable name:\n\n```python\n## Bracketed comma-separated list of hosts on which DistributedProcessProxy\n#  kernels will be launched e.g., ['host1','host2'].\n#  (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated)\n#  Default: ['localhost']\n# c.EnterpriseGatewayConfigMixin.remote_hosts = ['localhost']\n```\n"
  },
  {
    "path": "docs/source/operators/config-kernel-override.md",
    "content": "# Per-kernel overrides\n\nAs mentioned in the overview of [Process Proxy Configuration](../contributors/system-architecture.md#process-proxy-configuration)\ncapabilities, it's possible to override or amend specific system-level configuration values on a per-kernel basis. These capabilities can be implemented with the kernel specification's process-proxy `config` stanza or via environment variables.\n\n## Per-kernel configuration overrides\n\nThe following enumerates the set of per-kernel configuration overrides:\n\n- `remote_hosts`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.remote_hosts`.\n  Any values specified in the config dictionary override the globally defined values. These apply to all\n  `DistributedProcessProxy` kernels.\n- `yarn_endpoint`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.yarn_endpoint`.\n  Any values specified in the config dictionary override the globally defined values. These apply to all\n  `YarnClusterProcessProxy` kernels. Note that you'll likely be required to specify a different `HADOOP_CONF_DIR`\n  setting in the kernel.json's `env` stanza in order for the `spark-submit` command to target the appropriate YARN cluster.\n- `authorized_users`: This process proxy configuration entry can be used to override\n  `--EnterpriseGatewayApp.authorized_users`. Any values specified in the config dictionary override the globally\n  defined values. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. Note\n  that the typical use-case for this value is to not set `--EnterpriseGatewayApp.authorized_users` at the global level,\n  but then restrict access at the kernel level.\n- `unauthorized_users`: This process proxy configuration entry can be used to **_amend_**\n  `--EnterpriseGatewayApp.unauthorized_users`. Any values specified in the config dictionary are **added** to the\n  globally defined values. 
As a result, once a user is denied access at the global level, they will _always be denied\n  access at the kernel level_. These values apply to **all** process-proxy kernels, including the default\n  `LocalProcessProxy`.\n- `port_range`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.port_range`.\n  Any values specified in the config dictionary override the globally defined values. These apply to all\n  `RemoteProcessProxy` kernels.\n\n## Per-kernel environment overrides\n\nIn some cases, it is useful to allow specific values that exist in a kernel.json `env` stanza to be\noverridden on a per-kernel basis. For example, if the kernel.json supports resource limitations you\nmay want to allow some requests to have access to more memory or GPUs than another. Enterprise\nGateway enables this capability by honoring environment variables provided in the json request over\nthose same-named variables in the kernel.json `env` stanza.\n\nEnvironment variables for which this can occur are any variables prefixed with `KERNEL_`\nas well as any variables\nlisted in the `EnterpriseGatewayApp.client_envs` configurable trait (or via\nthe `EG_CLIENT_ENVS` variable). Likewise, environment variables of the Enterprise Gateway\nserver process listed in the `EnterpriseGatewayApp.inherited_envs` configurable trait\n(or via the `EG_INHERITED_ENVS` variable)\nare also available for replacement in the kernel process' environment.\n\nSee [Kernel Environment Variables](../users/kernel-envs.md) in the Users documentation section for a complete set of recognized `KERNEL_` variables.\n"
  },
  {
    "path": "docs/source/operators/config-security.md",
    "content": "# Configuring security\n\nJupyter Enterprise Gateway does not currently perform user _authentication_ but, instead, assumes that all users\nissuing requests have been previously authenticated. Recommended applications for this are\n[Apache Knox](https://knox.apache.org/) or [Jupyter Hub](https://jupyterhub.readthedocs.io/en/latest/)\n(e.g., if gateway-enabled notebook servers were spawned targeting an Enterprise Gateway cluster).\n\nThis section introduces some security features inherent in Enterprise Gateway (with more to come).\n\n## KERNEL_USERNAME\n\nIn order to convey the name of the authenticated user, `KERNEL_USERNAME` should be sent in the kernel creation request\nvia the `env:` entry. This will occur automatically within the gateway-enabled Notebook server since it propagates all environment variables\nprefixed with `KERNEL_`. If the request does not include a `KERNEL_USERNAME` entry, one will be added to the kernel's\nlaunch environment with the value of the gateway user.\n\nThis value is then used within the _authorization_ and _impersonation_ functionality.\n\n## Authorization\n\nBy default, all users are authorized to start kernels. This behavior can be adjusted when situations arise where\nmore control is required. Basic authorization can be expressed in two ways.\n\n### Authorized Users\n\nThe command-line or configuration file option: `EnterpriseGatewayApp.authorized_users` can be specified to contain a\nlist of user names indicating which users are permitted to launch kernels within the current gateway server.\n\nOn each kernel launched, the authorized users list is searched for the value of `KERNEL_USERNAME` (case-sensitive). 
If\nthe user is found in the list the kernel's launch sequence continues, otherwise HTTP Error 403 (Forbidden) is raised\nand the request fails.\n\n```{warning}\nSince the `authorized_users` option must be exhaustive, it should be used only in situations where a small\nand limited set of users are allowed access and empty otherwise.\n```\n\n### Unauthorized Users\n\nThe command-line or configuration file option: `EnterpriseGatewayApp.unauthorized_users` can be specified to contain a\nlist of user names indicating which users are **NOT** permitted to launch kernels within the current gateway server.\nThe `unauthorized_users` list is always checked prior to the `authorized_users` list. If the value of `KERNEL_USERNAME`\nappears in the `unauthorized_users` list, the request is immediately failed with the same 403 (Forbidden) HTTP Error.\n\nFrom a system security standpoint, privileged users (e.g., `root` and any users allowed `sudo` privileges) should be\nadded to this option.\n\n### Authorization Failures\n\nIt should be noted that the corresponding messages logged when each of the above authorization failures occur are\nslightly different. This allows the administrator to discern from which authorization list the failure was generated.\n\nFailures stemming from _inclusion_ in the `unauthorized_users` list will include text similar to the following:\n\n```\nUser 'bob' is not authorized to start kernel 'Spark - Python (YARN Client Mode)'. Ensure\nKERNEL_USERNAME is set to an appropriate value and retry the request.\n```\n\nFailures stemming from _exclusion_ from a non-empty `authorized_users` list will include text similar to the following:\n\n```\nUser 'bob' is not in the set of users authorized to start kernel 'Spark - Python (YARN Client Mode)'. 
Ensure\nKERNEL_USERNAME is set to an appropriate value and retry the request.\n```\n\n## User Impersonation\n\nThe Enterprise Gateway server leverages other technologies to implement user impersonation when launching kernels. This\noption is configured via two pieces of information: `EG_IMPERSONATION_ENABLED` and\n`KERNEL_USERNAME`.\n\n`EG_IMPERSONATION_ENABLED` indicates the intention that user impersonation should be performed and can also be conveyed\nvia the command-line boolean option `EnterpriseGatewayApp.impersonation_enabled` (default = False).\n\n`KERNEL_USERNAME` is also conveyed within the environment of the kernel launch sequence where\nits value is used to indicate the user that should be impersonated.\n\n### Impersonation in Hadoop YARN clusters\n\nIn a cluster managed by the Hadoop YARN resource manager, impersonation is implemented by leveraging kerberos, and thus requires\nthis security option as a pre-requisite for user impersonation. When user impersonation is enabled, kernels are launched\nwith the `--proxy-user ${KERNEL_USERNAME}` which will tell YARN to launch the kernel in a container used by the provided\nuser name.\n\n```{admonition} Important!\n:class: warning\nWhen using kerberos in a YARN managed cluster, the gateway user (`elyra` by default) needs to be set up as a\n`proxyuser` superuser in hadoop configuration. Please refer to the\n[Hadoop documentation](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html)\nregarding the proper configuration steps.\n```\n\n### SPNEGO Authentication to YARN APIs\n\nWhen kerberos is enabled in a YARN managed cluster, the administration UIs can be configured to require authentication/authorization\nvia SPNEGO. 
When running Enterprise Gateway in an environment configured this way, we need to convey an extra configuration\nto enable the proper authorization when communicating with YARN via the YARN APIs.\n\n`YARN_ENDPOINT_SECURITY_ENABLED` indicates the requirement to use SPNEGO authentication/authorization when connecting with the\nYARN APIs and can also be conveyed via the command-line boolean option `EnterpriseGatewayApp.yarn_endpoint_security_enabled`\n(default = False)\n\n### Impersonation in Standalone or YARN Client Mode\n\nImpersonation performed in standalone or YARN cluster modes tends to take the form of using `sudo` to perform the\nkernel launch as the target user. This can also be configured within the\n[run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_client/bin/run.sh)\nscript and requires the following:\n\n1. The gateway user (i.e., the user in which Enterprise Gateway is running) must be enabled to perform sudo operations\n   on each potential host. This enablement must also be done to prevent password prompts since Enterprise Gateway runs\n   in the background. Refer to your operating system documentation for details.\n1. Each user identified by `KERNEL_USERNAME` must be associated with an actual operating system user on each host.\n1. Once the gateway user is configured for `sudo` privileges it is **strongly recommended** that that user be included\n   in the set of `unauthorized_users`. 
Otherwise, kernels not configured for impersonation, or those requests that do not\n   include `KERNEL_USERNAME`, will run as the, now, highly privileged gateway user!\n\n```{warning}\nShould impersonation be disabled after granting the gateway user elevated privileges, it is\n**strongly recommended** those privileges be revoked (on all hosts) prior to starting kernels since those kernels\nwill run as the gateway user **regardless of the value of KERNEL_USERNAME**.\n```\n\n## SSH Tunneling\n\nJupyter Enterprise Gateway is configured to perform SSH tunneling on the five ZeroMQ kernel sockets as well as the\ncommunication socket created within the launcher and used to perform remote and cross-user signalling functionality. SSH\ntunneling is NOT enabled by default. Tunneling can be enabled/disabled via the environment variable `EG_ENABLE_TUNNELING=False`.\nNote, there is no command-line or configuration file support for this variable.\n\nNote that SSH by default validates host keys before connecting to remote hosts and the connection will fail for invalid\nor unknown hosts. Enterprise Gateway honors this requirement, and invalid or unknown hosts will cause tunneling to fail.\nPlease perform necessary steps to validate all hosts before enabling SSH tunneling, such as:\n\n- SSH to each node cluster and accept the host key properly\n- Configure SSH to disable `StrictHostKeyChecking`\n\n## Using Generic Security Service (Kerberos)\n\nJupyter Enterprise Gateway has support for SSH connections using GSS (for example Kerberos), which enables its deployment\nwithout the use of an ssh key. The `EG_REMOTE_GSS_SSH` environment variable can be used to control this behavior.\n\n```{seealso}\nThe list of [additional supported environment variables](config-add-env.md#additional-environment-variables).\n```\n\n## Securing Enterprise Gateway Server\n\n### Using SSL for encrypted communication\n\nEnterprise Gateway supports Secure Sockets Layer (SSL) communication with its clients. 
With SSL enabled, all the\ncommunication between the server and client are encrypted and highly secure.\n\n1. You can start Enterprise Gateway to communicate via a secure protocol mode by setting the `certfile` and `keyfile`\n   options with the command:\n\n   ```\n   jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --certfile=mycert.pem --keyfile=mykey.key\n   ```\n\n   As server starts up, the log should reflect the following,\n\n   ```\n   [EnterpriseGatewayApp] Jupyter Enterprise Gateway at https://localhost:8888\n   ```\n\n   Note: Enterprise Gateway server is started with `HTTPS` instead of `HTTP`, meaning server side SSL is enabled.\n\n   ````{tip}\n   A self-signed certificate can be generated with openssl. For example, the following command will create a\n   certificate valid for 365 days with both the key and certificate data written to the same file:\n\n   ```bash\n   openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout mykey.key -out mycert.pem\n   ````\n\n1. With Enterprise Gateway server SSL enabled, now you need to configure the client side SSL, which is accomplished via the Gateway configuration options embedded in Notebook server.\n\n   During Jupyter Notebook server startup, export the following environment variables where the gateway-enabled server has access\n   during runtime:\n\n   ```bash\n   export JUPYTER_GATEWAY_CLIENT_CERT=${PATH_TO_PEM_FILE}\n   export JUPYTER_GATEWAY_CLIENT_KEY=${PATH_TO_KEY_FILE}\n   export JUPYTER_GATEWAY_CA_CERTS=${PATH_TO_SELFSIGNED_CA}\n   ```\n\n   ```{note}\n   If using a self-signed certificate, you can set `JUPYTER_GATEWAY_CA_CERTS` same as `JUPYTER_GATEWAY_CLIENT_CERT`.\n   ```\n\n### Using Enterprise Gateway configuration file\n\nYou can also utilize the [Enterprise Gateway configuration file](config-file.md#configuration-file-options) to set static configurations for the server.\n\nTo enable SSL from the configuration file, modify the corresponding parameter to the appropriate 
value.\n\n```\nc.EnterpriseGatewayApp.certfile = '/absolute/path/to/your/certificate/fullchain.pem'\nc.EnterpriseGatewayApp.keyfile = '/absolute/path/to/your/certificate/privatekey.key'\n```\n\nUsing configuration file achieves the same result as starting the server with `--certfile` and `--keyfile`, this way\nprovides better readability and maintainability.\n\nAfter configuring the above, the communication between gateway-enabled Notebook Server and Enterprise Gateway is SSL enabled.\n"
  },
  {
    "path": "docs/source/operators/config-sys-env.md",
    "content": "# System-owned environment variables\n\nThe following environment variables are managed by Enterprise Gateway and listed here for completeness.\n\n```{warning}\nManually setting these variables could adversely affect operations.\n```\n\n```text\n  EG_DOCKER_MODE\n    Docker only.  Used by launch_docker.py to determine if the kernel container\n    should be created using the swarm service API or the regular docker container\n    API.  Enterprise Gateway sets this value depending on whether the kernel is\n    using the DockerSwarmProcessProxy or DockerProcessProxy.\n\n  EG_RESPONSE_ADDRESS\n    This value is set during each kernel launch and resides in the environment of\n    the kernel launch process. Its value represents the address to which the remote\n    kernel's connection information should be sent.  Enterprise Gateway is listening\n    on that socket and will associate that connnection information with the responding\n    kernel.\n```\n"
  },
  {
    "path": "docs/source/operators/deploy-conductor.md",
    "content": "# IBM Spectrum Conductor deployments\n\nThis information will be added shortly. The configuration is similar to that of [Hadoop YARN deployments](deploy-yarn-cluster.md) with the `ConductorClusterProcessProxy` used in place of `YARNClusterProcessProxy`.\n\nThe following sample kernel specifications are currently available on IBM Spectrum Conductor:\n\n- spark_R_conductor_cluster\n- spark_python_conductor_cluster\n- spark_scala_conductor_cluster\n"
  },
  {
    "path": "docs/source/operators/deploy-distributed.md",
    "content": "# Distributed deployments\n\nThis section describes how to deploy Enterprise Gateway to manage kernels across a distributed set of hosts. In this case, a resource manager is not used, but, rather, SSH is used to distribute the kernels. This functionality is accomplished via the [`DistributedProcessProxy`](../contributors/system-architecture.md#distributedprocessproxy).\n\nSteps required to complete deployment on a distributed cluster are:\n\n1. [Install Enterprise Gateway](installing-eg.md) on the \"primary node\" of the cluster.\n1. [Install the desired kernels](installing-kernels.md)\n1. Install and configure the server and desired kernel specifications (see below)\n1. [Launch Enterprise Gateway](launching-eg.md)\n\nThe `DistributedProcessProxy` simply uses a fixed set of host names and selects the _next_ host using a simple round-robin algorithm (see the [Roadmap](../contributors/roadmap.md) for making this pluggable). In this case, you can still experience bottlenecks on a given node that receives requests to start \"large\" kernels, but otherwise, you will be better off compared to when all kernels are started on a single node or as local processes, which is the default for Jupyter Notebook and JupyterLab when not configured to use Enterprise Gateway.\n\nThe following sample kernelspecs are configured to use the `DistributedProcessProxy`:\n\n- python_distributed\n- spark_python_yarn_client\n- spark_scala_yarn_client\n- spark_R_yarn_client\n\n```{admonition} Important!\n:class: warning\nThe `DistributedProcessProxy` utilizes SSH between the Enterprise Gateway server and the remote host.  
As a result, you must ensure passwordless SSH is configured between hosts.\n```\n\nThe set of remote hosts used by the `DistributedProcessProxy` are derived from two places.\n\n- The configuration option `EnterpriseGatewayApp.remote_hosts`, whose default value comes from the env variable EG_REMOTE_HOSTS - which, itself, defaults to 'localhost'.\n- The config option can be [overridden on a per-kernel basis](config-kernel-override.md#per-kernel-configuration-overrides) if the process_proxy stanza contains a config stanza where there's a `remote_hosts` entry. If present, this value will be used instead.\n\n```{tip}\nEntries in the remote hosts configuration should be fully qualified domain names (FQDN). For example, `host1.acme.com, host2.acme.com`\n```\n\n```{admonition} Important!\n:class: warning\nAll the kernel *specifications* configured to use the `DistributedProcessProxy` must be on all nodes to which there's a reference in the remote hosts configuration!  With YARN cluster node, only the Python and R kernel _packages_ are required on each node, not the entire kernel specification.\n```\n\nThe following installs the sample `python_distributed` kernel specification relative to the 3.2.3 release on the given node. 
This step must be repeated for each node and each kernel specification.\n\n```Bash\nwget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz\nKERNELS_FOLDER=/usr/local/share/jupyter/kernels\ntar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/python_distributed/ python_distributed/\n```\n\n```{tip}\nYou may find it easier to install all kernel specifications on each node, then remove directories corresponding to specification you're not interested in using.\n```\n\n## Specifying a load-balancing algorithm\n\nJupyter Enterprise Gateway provides two ways to configure how kernels are distributed across the configured set of hosts: round-robin or least-connection.\n\n### Round-robin\n\nThe round-robin algorithm simply uses an index into the set of configured hosts, incrementing the index on each kernel startup so that it points to the next host in the configured set. To specify the use of round-robin, use one of the following:\n\n_Command-line_:\n\n```bash\n--EnterpriseGatewayApp.load_balancing_algorithm=round-robin\n```\n\n_Configuration_:\n\n```python\nc.EnterpriseGatewayApp.load_balancing_algorithm=\"round-robin\"\n```\n\n_Environment_:\n\n```bash\nexport EG_LOAD_BALANCING_ALGORITHM=round-robin\n```\n\nSince _round-robin_ is the default load-balancing algorithm, this option is not necessary.\n\n### Least-connection\n\nThe least-connection algorithm tracks the hosts that are currently servicing kernels spawned by the Enterprise Gateway instance. Using this information, Enterprise Gateway selects the host with the least number of kernels. It does not consider other information, or whether there is _another_ Enterprise Gateway instance using the same set of hosts. 
To specify the use of least-connection, use one of the following:\n\n_Command-line_:\n\n```bash\n--EnterpriseGatewayApp.load_balancing_algorithm=least-connection\n```\n\n_Configuration_:\n\n```python\nc.EnterpriseGatewayApp.load_balancing_algorithm=\"least-connection\"\n```\n\n_Environment_:\n\n```bash\nexport EG_LOAD_BALANCING_ALGORITHM=least-connection\n```\n\n### Pinning a kernel to a host\n\nA kernel's start request can specify a specific remote host on which to run by specifying that host in the `KERNEL_REMOTE_HOST` environment variable within the request's body. When specified, the configured load-balancing algorithm will be by-passed and the kernel will be started on the specified host.\n\n## YARN Client Mode\n\nYARN client mode kernel specifications can be considered _distributed mode kernels_. They just happen to use `spark-submit` from different nodes in the cluster but use the `DistributedProcessProxy` to manage their lifecycle.\n\nYARN Client kernel specifications require the following environment variable to be set within their `env` entries:\n\n- `SPARK_HOME` must point to the Apache Spark installation path\n\n```\nSPARK_HOME:/usr/hdp/current/spark2-client                            #For HDP distribution\n```\n\nIn addition, they will leverage the aforementioned remote hosts configuration.\n\nAfter that, you should have a `kernel.json` that looks similar to the one below:\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Client Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_YARN_USER_ENV\": 
\"PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nMake any necessary adjustments such as updating `SPARK_HOME` or other environment and path specific configurations.\n\n```{tip}\nEach node of the cluster will typically be configured in the same manner relative to directory hierarchies and environment variables.  As a result, you may find it easier to get kernel specifications working on one node, then, after confirming their operation, copy them to other nodes and update the remote-hosts configuration to include the other nodes.  You will still need to _install_ the kernels themselves on each node.\n```\n\n## Spark Standalone\n\nAlthough Enterprise Gateway does not provide sample kernelspecs for Spark standalone, here are the steps necessary to convert a `yarn_client` kernelspec to standalone.\n\n- Make a copy of the source `yarn_client` kernelspec into an applicable `standalone` directory.\n- Edit the `kernel.json` file:\n  - Update the display_name with e.g. 
`Spark - Python (Spark Standalone)`.\n  - Update the `--master` option in the SPARK_OPTS to point to the spark master node rather than indicate `--deploy-mode client`.\n  - Update `SPARK_OPTS` and remove the `spark.yarn.submit.waitAppCompletion=false`.\n  - Update the `argv` stanza to reference `run.sh` in the appropriate directory.\n\nAfter that, you should have a `kernel.json` that looks similar to the one below:\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (Spark Standalone)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_YARN_USER_ENV\": \"PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH\",\n    \"SPARK_OPTS\": \"--master spark://127.0.0.1:7077  --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_spark_standalone/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n"
  },
  {
    "path": "docs/source/operators/deploy-docker.md",
    "content": "# Docker and Docker Swarm deployments\n\nThis section describes how to deploy Enterprise Gateway into an existing Docker or Docker Swarm cluster. The two deployments are nearly identical and any differences will be noted.\n\nThe base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other images. See [Docker Images](../contributors/docker.md) for image details.\n\nThe following sample kernelspecs are currently available on Docker and Docker Swarm deployments:\n\n- R_docker\n- python_docker\n- python_tf_docker\n- python_tf_gpu_docker\n- scala_docker\n\n## Docker Swarm deployment\n\nEnterprise Gateway manifests itself as a Docker Swarm service. It is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have a label of `app=enterprise-gateway` applied.\n\nThe current deployment uses a compose stack definition, [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml) which creates an overlay network intended for use solely by Enterprise Gateway and any kernel-based services it launches.\n\nTo deploy the stack to a swarm cluster from a manager node, use:\n\n```bash\ndocker stack deploy -c docker-compose.yml enterprise-gateway\n```\n\nMore information about deploying and managing stacks can be found [here](https://docs.docker.com/engine/reference/commandline/stack_deploy/).\n\nSince Swarm's support for session-based affinity has not been investigated at this time, the deployment script configures a single replica. 
Once session affinity is available, the number of replicas can be increased.\n\n```{note}\nOnce session affinity has been figured out, we can (theretically) configure Enterprise Gateway for high availability by increasing the replicas.  However, HA support cannot be fully realized until Enterprise Gateway has finalized its persistent sessions functionality.\n```\n\n## Docker deployment\n\nAn alternative deployment of Enterprise Gateway in docker environments is to deploy Enterprise Gateway as a traditional docker container. This can be accomplished via the [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml) file. However, keep in mind that in choosing this deployment approach, one loses leveraging swarm's monitoring & restart capabilities. That said, choosing this approach does not preclude one from leveraging swarm's scheduling capabilities for launching kernels. As noted below, kernel instances, and how they manifest as docker-based entities (i.e., a swarm service or a docker container), is purely a function of the process proxy class to which they're associated.\n\nTo start the stack using compose:\n\n```bash\ndocker-compose up\n```\n\nThe documentation for managing a compose stack can be found [here](https://docs.docker.com/compose/overview/).\n\n## Kernelspec Modifications\n\nOne of the more common areas of customization we see occur within the kernelspec files located in /usr/local/share/jupyter/kernels. To customize the kernel definitions, the kernels directory can be exposed as a mounted volume thereby making it available to all containers within the swarm cluster.\n\nAs an example, we have included the necessary commands to mount these volumes, both in the deployment script and in the [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/docker/scripts/launch_docker.py) file used to launch docker-based kernels. 
By default, these references are commented out as they require the system administrator to ensure the directories are available throughout the cluster.\n\nNote that because the kernel launch script, [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/docker/scripts/launch_docker.py), resides in the kernelspecs hierarchy, updates or modifications to docker-based kernel instances can now also take place.\n\n## Docker and Docker Swarm Kernel Instances\n\nEnterprise Gateway currently supports launching of _vanilla_ (i.e., non-spark) kernels within a Docker Swarm cluster. When kernels are launched, Enterprise Gateway is responsible for creating the appropriate entity. The kind of entity created is a function of the corresponding process proxy class.\n\nWhen the process proxy class is `DockerSwarmProcessProxy` the `launch_docker.py` script will create a Docker Swarm _service_. This service uses a restart policy of `none` meaning that it's configured to go away upon failures or completion. In addition, because the kernel is launched as a swarm service, the kernel can \"land\" on any node of the cluster.\n\nWhen the process proxy class is `DockerProcessProxy` the `launch_docker.py` script will create a traditional docker _container_. As a result, the kernel will always reside on the same host as the corresponding Enterprise Gateway.\n\nItems worth noting:\n\n1. The Swarm service or Docker container name will be composed of the launching username (`KERNEL_USERNAME`) and kernel-id.\n1. The service/container will have 3 labels applied: \"kernel_id=<kernel-id>\", \"component=kernel\", and \"app=enterprise-gateway\" - similar to Kubernetes.\n1. 
The service/container will be launched within the same docker network as Enterprise Gateway.\n\n## DockerSwarmProcessProxy\n\nTo indicate that a given kernel should be launched as a Docker Swarm service into a swarm cluster, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `DockerSwarmProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Docker Swarm environment.\n\nAlong with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the docker image to associate with the kernel's service container. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`.\n\n```{note}\n_The use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag.  When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release.  A full list of available image tags can be found in the dockerhub repository corresponding to each image._\n```\n\n```json\n{\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    }\n  }\n}\n```\n\nAs always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. 
However, when launching kernels in a docker environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Docker Python API](https://docker-py.readthedocs.io/en/stable/) to create the corresponding instance.\n\n```json\n{\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\n## DockerProcessProxy\n\nRunning containers in Docker Swarm versus traditional Docker are different enough to warrant having separate process proxy implementations. As a result, the kernel.json file could reference the `DockerProcessProxy` class and, accordingly, a traditional docker container (as opposed to a swarm _service_) will be created. The rest of the kernel.json file, image name, argv stanza, etc. 
is identical.\n\n```json\n{\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    }\n  },\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nUpon invocation, the invoked process proxy will set a \"docker mode\" environment variable (`EG_DOCKER_MODE`) to either `swarm` or `docker`, depending on the process proxy instance, that the `launch_docker.py` script uses to determine whether a _service_ or _container_ should be created, respectively.\n\nIt should be noted that each of these forms of process proxy usage does **NOT** need to match to the way in which the Enterprise Gateway instance was deployed. For example, if Enterprise Gateway was deployed as a Docker Swarm service and a `DockerProcessProxy` is used, that corresponding kernel will be launched as a traditional docker container and will reside on the same host as wherever the Enterprise Gateway (swarm) service is running. Similarly, if Enterprise Gateway was deployed using standard Docker container and a `DockerSwarmProcessProxy` is used (and assuming a swarm configuration is present), that corresponding kernel will be launched as a docker swarm service and will reside on whatever host the Docker Swarm scheduler decides is best. That is, the kernel container's lifecycle will be managed by the corresponding process proxy and the Enterprise Gateway's deployment has no bearing.\n"
  },
  {
    "path": "docs/source/operators/deploy-kubernetes.md",
    "content": "# Kubernetes deployments\n\n## Overview\n\nThis section describes how to deploy Enterprise Gateway into an existing Kubernetes cluster.\n\nEnterprise Gateway is provisioned as a Kubernetes _deployment_ and exposed as a Kubernetes _service_. Enterprise Gateway can leverage load balancing and high availability functionality provided by Kubernetes (although HA cannot be fully realized until Enterprise Gateway supports persistent sessions).\n\nThe following sample kernel specifications apply to Kubernetes deployments:\n\n- R_kubernetes\n- python_kubernetes\n- python_tf_gpu_kubernetes\n- python_tf_kubernetes\n- scala_kubernetes\n- spark_R_kubernetes\n- spark_python_kubernetes\n- spark_scala_kubernetes\n- spark_python_operator\n\nEnterprise Gateway deployments use the [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) image from the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/) along with other kubernetes-based images. See [Docker Images](../contributors/docker.md) for image details.\n\nWhen deployed within a [spark-on-kubernetes](https://spark.apache.org/docs/latest/running-on-kubernetes.html) cluster, Enterprise Gateway can easily support cluster-managed kernels distributed across the cluster. 
Enterprise Gateway will also provide standalone (i.e., _vanilla_) kernel invocation (where spark contexts are not automatically created) which also benefits from their distribution across the cluster.\n\n````{note}\nIf you plan to use kernel specifications derived from the `spark_python_operator` sample, ensure that the\n[Kubernetes Operator for Apache Spark is installed](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation)\nin your Kubernetes cluster.\n\n```{tip}\nTo ensure the proper flow of environment variables to your spark application, make sure the\nwebhook server is enabled when deploying the helm chart:\n\n`helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true`\n````\n\nWe are using helm templates to manage Kubernetes resource configurations, which allows an end-user to easily customize their Enterprise Gateway deployment.\n\nThere are two main deployment scenarios if RBAC is enabled in your Kubernetes cluster:\n\n1. Deployment user has **_Cluster Administrator Access_**. In this scenario, you have full access to the cluster and can deploy all components as needed.\n1. Deployment user has **_Namespace Administrator Access_**. This is typical for shared multi-tenant environments where each Team has control over their namespace, but not the cluster. 
In this scenario, your cluster Administrator can deploy the RBAC resources and Kernel Image Puller and you can deploy Enterprise Gateway.\n\n## Prerequisites\n\n- Install and configure [kubectl](https://kubernetes.io/docs/tasks/tools/) and [helm3](https://helm.sh/docs/intro/install/) on your workstation.\n\n- Create the kubernetes namespace where you want to deploy Enterprise Gateway, for example:\n\n  ```sh\n  kubectl create namespace enterprise-gateway\n  ```\n\n- If you use RBAC, you will need cluster Admin access to configure RBAC resources\n\n- If you plan to use Private docker registry, you will need to have credentials (see configuration steps below)\n\nOnce the Kubernetes cluster is configured and `kubectl` is demonstrated to be working, it is time to deploy Enterprise Gateway. There are a couple of different deployment options - using helm or kubectl.\n\n## Deploying with helm\n\nChoose this option if you want to deploy via a [helm](https://helm.sh/) chart. You can customize your deployment using value files - review the configuration section below for details.\n\n### Create the Enterprise Gateway kubernetes service and deployment\n\nYou can execute the helm command from the checked-out release of the Enterprise Gateway git [repository](https://github.com/jupyter-server/enterprise_gateway.git):\n\n```bash\nhelm  upgrade --install  enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name]\n\n```\n\nAlternatively, the helm chart tarball is also accessible as an asset on our [release](https://github.com/jupyter-server/enterprise_gateway/releases) page, replace \\[VERSION\\] with specific release version you want to use:\n\n```bash\nhelm  upgrade --install  enterprise-gateway \\\n  https://github.com/jupyter-server/enterprise_gateway/releases/download/v[VERSION]/jupyter_enterprise_gateway_helm-[VERSION].tar.gz \\\n   --kube-context [mycluster-context-name] \\\n   
--namespace [namespace-name]\n```\n\n### Access to Enterprise Gateway from outside the cluster\n\nTake a look at the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster-services/#ways-to-connect) on how you can access the Kubernetes service from outside the cluster.\n\nA Kubernetes Ingress is the most user-friendly way of interacting with the service and that is what we will cover in this section.\nIf you do not have a Kubernetes Ingress configured on your cluster the easiest way to get access will be using the NodePort service.\n\n#### Kubernetes Ingress Setup\n\n##### Prerequisites\n\n- Ingress controller deployed on your Kubernetes cluster. Review the Kubernetes [documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for available options.\n- Wildcard DNS record is configured to point to the IP of the LoadBalancer, which frontends your ingress controller\n- Review specific Ingress controller configuration to enable wildcard path support if you are using Kubernetes version \\< v1.18\n- With Kubernetes v1.18 Ingress uses `PathType` parameter which is set to `Prefix` in the helm chart by default, so no additional configuration is required\n- Refer to your ingress controller documentation on how to set up TLS with your ingress\n\n##### Update Helm deployment to enable ingress\n\nCreate file `values-ingress.yaml` with the following content:\n\n```bash\ningress:\n  enabled: true\n  # Ingress resource host\n  hostName: \"[unique-fully-qualified-domain-name]\"\n\n```\n\nAdd this file to your helm command and apply to the cluster replacing \\[PLACEHOLDER\\] with appropriate values for your environment:\n\n```bash\nhelm  upgrade --install enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name] \\\n   -f values-ingress.yaml\n```\n\n## Basic Full Configuration Example of Enterprise Gateway Deployment\n\n### 
Option 1. Use Kubernetes Ingress\n\nCreate file `values-full.yaml` with the following content:\n\n```bash\nservice:\n  type: \"ClusterIP\"\n  ports:\n    # The primary port on which Enterprise Gateway is servicing requests.\n    - name: \"http\"\n      port: 8888\n      targetPort: 8888\n    # The  port on which Enterprise Gateway will receive kernel connection info responses.\n    - name: \"http-response\"\n      port: 8877\n      targetPort: 8877\ndeployment:\n  # Update CPU/Memory as needed\n  resources:\n    limits:\n      cpu: 2\n      memory: 10Gi\n    requests:\n      cpu: 1\n      memory: 2Gi\n  # Update to deploy multiple replicas of EG.\n  replicas: 2\n  # Give Enteprise Gateway some time to gracefully shutdown\n  terminationGracePeriodSeconds: 60\n\nkip:\n  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator\n\ningress:\n  enabled: true\n  # Ingress resource host\n  hostName: \"[unique-fully-qualified-domain-name]\"\n\n```\n\n### Option 2. 
Use NodePort Service\n\nCreate file `values-full.yaml` with the following content, you can set the node port value or have Kubernetes allocate a random port:\n\n```bash\nservice:\n  type: \"NodePort\"\n  ports:\n    # The primary port on which Enterprise Gateway is servicing requests.\n    - name: \"http\"\n      port: 8888\n      targetPort: 8888\n    #  nodePort: 32652 # optional nodePort\n    # The  port on which Enterprise Gateway will receive kernel connection info responses.\n    - name: \"http-response\"\n      port: 8877\n      targetPort: 8877\n    #  nodePort: 30481 # optional nodePort\n\ndeployment:\n  # Update CPU/Memory as needed\n  resources:\n    limits:\n      cpu: 2\n      memory: 10Gi\n    requests:\n      cpu: 1\n      memory: 2Gi\n  # Update to deploy multiple replicas of EG.\n  replicas: 2\n  # Give Enteprise Gateway some time to gracefully shutdown\n  terminationGracePeriodSeconds: 60\n\nkip:\n  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator\n\ningress:\n  enabled: false\n```\n\n### Option 3. 
Use NodePort Service with Private Docker Registry\n\nCreate file `values-full.yaml` with the following content, you can set the node port value or have Kubernetes allocate a random port:\n\n```bash\nglobal:\n  # Create RBAC resources\n  rbac: true\n  # ImagePullSecrets for a ServiceAccount, list of secrets in the same namespace\n  # to use for pulling any images in pods that reference this ServiceAccount.\n  # Must be set for any cluster configured with private docker registry.\n  imagePullSecrets:\n    - private-registry-key # provide the name of the secret to use\n\n# You can optionally create imagePull Secrets\nimagePullSecretsCreate:\n  enabled: false\n  annotations: {}\n    # this annotation allows you to keep the secret even if the helm release is deleted\n    # \"helm.sh/resource-policy\": \"keep\"\n  secrets:\n    - private-registry-key # provide the name of the secret to create\n\nservice:\n  type: \"NodePort\"\n  ports:\n    # The primary port on which Enterprise Gateway is servicing requests.\n    - name: \"http\"\n      port: 8888\n      targetPort: 8888\n    #  nodePort: 32652 # optional nodePort\n    # The  port on which Enterprise Gateway will receive kernel connection info responses.\n    - name: \"http-response\"\n      port: 8877\n      targetPort: 8877\n    #  nodePort: 30481 # optional nodePort\n\n# Enterprise Gateway image name and tag to use from private registry.\nimage: private.io/elyra/enterprise-gateway:dev\n\ndeployment:\n  # Update CPU/Memory as needed\n  resources:\n    limits:\n      cpu: 2\n      memory: 10Gi\n    requests:\n      cpu: 1\n      memory: 2Gi\n  # Update to deploy multiple replicas of EG.\n  replicas: 2\n  # Give Enteprise Gateway some time to gracefully shutdown\n  terminationGracePeriodSeconds: 60\n\nkip:\n  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator\n  # Kernel Image Puller image name and tag to use from private registry.\n  image: 
private.io/elyra/kernel-image-puller:dev\n\ningress:\n  enabled: false\n```\n\n### Deploy with helm\n\nAdd values file to your helm command and apply to the cluster replacing \\[PLACEHOLDER\\] with appropriate values for your environment:\n\n```bash\nhelm  upgrade --install enterprise-gateway\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name] \\\n   -f values-full.yaml\n```\n\nif you are using private registry add setting base64 encoded secret value to you command:\n`--set imagePullSecretsCreate.secrets[0].data=\"UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K\"`\n\n### Deploy with kubectl\n\nChoose this deployment option if you want to deploy directly from Kubernetes template files with kubectl, rather than using a package manager like helm.\n\nAdd values file to your helm command and generate `yaml` files replacing \\[PLACEHOLDER\\] with appropriate values for your environment:\n\n```bash\nhelm template \\\n  --output-dir [/tmp/mydeployment] \\\n  enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --namespace [namespace-name] \\\n   -f values-full.yaml\n```\n\nif you are using private registry add setting base64 encoded secret value to you command:\n`--set imagePullSecretsCreate.secrets[0].data=\"UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K\"`\n\nNow you can review generated `yaml` files and apply them to your Kubernetes cluster:\n\n```bash\nkubectl apply -f /tmp/mydeployment/enterprise-gateway/templates/\n```\n\n```{important}\nNever store secrets in your source code control files!\n```\n\n### Validation\n\nYou can start jupyter notebook to connect to the configured endpoint `http://jupyter-e-gw.example.com`\n\n## Advanced Configuration Example of Enterprise Gateway Deployment\n\nIf you need to deploy Enterprise Gateway to a restricted Kubernetes cluster with _RBAC_ and _PodSecurityPolicies_ enabled, you may want to consider deploying Enterprise Gateway components as separate helm 
releases:\n\n### 1. Helm release which will configure required RBAC, PSP, and service accounts\n\n- Typically, this will be done by the Cluster Administrator.\n\nCreate `values-rbac.yaml` file with the following content:\n\n```bash\nglobal:\n  # Create RBAC resources\n  rbac: true\n  serviceAccountName: 'enterprise-gateway-sa'\n\ndeployment:\n  enabled: false\n\ningress:\n  enabled: false\n\nkip:\n  enabled: false\n  serviceAccountName: 'kernel-image-puller-sa'\n  podSecurityPolicy:\n    create: true\n```\n\nRun helm deploy:\n\n```bash\nhelm  upgrade --install enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name] \\\n   -f values-rbac.yaml\n```\n\n### 2. Helm release to deploy Kernel Image Puller\n\n- Typically, this will be done by the Cluster Administrator.\n\nCreate `values-kip.yaml` file with the following content:\n\n```bash\nglobal:\n  # Create RBAC resources\n  rbac: true\n\ndeployment:\n  enabled: false\n\ningress:\n  enabled: false\n\n# Kernel Image Puller (daemonset)\nkip:\n  enabled: true\n  serviceAccountName: 'kernel-image-puller-sa'\n  podSecurityPolicy:\n    create: false\n  resources:\n    limits:\n      cpu: 100m\n      memory: 200Mi\n    requests:\n      cpu: 50m\n      memory: 100Mi\n```\n\nRun helm deploy:\n\n```bash\nhelm  upgrade --install enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name] \\\n   -f values-kip.yaml\n```\n\n### 3. 
Helm release to deploy Enterprise Gateway\n\n- This can be done by namespace Administrator.\n\nCreate `values-eg.yaml` file with the following content:\n\n```bash\nglobal:\n  rbac: false\n\nservice:\n  type: \"ClusterIP\"\n  ports:\n    # The primary port on which Enterprise Gateway is servicing requests.\n    - name: \"http\"\n      port: 8888\n      targetPort: 8888\n      # nodePort: 32652 # optional nodePort\n    # The  port on which Enterprise Gateway will receive kernel connection info responses.\n    - name: \"http-response\"\n      port: 8877\n      targetPort: 8877\n      # nodePort: 30481 # optional nodePort\n\ndeployment:\n  enabled: true\n  resources:\n    limits:\n      cpu: 2\n      memory: 10Gi\n    requests:\n      cpu: 500m\n      memory: 2Gi\n  # Update to deploy multiple replicas of EG.\n  replicas: 1\n  # Give Enteprise Gateway some time to gracefully shutdown\n  terminationGracePeriodSeconds: 60\n\ningress:\n  enabled: true\n  # Ingress resource host\n  hostName: \"[unique-fully-qualified-domain-name]\"\n\nkip:\n  enabled: false\n\n```\n\nRun helm deploy:\n\n```bash\nhelm  upgrade --install enterprise-gateway \\\n  etc/kubernetes/helm/enterprise-gateway \\\n   --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name] \\\n   -f values-eg.yaml\n```\n\n## Helm Configuration Parameters\n\nHere are the values that you can set when deploying the helm chart. You\ncan override them with helm's `--set` or `--values` options. 
Always use `--set` to configure secrets.\n\n| **Parameter**                              | **Description**                                                                                                                                                                                                                                  | **Default**                                                                    |\n| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ |\n| `global.rbac`                              | Create Kubernetes RBAC resources                                                                                                                                                                                                                 | `true`                                                                         |\n| `global.commonLabels`                      | Common labels to apply to daemonset and deployment resources                                                                                                                                                                                     | `{}`                                                                           |\n| `global.imagePullSecrets`                  | Optional array of image pull secrets for Service Account for pulling images from private service registries                                                                                                                                      | \\[\\]                                                                           |\n| `imagePullSecretsCreate.enabled`           | Optional enable creation of the Kubernetes 
secrets to access private registries.                                                                                                                                                                 | 'false'                                                                        |\n| `imagePullSecretsCreate.annotations`       | Annotations for Kubernetes secrets                                                                                                                                                                                                               | '{}'                                                                           |\n| `imagePullSecretsCreate.secrets`           | Array of Kubernetes secrets to create with the following structure: `name` - secret name and `data` - base64 encoded Secret value. Example: `{ name: \"myregistrykey\", data: \"SGVsbG8gc2VjcmV0Cg==\" }`                                            | '\\[\\]'                                                                         |\n| `image`                                    | Enterprise Gateway image name and tag to use. Ensure the tag is updated to the version of Enterprise Gateway you wish to run.                                                                                                                    | `elyra/enterprise-gateway:VERSION`, where `VERSION` is the release being used  |\n| `imagePullPolicy`                          | Enterprise Gateway image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well.  
| `IfNotPresent`                                                                 |\n| `service.type`                             | Kubernetes Service Type - Nodeport,ClusterIP,LoadBalancer                                                                                                                                                                                        | `ClusterIP`                                                                    |\n| `service.externalIPs.k8sMasterPublicIP`    | Master public IP on which to expose EG.                                                                                                                                                                                                          | nil                                                                            |\n| `service.ports`                            | An array of service ports for Kubernetes Service                                                                                                                                                                                                 | see below                                                                      |\n| `service.ports[0].name`                    | The primary port name for Enterprise Gateway is servicing requests.                                                                                                                                                                              | `http`                                                                         |\n| `service.ports[0].port`                    | The primary port on which Enterprise Gateway is servicing requests.                                                                                                                                                                              
| `8888`                                                                         |\n| `service.ports[1].name`                    | The port name on which Enterprise Gateway will receive kernel connection info responses.                                                                                                                                                         | `http-response`                                                                |\n| `service.ports[1].port`                    | The port on which Enterprise Gateway will receive kernel connection info responses.                                                                                                                                                              | `8877`                                                                         |\n| `deployment.enabled`                       | flag to enable run Enterprise Gateway deployment                                                                                                                                                                                                 | `true`                                                                         |\n| `deployment.serviceAccountName`            | Kubernetes Service Account to run Enterprise Gateway                                                                                                                                                                                             | `enterprise-gateway-sa`                                                        |\n| `deployment.tolerations`                   | Kubernetes tolerations for Enterprise Gateway pods to ensure that pods are not scheduled onto inappropriate nodes                                                                                                                                | `[]`                                                                           |\n| `deployment.affinity`                      | 
Kubernetes affinity for Enterprise Gateway pods to keep pods scheduled onto appropriate nodes                                                                                                                                                    | `{}`                                                                           |\n| `deployment.nodeSelector`                  | Kubernetes nodeselector for Enterprise Gateway pods to keep pods scheduled onto appropriate nodes - simpler alternative to tolerations and affinity                                                                                              | `{}`                                                                           |\n| `deployment.terminationGracePeriodSeconds` | Time to wait for Enterprise Gateway to gracefully shutdown.                                                                                                                                                                                      | `30`                                                                           |\n| `deployment.resources`                     | set Enterprise Gateway container resources.                                                                                                                                                                                                      | valid Yaml resources, see values file for example                              |\n| `deployment.replicas`                      | Update to deploy multiple replicas of EG.                                                                                                                                                                                                        | `1`                                                                            |\n| `deployment.extraEnv`                      | Additional environment variables to set for Enterprise Gateway.                                                                             
                                                                                                     | `{}`                                                                           |\n| `logLevel`                                 | Log output level.                                                                                                                                                                                                                                | `DEBUG`                                                                        |\n| `mirrorWorkingDirs`                        | Whether to mirror working directories. NOTE: This requires appropriate volume mounts to make notebook dir accessible.                                                                                                                            | `false`                                                                        |\n| `authToken`                                | Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token)                                                                                                                                                      | `nil`                                                                          |\n| `kernel.clusterRole`                       | Kernel cluster role created by this chart. Used if no KERNEL_NAMESPACE is provided by client.                                                                                                                                                    | `kernel-controller`                                                            |\n| `kernel.shareGatewayNamespace`             | Will start kernels in the same namespace as EG if True.                                                                                                                                                                                          
| `false`                                                                        |\n| `kernel.defaultServiceAccountName`         | Service account name to use for kernel pods when no service account is specified. This service account should exist in the namespace where kernel pods are launched.                                                                             | `default`                                                                      |\n| `kernel.launchTimeout`                     | Timeout for kernel launching in seconds.                                                                                                                                                                                                         | `60`                                                                           |\n| `kernel.cullIdleTimeout`                   | Idle timeout in seconds. Default is 1 hour.                                                                                                                                                                                                      | `3600`                                                                         |\n| `kernel.cullConnected`                     | Whether to cull idle kernels that still have clients connected.                                                                                                                                                                                  | `false`                                                                        |\n| `kernel.allowedKernels`                    | List of kernel names that are available for use.                                                                                                                                                                                                 
| `{r_kubernetes,...}` (see `values.yaml`)                                       |\n| `kernel.defaultKernelName`                 | Default kernel name should be something from the allowedKernels                                                                                                                                                                                  | `python_kubernetes`                                                            |\n| `kernelspecs.image`                        | Optional custom data image containing kernelspecs to use. Cannot be used with NFS enabled.                                                                                                                                                       | `nil`                                                                          |\n| `kernelspecs.imagePullPolicy`              | Kernelspecs image pull policy.                                                                                                                                                                                                                   | `Always`                                                                       |\n| `nfs.enabled`                              | Whether NFS-mounted kernelspecs are enabled. Cannot be used with `kernelspecs.image` set.                                                                                                                                                        | `false`                                                                        |\n| `nfs.internalServerIPAddress`              | IP address of NFS server. Required if NFS is enabled.                                                                                                                                                                                            
| `nil`                                                                          |\n| `kernelspecsPvc.enabled`
| `\"\"`                                                                           |\n| `ingress.hostName`                         | Kubernetes Ingress hostname, required. .                                                                                                                                                                                                         | nil                                                                            |\n| `ingress.pathType`                         | Kubernetes Ingress PathType (`ImplementationSpecific`,`Prefix`).                                                                                                                                                                                 | `Prefix`                                                                       |\n| `ingress.path`                             | Kubernetes Ingress Path.                                                                                                                                                                                                                         | `/`                                                                            |\n| `ingress.annotations`                      | Use annotations to configure ingress. See examples for Traefik and nginx. NOTE: A traefik or nginx controller must be installed and `ingress.enabled` must be set to `true`.                                                                     
| see values file for examples                                                   |\n| `kip.enabled`                              | Whether the Kernel Image Puller should be used                                                                                                                                                                                                   | `true`                                                                         |\n| `kip.podSecurityPolicy.create`             | enable creation of PSP for Image Puller, requires `global.rbac: true` and non-empy KIP service account                                                                                                                                           | `false`                                                                        |\n| `kip.podSecurityPolicy.annotatons`         | annotations for Image Puller PSP account                                                                                                                                                                                                         | `{}`                                                                           |\n| `kip.tolerations`                          | Kubernetes tolerations for Kernel Image Puller pods to ensure that pods are not scheduled onto inappropriate nodes                                                                                                                               | `[]`                                                                           |\n| `kip.affinity`                             | Kubernetes affinity for Kernel Image Puller pods to keep pods scheduled onto appropriate nodes                                                                                                                                                   | `{}`                                                                           |\n| `kip.nodeSelector`                         | 
Kubernetes nodeselector for Kernel Image Puller pods to keep pods scheduled onto appropriate nodes - simpler alternative to tolerations and affinity                                                                                             | `{}`                                                                           |\n| `kip.serviceAccountName`                   | Kubernetes Service Account to run Kernel Image Puller Gateway                                                                                                                                                                                    | `kernel-image-puller-sa`                                                       |\n| `kip.resources`                            | set Kernel Image Puller container resources.                                                                                                                                                                                                     | valid Yaml resources, see values file for example                              |\n| `kip.image`                                | Kernel Image Puller image name and tag to use. Ensure the tag is updated to the version of the Enterprise Gateway release you wish to run.                                                                                                       | `elyra/kernel-image-puller:VERSION`, where `VERSION` is the release being used |\n| `kip.imagePullPolicy`                      | Kernel Image Puller image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent`                                                                 |\n| `kip.interval`                             | The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images.                                       
                                                                                                     | `300`                                                                          |\n| `kip.pullPolicy`                           | Determines whether the Kernel Image Puller will pull kernel images it has previously pulled (`Always`) or only those it hasn't yet pulled (`IfNotPresent`)                                                                                       | `IfNotPresent`                                                                 |\n| `kip.criSocket`                            | The container runtime interface socket, use `/run/containerd/containerd.sock` for containerd installations                                                                                                                                       | `/var/run/docker.sock`                                                         |\n| `kip.defaultContainerRegistry`             | Prefix to use if a registry is not already specified on image name (e.g., elyra/kernel-py:VERSION)                                                                                                                                               | `docker.io`                                                                    |\n| `kip.fetcher`                              | fetcher to fetch image names, defaults to KernelSpecsFetcher                                                                                                                                                                                     | `KernelSpecsFetcher`                                                           |\n| `kip.images`                               | if StaticListFetcher is used KIP_IMAGES defines the list of images pullers will fetch                                                                                                                                                            | `[]`                                 
                                          |\n| `kip.internalFetcher `                     | if CombinedImagesFetcher is used KIP_INTERNAL_FETCHERS defines the fetchers that get used internally                                                                                                                                             | `KernelSpecsFetcher`                                                           |\n\n## Uninstalling Enterprise Gateway\n\nWhen using helm, you can uninstall Enterprise Gateway with the following command:\n\n```bash\nhelm uninstall enterprise-gateway \\\n  --kube-context [mycluster-context-name] \\\n   --namespace [namespace-name]\n```\n\n## Enterprise Gateway Deployment Details\n\nEnterprise Gateway is deployed as a Kubernetes deployment and exposed by a Kubernetes service. It can be accessed by the service name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have the kubernetes label of `app=enterprise-gateway` applied.\n\nThe Enterprise Gateway Kubernetes service _type_ can be:\n\n- `NodePort`: allows to access Enterprise Gateway with `http://[worker IP]:[NodePort]` or having a load balancer route traffic to `http://[worker IP's]:[NodePort]`\n- `LoadBalancer`: requires appropriate network plugin available\n- `ClusterIP`: requires Kubernetes Ingress Controller\n\nKernels are stateful, therefore service is configured with a `sessionAffinity` of `ClientIP`. 
As a result, kernel creation requests will be routed to the same pod.\n\nIncrease the number of `replicas` of Enterprise Gateway Deployment to improve deployment availability, but because `sessionAffinity` of `ClientIP`, traffic from the same client will be sent to the same pod of the Enterprise Gateway and if that pod goes down, client will get an error and will need to reestablish connection to another pod of the Enterprise Gateway.\n\n### Namespaces\n\nA best practice for Kubernetes applications running in an enterprise is to isolate applications via namespaces. Since Enterprise Gateway also requires isolation at the kernel level, it makes sense to use a namespace for each kernel, by default.\n\nThe primary namespace is created prior to the initial Helm deployment (e.g., `enterprise-gateway`). This value is communicated to Enterprise Gateway via the env variable `EG_NAMESPACE`. All Enterprise Gateway components reside in this namespace.\n\nBy default, kernel namespaces are created when the respective kernel is launched. At that time, the kernel namespace name is computed from the kernel username (`KERNEL_USERNAME`) and its kernel ID (`KERNEL_ID`) just like the kernel pod name. Upon a kernel's termination, this namespace - provided it was created by Enterprise Gateway - will be deleted.\n\nInstallations wishing to pre-create the kernel namespace can do so by conveying the name of the kernel namespace via `KERNEL_NAMESPACE` in the `env` portion of the kernel creation request. (They must also provide the namespace's service account name via `KERNEL_SERVICE_ACCOUNT_NAME` - see next section.) When `KERNEL_NAMESPACE` is set, Enterprise Gateway will not attempt to create a kernel-specific namespace, nor will it attempt its deletion. 
As a result, kernel namespace lifecycle management is the user's responsibility.\n\n```{tip}\nIf you need to associate resources to users, one suggestion is to create a namespace per user and set `KERNEL_NAMESPACE = KERNEL_USERNAME` on the client (see [Kernel Environment Variables](../users/kernel-envs.md)).\n```\n\nAlthough **not recommended**, installations requiring everything in the same namespace - Enterprise Gateway and all its kernels - can do so by setting the helm chart value `kernel.shareGatewayNamespace` to `true` - which is then set into the `EG_SHARED_NAMESPACE` env. When set, all kernels will run in the Enterprise Gateway namespace, essentially eliminating all aspects of isolation between kernel instances (and resources).\n\n### Role-Based Access Control (RBAC)\n\nAnother best practice of Kubernetes applications is to define the minimally viable set of permissions for the application. Enterprise Gateway does this by defining role-based access control (RBAC) objects for both Enterprise Gateway and kernels.\n\nBecause the Enterprise Gateway pod must create kernel namespaces, pods, services (for Spark support) and role bindings, a cluster-scoped role binding is required.\n\nThe cluster role binding `enterprise-gateway-controller` also references the subject, `enterprise-gateway-sa`, which is the service account associated with the Enterprise Gateway namespace and also created by [eg-clusterrolebinding.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrolebinding.yaml)).\n\nThe [`eg-clusterrole.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrole.yaml) defines the minimally viable roles for a kernel pod - most of which are required for Spark support.\n\nSince kernels, by default, reside within their own namespace created upon their launch, a cluster role is used within a namespace-scoped role 
binding created when the kernel's namespace is created. The name of the kernel cluster role is `kernel-controller` and, when Enterprise Gateway creates the namespace and role binding, is also the name of the role binding instance.\n\n#### Kernel Service Account Name\n\nAs noted above, installations wishing to pre-create their own kernel namespaces should provide the name of the service account associated with the namespace via `KERNEL_SERVICE_ACCOUNT_NAME` in the `env` portion of the kernel creation request (along with `KERNEL_NAMESPACE`). If not provided, the built-in namespace service account, `default`, will be referenced. In such circumstances, Enterprise Gateway will **not** create a role binding on the name for the service account, so it is the user's responsibility to ensure that the service account has the capability to perform equivalent operations as defined by the `kernel-controller` role.\n\n#### Example Custom Namespace\n\nHere's an example of the creation of a custom namespace (`kernel-ns`) with its own service account (`kernel-sa`) and role binding (`kernel-controller`) that references the cluster-scoped role (`kernel-controller`) and includes appropriate labels to help with administration and analysis:\n\n```yaml\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: kernel-ns\n  labels:\n    app: enterprise-gateway\n    component: kernel\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kernel-sa\n  namespace: kernel-ns\n  labels:\n    app: enterprise-gateway\n    component: kernel\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: kernel-controller\n  namespace: kernel-ns\n  labels:\n    app: enterprise-gateway\n    component: kernel\nsubjects:\n  - kind: ServiceAccount\n    name: kernel-sa\n    namespace: kernel-ns\nroleRef:\n  kind: ClusterRole\n  name: kernel-controller\n  apiGroup: rbac.authorization.k8s.io\n```\n\n### Kernel Image Puller\n\nKernels docker images can be big and their download from 
a container repository (e.g., docker.io or quay.io), which may cause slow kernel pod startup whenever the kernel image is first accessed on any given node.\n\nTo mitigate this issue, Enterprise Gateway deployment includes `kernel-image-puller` or `KIP` Kubernetes DaemonSet. This DaemonSet is responsible for polling Enterprise Gateway for the current set of configured kernelspecs, picking out any configured image name references, and pulling those images to the node on which KIP is running. Because it's a daemon set, this will also address the case when new nodes are added to a configuration (although spinning up new nodes on a kernel start request will likely time out anyway).\n\n#### KIP Configuration\n\n`KIP` is using same kubernetes Service Account as Enterprise Gateway itself, so it will use same credentials to access private docker registry - see helm configuration section for details.\n\n`KIP_INTERVAL` - The Kernel Image Puller can be configured for the interval at which it checks for new kernelspecs\n\n`KIP_NUM_PULLERS`- the number of puller threads it will utilize per node ()\n\n`KIP_NUM_RETRIES` - the number of retries it will attempt for a given image (),\n\n`KIP_PULL_POLICY` - and the pull policy () - which essentially dictates whether it will attempt to pull images that its already encountered (`Always`) vs. only pulling the image if it hasn't seen it yet (`IfNotPresent`).\n\nIf the Enterprise Gateway defines an authentication token (`EG_AUTH_TOKEN`) then that same token should be configured here as (`KIP_AUTH_TOKEN`) so that the puller can correctly authenticate its requests.\n\n#### KIP Container Runtime\n\nThe Kernel Image Puller also supports multiple container runtimes since Docker is no longer configured by default in Kubernetes. KIP currently supports Docker and Containerd runtimes. If another runtime is encountered, KIP will try to proceed using the Containerd client `crictl` against the configured socket. 
As a result, it is important that
If you are deploying Enterprise Gateway via the helm chart, you can enable NFS directly via helm values.\n\nHere you can see how `deployment.yaml` references use of the volume (ia `volumeMounts`\nfor the container specification and `volumes` in the pod specification (non-applicable entries have been omitted):\n\n```yaml\nspec:\n  containers:\n    # Uncomment to enable NFS-mounted kernelspecs\n    volumeMounts:\n      - name: kernelspecs\n        mountPath: '/usr/local/share/jupyter/kernels'\n  volumes:\n    - name: kernelspecs\n      nfs:\n        server: <internal-ip-of-nfs-server>\n        path: '/usr/local/share/jupyter/kernels'\n```\n\n```{tip}\nBecause the kernel pod definition file, [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2), resides in the kernelspecs hierarchy, customizations to the deployments of future kernel instances can now also take place.  In addition, these same entries can be added to the kernel-pod.yaml definitions if access to the same or other NFS mount points are desired within kernel pods. (We'll be looking at ways to make modifications to per-kernel configurations more manageable.)\n```\n\nUse of more formal persistent volume types must include the [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes) and corresponding Persistent Volume Claim stanzas.\n\n#### Via Custom Container Image\n\nIf you are deploying Enterprise Gateway via the helm chart, then instead of using NFS, you can build your custom kernelspecs into a container image that Enterprise Gateway consumes. Here's an example Dockerfile for such a container:\n\n```\nFROM alpine:3.9\n\nCOPY kernels /kernels\n```\n\nThis assumes that your source contains a `kernels/` directory with all the kernelspecs you'd like to end up in the image, e.g. 
`kernels/python_kubernetes/kernel.json` and any associated files.\n\nOnce you build your custom kernelspecs image and push it to a container registry, you can refer to it from your helm deployment. For instance:\n\n```bash\nhelm upgrade --install --atomic --namespace enterprise-gateway enterprise-gateway etc/kubernetes/helm --set kernelspecs.image=your-custom-image:latest\n```\n\n...where `your-custom-image:latest` is the image name and tag of your kernelspecs image. Once deployed, the helm chart copies the data from the `/kernels` directory of your container into the `/usr/local/share/jupyter/kernels` directory of the Enterprise Gateway pod. Note that when this happens, the built-in kernelspecs are no longer available. So include all kernelspecs that you want to be available in your container image.\n\nAlso, you should update the helm chart `kernel.allowedKernels` (or usually comprehended as kernel whitelist) value with the name(s) of your custom kernelspecs.\n\n## Kubernetes Kernel Instances\n\nThere are essentially two kinds of kernels (independent of language) launched within an Enterprise Gateway Kubernetes cluster - _vanilla_ and _spark-on-kubernetes_ (if available).\n\nWhen _vanilla_ kernels are launched, Enterprise Gateway is responsible for creating the corresponding pod. On the other hand, _spark-on-kubernetes_ kernels are launched via `spark-submit` with a specific `master` URI - which then creates the corresponding pod(s) (including executor pods). Images can be launched using both forms provided they have the appropriate support for Spark installed.\n\nHere's the yaml configuration used when _vanilla_ kernels are launched. 
As noted in the `KubernetesProcessProxy` section below, this file ([kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2)) serves as a template where each of the tags surrounded with `{{` and `}}` represent variables that are substituted at the time of the kernel's launch. All `{{ kernel_xxx }}` parameters correspond to `KERNEL_XXX` environment variables that can be specified from the client in the kernel creation request's json body.\n\n```yaml+jinja\napiVersion: v1\nkind: Pod\nmetadata:\n  name: \"{{ kernel_pod_name }}\"\n  namespace: \"{{ kernel_namespace }}\"\n  labels:\n    kernel_id: \"{{ kernel_id }}\"\n    app: enterprise-gateway\n    component: kernel\nspec:\n  restartPolicy: Never\n  serviceAccountName: \"{{ kernel_service_account_name }}\"\n  {% if kernel_uid is defined or kernel_gid is defined %}\n  securityContext:\n    {% if kernel_uid is defined %}\n    runAsUser: {{ kernel_uid | int }}\n    {% endif %}\n    {% if kernel_gid is defined %}\n    runAsGroup: {{ kernel_gid | int }}\n    {% endif %}\n    fsGroup: 100\n  {% endif %}\n  containers:\n  - env:\n    - name: EG_RESPONSE_ADDRESS\n      value: \"{{ eg_response_address }}\"\n    - name: EG_PUBLIC_KEY\n      value: \"{{ eg_public_key }}\"\n    - name: KERNEL_LANGUAGE\n      value: \"{{ kernel_language }}\"\n    - name: KERNEL_SPARK_CONTEXT_INIT_MODE\n      value: \"{{ kernel_spark_context_init_mode }}\"\n    - name: KERNEL_NAME\n      value: \"{{ kernel_name }}\"\n    - name: KERNEL_USERNAME\n      value: \"{{ kernel_username }}\"\n    - name: KERNEL_ID\n      value: \"{{ kernel_id }}\"\n    - name: KERNEL_NAMESPACE\n      value: \"{{ kernel_namespace }}\"\n    image: \"{{ kernel_image }}\"\n    name: \"{{ kernel_pod_name }}\"\n```\n\nThere are a number of items worth noting:\n\n1. Kernel pods can be identified in three ways using `kubectl`:\n\n   1. 
By the global label `app=enterprise-gateway` - useful when needing to identify all related objects (e.g., `kubectl get all -l app=enterprise-gateway`)\n   1. By the _kernel_id_ label `kernel_id=<kernel_id>` - useful when only needing specifics about a given kernel. This label is used internally by enterprise-gateway when performing its discovery and lifecycle management operations.\n   1. By the _component_ label `component=kernel` - useful when needing to identity only kernels and not other enterprise-gateway components. (Note, the latter can be isolated via `component=enterprise-gateway`.)\n\n   Note that since kernels run in isolated namespaces by default, it's often helpful to include the clause `--all-namespaces` on commands that will span namespaces. To isolate commands to a given namespace, you'll need to add the namespace clause `--namespace <namespace-name>`.\n\n1. Each kernel pod is named by the invoking user (via the `KERNEL_USERNAME` env) and its kernel_id (env `KERNEL_ID`). This identifier also applies to those kernels launched within `spark-on-kubernetes`.\n\n1. Kernel pods use the specified `securityContext`. If env `KERNEL_UID` is not specified in the kernel creation request a default value of `1000` (the jovyan user) will be used. Similarly, for `KERNEL_GID`, whose default is `100` (the users group). In addition, Enterprise Gateway enforces a list of prohibited UID and GID values. By default, this list is initialized to the 0 (root) UID and GID. Administrators can configure the `EG_PROHIBITED_UIDS` and `EG_PROHIBITED_GIDS` environment variables via the `deployment.yaml` file with comma-separated values to alter the set of user and group ids to be prevented.\n\n1. As noted above, if `KERNEL_NAMESPACE` is not provided in the request, Enterprise Gateway will create a namespace using the same naming algorithm for the pod. 
In addition, the `kernel-controller` cluster role will be bound to a namespace-scoped role binding of the same name using the namespace's default service account as its subject. Users wishing to use their own kernel namespaces must provide **both** `KERNEL_NAMESPACE` and `KERNEL_SERVICE_ACCOUNT_NAME` as these are both used in the `kernel-pod.yaml.j2` as `{{ kernel_namespace }}` and `{{ kernel_service_account_name }}`, respectively.\n\n1. Kernel pods have restart policies of `Never`. This is because the Jupyter framework already has built-in logic for auto-restarting failed kernels and any other restart policy would likely interfere with the built-in behaviors.\n\n1. The parameters to the launcher that is built into the image are communicated via environment variables as noted in the `env:` section above.\n\n## Unconditional Volume Mounts\n\nUnconditional volume mounts can be added in the `kernel-pod.yaml.j2` template. An example of these unconditional volume mounts can be found when extending docker shared memory. For some I/O jobs the pod will need more than the default `64mb` of shared memory on the `/dev/shm` path.\n\n```yaml+jinja\nvolumeMounts:\n# Define any \"unconditional\" mounts here, followed by \"conditional\" mounts that vary per client\n{% if kernel_volume_mounts %}\n  {% for volume_mount in kernel_volume_mounts %}\n- {{ volume_mount }}\n  {% endfor %}\n{% endif %}\nvolumes:\n# Define any \"unconditional\" volumes here, followed by \"conditional\" volumes that vary per client\n{% if kernel_volumes %}\n{% for volume in kernel_volumes %}\n- {{ volume }}\n{% endfor %}\n{% endif %}\n```\n\nThe conditional volumes are handled by the loops inside the yaml file. Any unconditional volumes can be added before these conditions. 
In the scenario where the `/dev/shm` will need to be expanded the following mount has to be added.\n\n```yaml+jinja\nvolumeMounts:\n# Define any \"unconditional\" mounts here, followed by \"conditional\" mounts that vary per client\n- mountPath: /dev/shm\n  name: dshm\n{% if kernel_volume_mounts %}\n  {% for volume_mount in kernel_volume_mounts %}\n- {{ volume_mount }}\n  {% endfor %}\n{% endif %}\nvolumes:\n# Define any \"unconditional\" volumes here, followed by \"conditional\" volumes that vary per client\n- name: dshm\nemptyDir:\n  medium: Memory\n{% if kernel_volumes %}\n{% for volume in kernel_volumes %}\n- {{ volume }}\n{% endfor %}\n{% endif %}\n```\n\n## Kubernetes Resource Quotas\n\nWhen deploying kernels on a Kubernetes cluster a best practice is to define request and limit quotas for CPUs, GPUs, and Memory. These quotas can be defined from the client via KERNEL\\_-prefixed environment variables which are passed through to the kernel at startup.\n\n- `KERNEL_CPUS` - CPU Request by Kernel\n- `KERNEL_MEMORY` - MEMORY Request by Kernel\n- `KERNEL_GPUS` - GPUS Request by Kernel\n- `KERNEL_CPUS_LIMIT` - CPU Limit\n- `KERNEL_MEMORY_LIMIT` - MEMORY Limit\n- `KERNEL_GPUS_LIMIT` - GPUS Limit\n\nMemory and CPU units are based on the [Kubernetes Official Documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) while GPU is using the NVIDIA `nvidia.com/gpu` parameter. 
The desired units should be included in the variable's value.\n\nWhen defined, these variables are then substituted into the appropriate location of the corresponding kernel-pod.yaml.j2 template.\n\n```yaml+jinja\n{% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined or kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}\n  resources:\n    {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined %}\n    requests:\n      {% if kernel_cpus is defined %}\n      cpu: \"{{ kernel_cpus }}\"\n      {% endif %}\n      {% if kernel_memory is defined %}\n      memory: \"{{ kernel_memory }}\"\n      {% endif %}\n      {% if kernel_gpus is defined %}\n      nvidia.com/gpu: \"{{ kernel_gpus }}\"\n      {% endif %}\n    {% endif %}\n    {% if kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}\n    limits:\n      {% if kernel_cpus_limit is defined %}\n      cpu: \"{{ kernel_cpus_limit }}\"\n      {% endif %}\n      {% if kernel_memory_limit is defined %}\n      memory: \"{{ kernel_memory_limit }}\"\n      {% endif %}\n      {% if kernel_gpus_limit is defined %}\n      nvidia.com/gpu: \"{{ kernel_gpus_limit }}\"\n      {% endif %}\n    {% endif %}\n  {% endif %}\n```\n\n## KubernetesProcessProxy\n\nTo indicate that a given kernel should be launched into a Kubernetes configuration, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `KubernetesProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Kubernetes environment.\n\nAlong with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the container image to associate with the kernel's pod. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. 
In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`.\n\n_(Please note that the use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. A full list of available image tags can be found in the dockerhub repository corresponding to each image.)_\n\n```json\n{\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    }\n  }\n}\n```\n\nAs always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching _vanilla_ kernels in a kubernetes environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Kubernetes Python API](https://github.com/kubernetes-client/python) to create the corresponding pod instance. The pod is _configured_ by applying the values to each of the substitution parameters into the [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file previously displayed. 
This file resides in the same `scripts` directory as the kubernetes launch script - `launch_kubernetes.py` - which is referenced by the kernel.json's `argv:` stanza:\n\n```json\n{\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nBy default, _vanilla_ kernels use a value of `none` for the spark context initialization mode so no context will be created automatically.\n\nWhen the kernel is intended to target _Spark-on-kubernetes_, its launch is very much like kernels launched in YARN _cluster mode_, albeit with a completely different set of parameters. Here's an example `SPARK_OPTS` string value which best conveys the idea:\n\n```\n  \"SPARK_OPTS\": \"--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.docker.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor-py:v2.5.0-kubernetes-0.5.0 --conf spark.kubernetes.submission.waitAppCompletion=false\",\n```\n\nNote that each of the labels previously discussed are also applied to the _driver_ and _executor_ pods.\n\nFor these invocations, the `argv:` is nearly identical to non-kubernetes configurations, invoking a `run.sh` script which essentially holds the `spark-submit` invocation that takes the aforementioned `SPARK_OPTS` as its primary parameter:\n\n```json\n{\n  \"argv\": [\n    
\"/usr/local/share/jupyter/kernels/spark_python_kubernetes/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n```\n\n### Confirming deployment and the service port mapping\n\n```bash\nkubectl get all --all-namespaces -l app=enterprise-gateway\n\nNAME                        DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE\ndeploy/enterprise-gateway   1         1         1            1           2h\n\nNAME                               DESIRED   CURRENT   READY     AGE\nrs/enterprise-gateway-74c46cb7fc   1         1         1         2h\n\nNAME                                     READY     STATUS    RESTARTS   AGE\npo/enterprise-gateway-74c46cb7fc-jrkl7   1/1       Running   0          2h\n\nNAME                     TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE\nsvc/enterprise-gateway   NodePort   10.110.253.2.3   <none>        8888:32422/TCP   2h\n```\n\nOf particular importance is the mapping to port `8888` (e.g.,`32422`). If you are performing this on the same host as where the notebook will run, then you will need to note the cluster-ip entry (e.g.,`10.110.253.2.3`).\n\n(Note: if the number of replicas is > 1, then you will see two pods listed with different five-character suffixes.)\n\n```{tip}\n You can avoid the need to point at a different port each time EG is launched by adding an `externalIPs:` entry to the `spec:` section of the `service.yaml` file.  This entry can be specifed in the `values.yaml` via the `service.externalIPs.k8sMasterPublicIP` entry.\n```\n\nThe value of the `JUPYTER_GATEWAY_URL` used by the gateway-enabled Notebook server will vary depending on whether you choose to define an external IP or not. 
If an external IP is defined
command with the pod name\n\n```bash\nkubectl exec -it enterprise-gateway-74c46cb7fc-jrkl7 /bin/bash\n```\n\n- Logs can be accessed against the pods or deployment (requires the object type prefix (e.g., `pod/`))\n\n```bash\nkubectl logs -f pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62\n```\n\nNote that if using multiple replicas, commands against each pod are required.\n\n- The Kubernetes dashboard is useful as well. It's located at port `3.2.3` of the master node\n\n```bash\nhttps://elyra-kube1.foo.bar.com:3.2.3/dashboard/#!/overview?namespace=default\n```\n\nFrom there, logs can be accessed by selecting the `Pods` option in the left-hand pane followed by the _lined_ icon on\nthe far right.\n\n- User \"system:serviceaccount:default:default\" cannot list pods in the namespace \"default\"\n\nOn a recent deployment, Enterprise Gateway was not able to create or list kernel pods. Found\nthe following command was necessary. (Kubernetes security relative to Enterprise Gateway is still under construction.)\n\n```bash\nkubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin  --serviceaccount=default:default\n```\n"
  },
  {
    "path": "docs/source/operators/deploy-single.md",
    "content": "# Single-server deployments\n\nSingle-server deployment can be useful for development and is not meant to be run in production environments as it subjects the gateway server to resource exhaustion.\n\nSteps to deploy a single server are:\n\n1. [Install Enterprise Gateway](installing-eg.md)\n1. [Install the desired kernels](installing-kernels.md)\n1. Install and configure the server and desired kernel specifications (see below)\n1. [Launch Enterprise Gateway](launching-eg.md)\n\nIf you just want to try Enterprise Gateway in a single-server setup, you can use the following kernels specification (no need for a kernel launcher since the kernel runs locally):\n\n```json\n{\n  \"display_name\": \"Python 3 Local\",\n  \"language\": \"python\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy\"\n    }\n  },\n  \"argv\": [\"python\", \"-m\", \"ipykernel_launcher\", \"-f\", \"{connection_file}\"]\n}\n```\n\n`process_proxy` is optional (if Enterprise Gateway encounters a kernel specification without the `process_proxy` stanza, it will treat that specification as if it contained `LocalProcessProxy`).\n\n```{tip}\nYou can run a local kernel in [Distributed mode](./deploy-distributed.md) by setting `remote_hosts` to the localhost. Why would you do that?\n\n1. One reason is that it decreases the window in which a port conflict can occur since the 5 kernel ports are created by the launcher (within the same process and therefore closer to the actual invocation of the kernel) rather than by the server prior to the launch of the kernel process.\n2. The second reason is that auto-restarted kernels - when an issue occurs - say due to a port conflict - will create a new set of ports rather than try to re-use the same set that produced the failure in the first place. 
In this case, you'd want to use the [per-kernel configuration](config-kernel-override.md#per-kernel-configuration-overrides) approach and set `remote_hosts` in the config stanza of the `process_proxy` stanza (using the stanza instead of the global `EG_REMOTE_HOSTS` allows you to not interfere with the other resource managers configuration, e.g. Spark Standalone or YARN Client kernels - Those other kernels need to be able to continue leveraging the full cluster nodes).\n\n```\n"
  },
  {
    "path": "docs/source/operators/deploy-yarn-cluster.md",
    "content": "# Hadoop YARN deployments\n\nTo leverage the full distributed capabilities of Jupyter Enterprise Gateway, there is a need to provide additional configuration options in a cluster deployment.\n\nThe following sample kernelspecs are currently available on YARN cluster:\n\n- spark_R_yarn_cluster\n- spark_python_yarn_cluster\n- spark_scala_yarn_cluster\n\nSteps required to complete deployment on a Hadoop YARN cluster are:\n\n1. [Install Enterprise Gateway](installing-eg.md) on the primary node of the Hadoop YARN cluster. Note, this location is not a hard-requirement, but recommended. If installed remotely, some extra configuration will be necessary relative to the Hadoop configuration.\n1. [Install the desired kernels](installing-kernels.md)\n1. Install and configure the server and desired kernel specifications (see below)\n1. [Launch Enterprise Gateway](launching-eg.md)\n\nThe distributed capabilities are currently based on an Apache Spark cluster utilizing Hadoop YARN as the resource manager and thus require the following environment variables to be set to facilitate the integration between Apache Spark and Hadoop YARN components:\n\n- `SPARK_HOME` must point to the Apache Spark installation path\n\n```\nSPARK_HOME:/usr/hdp/current/spark2-client  # For HDP distribution\n```\n\n- EG_YARN_ENDPOINT: Must point to the YARN resource manager endpoint if remote from YARN cluster\n\n```\nEG_YARN_ENDPOINT=http://${YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster\n```\n\n```{note}\nIf Enterprise Gateway is using an applicable `HADOOP_CONF_DIR` that contains a valid `yarn-site.xml` file, then this config value can remain unset (default = None) and the YARN client library will locate the appropriate resource manager from the configuration.  
This is also true in cases where the YARN cluster is configured for high availability.\n```\n\nIf Enterprise Gateway is remote from the YARN cluster (i.e., no `HADOOP_CONF_DIR`) and the YARN cluster is configured for high availability, then the alternate endpoint should also be specified...\n\n```\nEG_ALT_YARN_ENDPOINT=http://${ALT_YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster #Common to YARN deployment\n```\n\n## Configuring Kernels for YARN Cluster mode\n\nFor each supported kernel (IPyKernel for Python, Apache Toree for Scala, and IRKernel for R), we have provided sample kernel configurations and launchers as assets associated with each [Enterprise Gateway release](https://github.com/jupyter-server/enterprise_gateway/releases). For Hadoop YARN configurations, you can access those specific kernel specifications within the `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` file. (Replace `VERSION` with the desired release number.)\n\n```{note}\nThe sample kernels specifications in `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` also contain specification for YARN client mode (in addition to cluster mode).  Both are usable in this situation.\n```\n\n```{tip}\nWe recommend installing kernel specifications into a shared folder like `/usr/local/share/jupyter/kernels`.  This is the location in which they reside within container images and where many of the document references assume they'll be located.\n```\n\n### Python Kernel (IPython kernel)\n\nConsidering we would like to enable the IPython kernel to run on YARN Cluster and Client mode we would have to copy the sample configuration folder **spark_python_yarn_cluster** to where the Jupyter kernels are installed (e.g. 
jupyter kernelspec list)\n\n```bash\nwget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz\nKERNELS_FOLDER=/usr/local/share/jupyter/kernels\ntar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_python_yarn_cluster/ spark_python_yarn_cluster/\n```\n\nFor more information about the IPython kernel, please visit the [IPython kernel](https://ipython.readthedocs.io/en/stable/) page.\n\n### Scala Kernel (Apache Toree)\n\nConsidering we would like to enable the Scala Kernel to run on YARN Cluster and Client mode we would have to copy the sample configuration folder **spark_scala_yarn_cluster** to where the Jupyter kernels are installed (e.g. jupyter kernelspec list)\n\n```bash\nwget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz\nKERNELS_FOLDER=/usr/local/share/jupyter/kernels\ntar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_scala_yarn_cluster/ spark_scala_yarn_cluster/\n```\n\nFor more information about the Scala kernel, please visit the [Apache Toree](https://toree.apache.org/) page.\n\n### R Kernel (IRkernel)\n\nConsidering we would like to enable the IRkernel to run on YARN Cluster and Client mode we would have to copy the sample configuration folder **spark_R_yarn_cluster** to where the Jupyter kernels are installed (e.g. 
jupyter kernelspec list)\n\n```Bash\nwget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz\nKERNELS_FOLDER=/usr/local/share/jupyter/kernels\ntar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_R_yarn_cluster/ spark_R_yarn_cluster/\n```\n\nFor more information about the iR kernel, please visit the [IRkernel](https://irkernel.github.io/) page.\n\n### Adjusting the kernel specifications\n\nAfter installing the kernel specifications, you should have a `kernel.json` that resembles the following (this one is relative to the Python kernel):\n\n```json\n{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_YARN_USER_ENV\": \"PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n```\n\nThe `metadata` and 
`argv` entries for each kernel specification should be nearly identical and not require changes. You will need to adjust the `env` entries to apply to your specific configuration.\n\nYou should also check the same kinds of environment and path settings in the corresponding `bin/run.sh` file - although changes are not typically necessary.\n\nAfter making any necessary adjustments such as updating `SPARK_HOME` or other environment specific configuration and paths, you now should have a new kernel available to execute your notebook cell code distributed on a Hadoop YARN Spark Cluster.\n"
  },
  {
    "path": "docs/source/operators/index.rst",
    "content": "Operators Guide\n===============\n\nThese pages are targeted at *operators* that need to deploy and configure a Jupyter Enterprise Gateway instance.\n\n.. admonition:: Use cases\n\n    - *As an operator, I want to fix the bottleneck on the Jupyter Kernel Gateway server due to large number of kernels running on it and the size of each kernel (spark driver) process, by deploying the Enterprise Gateway, such that kernels can be launched as managed resources within a Hadoop YARN cluster, distributing the resource-intensive driver processes across the cluster, while still allowing the multiple data analysts to leverage the compute power of a large cluster.*\n    - *As an operator, I want to constrain applications to specific port ranges so I can more easily identify issues and manage network configurations that adhere to my corporate policy.*\n    - *As an operator, I want to constrain the number of active kernels that each of my users can have at any given time.*\n\n\nDeploying Enterprise Gateway\n----------------------------\nThe deployment of Enterprise Gateway consists of several items, depending on\nthe nature of the target environment.  Because this topic differs depending on\nwhether the runtime environment is targeting containers or traditional servers,\nwe've separated the discussions accordingly.\n\nContainer-based deployments\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\nEnterprise Gateway includes support for two forms of container-based environments, Kubernetes and Docker.\n\n.. toctree::\n   :maxdepth: 1\n   :name: container-deployments\n\n   deploy-kubernetes\n   deploy-docker\n\nServer-based deployments\n~~~~~~~~~~~~~~~~~~~~~~~~\nTasks for traditional server deployments are nearly identical with respect to\nEnterprise Gateway's installation and invocation, differing slightly with how\nthe kernel specifications are configured.  As a result, we marked those topics\nas \"common\" relative to the others.\n\n.. 
toctree::\n   :maxdepth: 1\n   :name: node-deployments\n\n   installing-eg\n   installing-kernels\n   launching-eg\n   deploy-yarn-cluster\n   deploy-conductor\n   deploy-distributed\n   deploy-single\n\nConfiguring Enterprise Gateway\n------------------------------\nJupyter Enterprise Gateway adheres to\n`Jupyter's common configuration approach <https://jupyter.readthedocs.io/en/latest/use/config.html>`_\n. You can configure an instance of Enterprise Gateway using a configuration file (recommended), via command-line parameters, or by setting the corresponding environment variables.\n\n.. toctree::\n   :maxdepth: 1\n   :name: configuring\n\n   config-file\n   config-cli\n   config-add-env\n   config-env-debug\n   config-sys-env\n   config-kernel-override\n   config-dynamic\n   config-culling\n   config-availability\n   config-security\n"
  },
  {
    "path": "docs/source/operators/installing-eg.md",
    "content": "# Installing Enterprise Gateway (common)\n\nFor new users, we **highly recommend** [installing Anaconda](https://www.anaconda.com/download).\nAnaconda conveniently installs Python, the [Jupyter Notebook](https://jupyter.readthedocs.io/en/latest/install.html), the [IPython kernel](http://ipython.readthedocs.io/en/stable/install/kernel_install.html) and other commonly used\npackages for scientific computing and data science.\n\nUse the following installation steps:\n\n- Download [Anaconda](https://www.anaconda.com/download). We recommend downloading Anaconda's\n  latest Python version (currently Python 3.11+).\n\n- Install the version of Anaconda which you downloaded, following the instructions on the download page.\n\n- Install the latest version of Jupyter Enterprise Gateway from [PyPI](https://pypi.python.org/pypi/jupyter_enterprise_gateway/)\n  or [conda forge](https://conda-forge.org/) along with its dependencies.\n\n```{warning}\nEnterprise Gateway is currently incompatible with `jupyter_client >= 7.0`.  As a result, you should **not** install Enterprise Gateway into the same Python environment in which you intend to run Jupyter Notebook or Jupyter Lab since they will likely be using `jupyter_client >= 7.0`.  Since Enterprise Gateway is typically installed on servers remote from the notebook users, this is usually not an issue.\n```\n\n```{note}\n**Known Dependency Constraints:** Enterprise Gateway pins several key dependencies:\n- `jupyter_client < 7` -- Enterprise Gateway's process proxy mechanism is incompatible with the kernel provisioner framework introduced in jupyter_client 7.x. 
This cap will be removed when EG adopts kernel provisioners (targeted for 4.0).\n- `jupyter_server < 2.0` -- For the same kernel provisioner compatibility reason.\n- `pyzmq < 25.0` -- pyzmq 25 removed deprecated APIs that jupyter_client 6.x still relies on.\n\nThese constraints mean EG should be installed in a dedicated Python environment separate from notebook/lab installations that use newer versions of these packages.\n```\n\n```bash\n# install using pip from pypi\npip install --upgrade jupyter_enterprise_gateway\n```\n\n```bash\n# install using conda from conda forge\nconda install -c conda-forge jupyter_enterprise_gateway\n```\n\nAt this point, the Jupyter Enterprise Gateway deployment provides local kernel support which is fully compatible with Jupyter Kernel Gateway.\n\nTo uninstall Jupyter Enterprise Gateway...\n\n```bash\n#uninstall using pip\npip uninstall jupyter_enterprise_gateway\n```\n\n```bash\n#uninstall using conda\nconda uninstall jupyter_enterprise_gateway\n```\n"
  },
  {
    "path": "docs/source/operators/installing-kernels.md",
    "content": "# Installing supported kernels (common)\n\nEnterprise Gateway includes kernel specifications that support the following kernels:\n\n- IPython kernel (Python)\n- Apache Toree (Scala)\n- IRKernel (R)\n\nRefer to the following for instructions on installing the respective kernels. For cluster-based environments, these steps should be performed on each applicable node of the cluster, unless noted otherwise.\n\n## Python Kernel (IPython kernel)\n\nThe IPython kernel comes pre-installed with Anaconda and we have tested with its default version of [IPython kernel](https://ipython.readthedocs.io/en/stable/).\n\n```{admonition} Important!\n:class: warning\nFor proper operation across the cluster, the Python kernel package (not the kernel specification) must be installed on every node of the cluster available to Enterprise Gateway.  For example, run `pip install ipykernel` on each applicable node.\n\nThis step is also required for the IRkernel (see below).  However, it is **not** required for the Scala (Apache Toree) Kernel as that can be expressed as a dependency in the `spark_submit` invocation.\n```\n\n## Scala Kernel (Apache Toree)\n\nWe have tested the latest version of [Apache Toree](https://toree.apache.org/) with Scala 2.11 support. Please note that the Apache Toree kernel is now bundled in the kernelspecs tar file for each of the Scala kernelspecs provided by Enterprise Gateway.\n\nThe sample kernel specifications included in Enterprise Gateway include the necessary Apache Toree libraries so its installation is not necessary. In addition, because Apache Toree targets Spark installations, its distribution can be achieved via `spark-submit` and its installation is not necessary on worker nodes - except for [distributed deployments](deploy-distributed.md).\n\n## R Kernel (IRkernel)\n\nPerform the following steps on Jupyter Enterprise Gateway hosting system as well as all worker nodes. 
Please refer to the [IRKernel documentation](https://irkernel.github.io/) for further details.\n\n```Bash\nconda install --yes --quiet -c r r-essentials r-irkernel r-argparse\n# Create an R-script to run and install packages and update IRkernel\ncat <<'EOF' > install_packages.R\ninstall.packages(c('repr', 'IRdisplay', 'evaluate', 'git2r', 'crayon', 'pbdZMQ',\n                   'devtools', 'uuid', 'digest', 'RCurl', 'curl', 'argparse'),\n                   repos='http://cran.rstudio.com/')\ndevtools::install_github('IRkernel/IRkernel@0.8.14')\nIRkernel::installspec(user = FALSE)\nEOF\n# run the package install script\n$ANACONDA_HOME/bin/Rscript install_packages.R\n# OPTIONAL: check the installed R packages\nls $ANACONDA_HOME/lib/R/library\n```\n"
  },
  {
    "path": "docs/source/operators/launching-eg.md",
    "content": "# Launching Enterprise Gateway (common)\n\nVery few arguments are necessary to minimally start Enterprise Gateway. The following command could be considered a minimal command:\n\n```bash\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0\n```\n\nwhere `--ip=0.0.0.0` exposes Enterprise Gateway on the public network and `--port_retries=0` ensures that a single instance will be started.\n\n```{note}\nThe ability to target resource-managed clusters (and use remote kernels) will require additional configuration settings depending on the resource manager.  For additional information see the appropriate server-based deployment topic of our Operators Guide.\n```\n\nWe recommend starting Enterprise Gateway as a background task. As a result, you might find it best to create a start script to maintain options, file redirection, etc.\n\nThe following script starts Enterprise Gateway with `DEBUG` tracing enabled (default is `INFO`) and idle kernel culling for any kernels idle for 12 hours with idle check intervals occurring every 60 seconds. The Enterprise Gateway log can then be monitored via `tail -F enterprise_gateway.log` and it can be stopped via `kill $(cat enterprise_gateway.pid)`\n\n```bash\n#!/bin/bash\n\nLOG=/var/log/enterprise_gateway.log\nPIDFILE=/var/run/enterprise_gateway.pid\n\njupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG --RemoteKernelManager.cull_idle_timeout=43200 --MappingKernelManager.cull_interval=60 > $LOG 2>&1 &\nif [ \"$?\" -eq 0 ]; then\n  echo $! > $PIDFILE\nelse\n  exit 1\nfi\n```\n\n```{tip}\nRemember that any options set via the command-line will not be available for [dynamic configuration functionality](config-dynamic.md#dynamic-configurables).\n```\n"
  },
  {
    "path": "docs/source/other/index.rst",
    "content": "Other helpful information\n===========================\nThis section includes some additional information you might find helpful and that spans the various *guides*, like troubleshooting and related resources.\n\n.. toctree::\n   :maxdepth: 1\n   :name: other\n\n   troubleshooting\n   related-resources\n"
  },
  {
    "path": "docs/source/other/related-resources.md",
    "content": "# Related Resources\n\nHere are some resources related to the Jupyter Enterprise Gateway project.\n\n- [Jupyter.org](https://jupyter.org)\n- [Jupyter Server Team Compass](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass)\n- [Jupyter Calendar - Community Meetings](https://docs.jupyter.org/en/latest/community/content-community.html#jupyter-community-meetings)\n- [Jupyter Community Discourse Forum](https://discourse.jupyter.org/)\n- [Jupyter Kernel Gateway Github Repo](https://github.com/jupyter-server/kernel_gateway) - the source code for Kernel Gateway - which supports local kernels and notebook-hosted end-points.\n- [Jupyter Server Github Repo](https://github.com/jupyter-server/jupyter_server) - the source code for the Jupyter Server. Many of the Enterprise Gateway's handlers and kernel management classes either _are_ or are derived from the Jupyter Server classes.\n- [Jupyter Notebook Github Repo](https://github.com/jupyter/notebook) - the source code for the classic Notebook from which the gateways and Jupyter Server were derived.\n- [Jupyter Client Github Repo](https://github.com/jupyter/jupyter_client) - the source code for the base kernel lifecycle management and message classes. Enterprise Gateway extends the `KernelManager` classes of `jupyter_client`.\n"
  },
  {
    "path": "docs/source/other/troubleshooting.md",
    "content": "# Troubleshooting Guide\n\nThis page identifies scenarios we've encountered when running Enterprise Gateway. We also provide\ninstructions for setting up a debug environment on our [Debugging Jupyter Enterprise Gateway](../contributors/debug.md) page.\n\n## Fresh Install\n\nScenario: **I just installed Enterprise Gateway but nothing happens, how do I proceed?**\n\nBecause Enterprise Gateway is one element of a networked application, there are various _touch points_ that should\nbe validated independently. The following items can be used as a checklist to confirm general operability.\n\n1. Confirm that Enterprise Gateway is servicing general requests. This can be accomplished using the following\n   `curl` command, which should produce the json corresponding to the configured kernelspecs:\n   `bash curl http://<gateway_server>:<gateway_port>/api/kernelspecs `\n1. Independently validate any resource manager you're running against. Various resource managers usually provide\n   examples for how to go about validating their configuration.\n1. Confirm that the Enterprise Gateway arguments for contacting the configured resource manager are in place. These\n   should be covered in the deployment section of our Operators Guide.\n1. If using a Notebook server as your front-end, ensure that the Gateway configuration options or NB2KG extension settings are properly configured.\n   Once the notebook has started, a refresh on the tree view should issue the same `kernelspecs` request in step 1 and\n   the drop-down menu items for available kernels should reflect an entry for each kernelspec returned.\n1. **Always** consult your Enterprise Gateway log file. If you have not redirected `stdout` and `stderr` to a\n   file you are highly encouraged to do so. In addition, you should enable `DEBUG` logging at least until your\n   configuration is stable. 
Please note, however, that you may be asked to produce an Enterprise Gateway log with\n   `DEBUG` enabled when reporting issues. An example of output redirection and `DEBUG` logging is also provided in our\n   [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common).\n\n## Hadoop YARN Cluster Mode\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Cluster Mode, but it failed with\na \"Kernel error\" and State: 'FAILED'.**\n\n1. Check the output from Enterprise Gateway for an error message. If an applicationId was\n   generated, make a note of it. For example, you can locate the applicationId\n   `application_15065522733.2.3011` from the following snippet of message:\n   `[D 2017-09-28 17:13:22.675 EnterpriseGatewayApp] 13: State: 'ACCEPTED', Host: 'burna2.yourcompany.com', KernelID: '28a5e827-4676-4415-bbfc-ac30a0dcc4c3', ApplicationID: 'application_15065522733.2.3011' 17/09/28 17:13:22 INFO YarnClientImpl: Submitted application application_15065522733.2.3011 17/09/28 17:13:22 INFO Client: Application report for application_15065522733.2.3011 (state: ACCEPTED) 17/09/28 17:13:22 INFO Client: client token: N/A diagnostics: AM container is launched, waiting for AM container to Register with RM ApplicationMaster host: N/A ApplicationMaster RPC port: -1 queue: default start time: 1506644002471 final status: UNDEFINED tracking URL: http://burna1.yourcompany.com:8088/proxy/application_15065522733.2.3011/`\n1. Lookup the YARN log for that applicationId in the YARN ResourceManager UI: ![YARN ResourceManager UI](../images/yarnui.jpg)\n1. Drill down from the applicationId to find logs for the failed attempts and take appropriate\n   actions. 
For example, for the error below,\n   ```\n   Traceback (most recent call last):\n    File \"launch_ipykernel.py\", line 7, in <module>\n      from ipython_genutils.py3compat import str_to_bytes\n    ImportError: No module named ipython_genutils.py3compat\n   ```\n   Simply running \"pip install ipython_genutils\" should fix the problem. If Anaconda is\n   installed, make sure the environment variable for Python, i.e. `PYSPARK_PYTHON`, is\n   properly configured in the kernelspec and matches the actual Anaconda installation\n   directory.\n\n## SSH Permissions\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode, but it failed with\na \"Kernel error\" and an `AuthenticationException`.**\n\n```\n[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'AuthenticationException' occurred\nwhen creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra',\nmessage='Authentication failed.'.\n```\n\nThis error indicates that the password-less ssh may not be properly configured. Password-less\nssh needs to be configured on the node that the Enterprise Gateway is running on to all other\nworker nodes.\n\nYou might also see an `SSHException` indicating a similar issue.\n\n```\n[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'SSHException' occurred\nwhen creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra',\nmessage='No authentication methods available.'.\n```\n\nIn general, you can look for more information in the kernel log for YARN Client\nkernels. The default location is /tmp with a filename of `kernel-<kernel_id>.log`. 
The location\ncan be configured using the environment variable `EG_KERNEL_LOG_DIR` during Enterprise Gateway start up.\n\n```{seealso}\n[Launching Enterprise Gateway](../operators/launching-eg.md#launching-enterprise-gateway-common) for an\nexample of starting the Enterprise Gateway from a script and the\n[Operators Guide](../operators/config-add-env.md#additional-environment-variables)\nfor a list of configurable environment variables.\n```\n\n## SSH Tunneling\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode with SSH tunneling enabled,\nbut it failed with a \"Kernel error\" and a SSHException.**\n\n```\n[E 2017-10-26 11:48:20.922 EnterpriseGatewayApp] The following exception occurred waiting\nfor connection file response for KernelId 'da3d0dde-9de1-44b1-b1b4-e6f3cf52dfb9' on host\n'remote-host-name': The authenticity of the host can't be established.\n```\n\nThis error indicates that fingerprint for the ECDSA key of the remote host has not been added\nto the list of known hosts from where the SSH tunnel is being established.\n\nFor example, if the Enterprise Gateway is running on `node1` under service-user `jdoe` and\nenvironment variable `EG_REMOTE_HOSTS` is set to `node2,node3,node4`, then the Kernels can be\nlaunched on any of those hosts and a SSH tunnel will be established between `node1` and\nany of the those hosts.\n\nTo address this issue, you need to perform a one-time step that requires you to login to\n`node1` as `jdoe` and manually SSH into each of the remote hosts and accept the fingerprint\nof the ECDSA key of the remote host to be added to the list of known hosts as shown below:\n\n```\n[jdoe@node1 ~]$ ssh node2\nThe authenticity of host 'node2 (172.16.207.191)' can't be established.\nECDSA key fingerprint is SHA256:Mqi3txf4YiRC9nXg8a/4gQq5vC4SjWmcN1V5Z0+nhZg.\nECDSA key fingerprint is MD5:bc:4b:b2:39:07:98:c1:0b:b4:c3:24:38:92:7a:2d:ef.\nAre you sure you want to continue connecting (yes/no)? 
yes\nWarning: Permanently added 'node2,172.16.207.191' (ECDSA) to the list of known hosts.\n[jdoe@node2 ~] exit\n```\n\nRepeat the aforementioned step as `jdoe` on `node1` for each of the hosts listed in\n`EG_REMOTE_HOSTS` and restart Enterprise Gateway.\n\n## Kernel Encounters `TypeError`\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel, but it failed with `TypeError: Incorrect padding`.**\n\n```\nTraceback (most recent call last):\n  File \"/opt/conda/lib/python3.8/site-packages/tornado/web.py\", line 1512, in _execute\n    result = yield result\n  File \"/opt/conda/lib/python3.8/site-packages/tornado/gen.py\", line 1055, in run\n    value = future.result()\n  ...\n  ...\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/kernels/remotemanager.py\", line 125, in _launch_kernel\n    return self.process_proxy.launch_process(kernel_cmd, **kw)\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/yarn.py\", line 63, in launch_process\n    self.confirm_remote_startup(kernel_cmd, **kw)\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/yarn.py\", line 174, in confirm_remote_startup\n    ready_to_connect = self.receive_connection_info()\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py\", line 565, in receive_connection_info\n    raise e\nTypeError: Incorrect padding\n```\n\nTo address this issue, first ensure that the launchers used for each kernel are derived\nfrom the same release as the Enterprise Gateway server. 
Next ensure that `pycryptodomex 3.9.7`\nor later is installed on all hosts using either `pip install` or `conda install` as shown below:\n\n```\n[jdoe@node1 ~]$ pip uninstall pycryptodomex\n[jdoe@node1 ~]$ pip install pycryptodomex\n```\n\nor\n\n```\n[jdoe@node1 ~]$ conda install pycryptodomex\n```\n\nThis should be done on the host running Enterprise Gateway as well as all the remote hosts\non which the kernel is launched.\n\n## Port Range\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel with port range, but it failed with `RuntimeError: Invalid port range `.**\n\n```\nTraceback (most recent call last):\n  File \"/opt/conda/lib/python3.8/site-packages/tornado/web.py\", line 1511, in _execute\n    result = yield result\n  File \"/opt/conda/lib/python3.8/site-packages/tornado/gen.py\", line 1055, in run\n    value = future.result()\n  ....\n  ....\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py\", line 478, in __init__\n    super(RemoteProcessProxy, self).__init__(kernel_manager, proxy_config)\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py\", line 87, in __init__\n    self._validate_port_range(proxy_config)\n  File \"/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py\", line 407, in _validate_port_range\n    \"port numbers is (1024, 65535).\".format(self.lower_port))\nRuntimeError: Invalid port range '1000..2000' specified. 
Range for valid port numbers is (1024, 65535).\n```\n\nTo address this issue, make sure that the specified port range does not overlap with TCP's well-known\nport range of (0, 1024\\].\n\n## Hadoop YARN Timeout\n\nScenario: **I'm trying to launch a (Python/Scala/R) kernel, but it times out and the YARN application status remains `ACCEPTED`.**\n\nEnterprise Gateway log from server will look like the one below, and will complain that there are no resources:\n`launch timeout due to: YARN resources unavailable`\n\n```bash\n    State: 'ACCEPTED', Host: '', KernelID: '3181db50-8bb5-4f91-8556-988895f63efa', ApplicationID: 'application_1537119233094_0001'\n    State: 'ACCEPTED', Host: '', KernelID: '3181db50-8bb5-4f91-8556-988895f63efa', ApplicationID: 'application_1537119233094_0001'\n  ...\n  ...\n    SIGKILL signal sent to pid: 19690\n    YarnClusterProcessProxy.kill, application ID: application_1537119233094_0001, kernel ID: 3181db50-8bb5-4f91-8556-988895f63efa, state: ACCEPTED\n    KernelID: '3181db50-8bb5-4f91-8556-988895f63efa' launch timeout due to: YARN resources unavailable after 61.0 seconds for app application_1537119233094_0001, launch timeout: 60.0!  Check YARN configuration.\n```\n\nThe most common cause for this is that YARN Resource Managers are failing to start and the cluster sees no resources available.\nMake sure YARN Resource Managers are running ok. We have also noticed that, in Kerberized environments, sometimes there are\nissues with directory access rights that cause the YARN Resource Managers to fail to start and this can be corrected by validating\nthe existence of `/hadoop/yarn` and that it's owned by `yarn: hadoop`.\n\n## Kernel Resources\n\nScenario: **My kernel keeps dying when processing jobs that require a large amount of resources (e.g. 
large files)**\n\nThis is usually seen when you are trying to use more resources than what is available for your kernel.\nTo address this issue, increase the amount of memory available for your Hadoop YARN application or another\nresource manager managing the kernel. For example, on Kubernetes, this may be a time when the kernel specification's [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file should be extended with resource quotas.\n\n## Kerberos\n\nScenario: **I'm trying to use a notebook with user impersonation on a Kerberos enabled cluster, but it fails to authenticate.**\n\nWhen using user impersonation in a YARN cluster with Kerberos authentication, if Kerberos is not\nset up properly you will usually see the following warning in your Enterprise Gateway log that will keep a notebook from connecting:\n\n```bash\n  WARN Client: Exception encountered while connecting to the server : javax.security.sasl.SaslException: GSS initiate failed\n    [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]\n```\n\nThe most common cause for this WARN is when the user that started Enterprise Gateway is not authenticated\nwith Kerberos. 
This can happen when the user has either not run `kinit` or their previous ticket has expired.\n\n## Openshift Kubernetes\n\nScenario: **Running Jupyter Enterprise Gateway on OpenShift Kubernetes Environment fails trying to create /home/jovyan/.local**\n\nAs described [in the OpenShift Admin Guide](https://docs.openshift.com/container-platform/4.10/openshift_images/create-images.html)\nthere is a need to issue the following command to enable running with `USER` in Dockerfile.\n\n```bash\noc adm policy add-scc-to-group anyuid system:authenticated\n```\n\n## Opening an issue\n\nScenario: **None of the scenarios on this page match or resolve my issue, what do I do next?**\n\nIf you are unable to resolve your issue, take a look at our\n[open issues list](https://github.com/jupyter-server/enterprise_gateway/issues) to see if there is an applicable scenario\nalready reported. If found, please add a comment to the issue so that we can get a sense of urgency (although all\nissues are important to us). If not found, please provide the following information if possible in a **new issue**.\n\n1. Describe the issue in as much detail as possible. This should include configuration information about your environment.\n1. Gather and _attach_ the following files to the issue. If possible, please archive the files first.\n   1. The **complete** Enterprise Gateway log file. If possible, please enable `DEBUG` logging that encompasses\n      the issue. You can refer to this section of our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common)\n      for redirection and `DEBUG` enablement.\n   1. The log file(s) produced from the corresponding kernel. This is primarily a function of the underlying resource\n      manager.\n      - For containerized installations like Kubernetes or Docker Swarm, kernel log output can be captured by\n        running the appropriate `logs` command against the pod or container, respectively. 
The names of the\n        corresponding pod/container can be found in the Enterprise Gateway log.\n      - For `Hadoop YARN` environments,\n        you'll need to navigate to the appropriate log directory relative to the application ID associated with the kernel.\n        The application ID can be located in the Enterprise Gateway log. If you have access to an administrative console,\n        you can usually navigate to the application logs more easily.\n   1. Although unlikely, the notebook log may also be helpful. If we find that the issue is more client-side\n      related, we may ask for `DEBUG` logging there as well.\n1. If you have altered or created new kernel specifications, the files corresponding to the failing kernels would be\n   helpful. These files could also be added to the attached archive or attached separately.\n\nPlease know that we understand that some information cannot be provided due to its sensitivity. In such cases, just\nlet us know and we'll be happy to approach the resolution of your issue from a different angle.\n"
  },
  {
    "path": "docs/source/users/client-config.md",
    "content": "# Gateway Client Configuration\n\nThe set of Gateway Client configuration options include the following. To get the current set of supported options, run the following:\n\n```bash\njupyter server --help-all\n```\n\nor\n\n```bash\njupyter server --generate-config\n```\n\nThe following is produced from the `--help-all` option. To determine the corresponding configuration file option, replace `--` with `c.`.\n\n```\n--GatewayClient.auth_scheme=<Unicode>\n    The auth scheme, added as a prefix to the authorization token used in the HTTP headers.\n            (JUPYTER_GATEWAY_AUTH_SCHEME env var)\n    Default: None\n--GatewayClient.auth_token=<Unicode>\n    The authorization token used in the HTTP headers. The header will be\n    formatted as::\n                {\n                    'Authorization': '{auth_scheme} {auth_token}'\n                }\n            (JUPYTER_GATEWAY_AUTH_TOKEN env var)\n    Default: None\n--GatewayClient.ca_certs=<Unicode>\n    The filename of CA certificates or None to use defaults.\n    (JUPYTER_GATEWAY_CA_CERTS env var)\n    Default: None\n--GatewayClient.client_cert=<Unicode>\n    The filename for client SSL certificate, if any.\n    (JUPYTER_GATEWAY_CLIENT_CERT env var)\n    Default: None\n--GatewayClient.client_key=<Unicode>\n    The filename for client SSL key, if any.  (JUPYTER_GATEWAY_CLIENT_KEY env\n    var)\n    Default: None\n--GatewayClient.connect_timeout=<Float>\n    The time allowed for HTTP connection establishment with the Gateway server.\n            (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)\n    Default: 40.0\n--GatewayClient.env_whitelist=<Unicode>\n    A comma-separated list of environment variable names that will be included, along with\n             their values, in the kernel startup request.  
The corresponding `env_whitelist` configuration\n             value must also be set on the Gateway server - since that configuration value indicates which\n             environmental values to make available to the kernel. (JUPYTER_GATEWAY_ENV_WHITELIST env var)\n    Default: ''\n--GatewayClient.gateway_retry_interval=<Float>\n    The time allowed for HTTP reconnection with the Gateway server for the first time.\n                Next will be JUPYTER_GATEWAY_RETRY_INTERVAL multiplied by two in factor of numbers of retries\n                but less than JUPYTER_GATEWAY_RETRY_INTERVAL_MAX.\n                (JUPYTER_GATEWAY_RETRY_INTERVAL env var)\n    Default: 1.0\n--GatewayClient.gateway_retry_interval_max=<Float>\n    The maximum time allowed for HTTP reconnection retry with the Gateway server.\n                (JUPYTER_GATEWAY_RETRY_INTERVAL_MAX env var)\n    Default: 30.0\n--GatewayClient.gateway_retry_max=<Int>\n    The maximum retries allowed for HTTP reconnection with the Gateway server.\n                (JUPYTER_GATEWAY_RETRY_MAX env var)\n    Default: 5\n--GatewayClient.headers=<Unicode>\n    Additional HTTP headers to pass on the request.  This value will be converted to a dict.\n              (JUPYTER_GATEWAY_HEADERS env var)\n    Default: '{}'\n--GatewayClient.http_pwd=<Unicode>\n    The password for HTTP authentication.  (JUPYTER_GATEWAY_HTTP_PWD env var)\n    Default: None\n--GatewayClient.http_user=<Unicode>\n    The username for HTTP authentication. 
(JUPYTER_GATEWAY_HTTP_USER env var)\n    Default: None\n--GatewayClient.kernels_endpoint=<Unicode>\n    The gateway API endpoint for accessing kernel resources\n    (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)\n    Default: '/api/kernels'\n--GatewayClient.kernelspecs_endpoint=<Unicode>\n    The gateway API endpoint for accessing kernelspecs\n    (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)\n    Default: '/api/kernelspecs'\n--GatewayClient.kernelspecs_resource_endpoint=<Unicode>\n    The gateway endpoint for accessing kernelspecs resources\n                (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)\n    Default: '/kernelspecs'\n--GatewayClient.request_timeout=<Float>\n    The time allowed for HTTP request completion.\n    (JUPYTER_GATEWAY_REQUEST_TIMEOUT env var)\n    Default: 40.0\n--GatewayClient.url=<Unicode>\n    The url of the Kernel or Enterprise Gateway server where\n            kernel specifications are defined and kernel management takes place.\n            If defined, this Notebook server acts as a proxy for all kernel\n            management and kernel specification retrieval.  (JUPYTER_GATEWAY_URL env var)\n    Default: None\n--GatewayClient.validate_cert=<Bool>\n    For HTTPS requests, determines if server's certificate should be validated or not.\n            (JUPYTER_GATEWAY_VALIDATE_CERT env var)\n    Default: True\n--GatewayClient.ws_url=<Unicode>\n    The websocket url of the Kernel or Enterprise Gateway server.  If not provided, this value\n            will correspond to the value of the Gateway url with 'ws' in place of 'http'.  (JUPYTER_GATEWAY_WS_URL env var)\n    Default: None\n```\n"
  },
  {
    "path": "docs/source/users/connecting-to-eg.md",
    "content": "# Connecting the server to Enterprise Gateway\n\nTo leverage the benefits of Enterprise Gateway, it's helpful to redirect a Jupyter server's kernel management to the Gateway server. This allows better separation of the user's notebooks from the managed computer cluster (Kubernetes, Hadoop YARN, Docker Swarm, etc.) on which Enterprise Gateway resides. A Jupyter server can be configured to relay kernel requests to an Enterprise Gateway server in several ways.\n\n## Command line\n\nTo instruct the server to connect to an Enterprise Gateway instance running on host `<EG_HOST_IP>` on port `<EG_PORT>`, the following command line options can be used:\n\n```bash\njupyter lab --gateway-url=http://<EG_HOST_IP>:<EG_PORT> --GatewayClient.http_user=guest --GatewayClient.http_pwd=guest-password\n```\n\n## Configuration file\n\nIf command line options are not appropriate for your environment, the Jupyter server configuration can be used to express Enterprise Gateway options. Note however, that command line options always override configuration file options:\n\nIn your `jupyter_server_config.py` file add the following for the equivalent options:\n\n```python\nc.GatewayClient.url = \"http://<EG_HOST_IP>:<EG_PORT>\"\nc.GatewayClient.http_user = \"guest\"\nc.GatewayClient.http_pwd = \"guest-password\"\n```\n\n## Docker image\n\nAll GatewayClient options have corresponding environment variable support, so if you have Jupyter Lab or Notebook already in a docker image, a corresponding docker invocation would look something like this:\n\n```bash\ndocker run -t --rm \\\n  -e JUPYTER_GATEWAY_URL='http://<EG_HOST_IP>:<EG_PORT>' \\\n  -e JUPYTER_GATEWAY_HTTP_USER=guest \\\n  -e JUPYTER_GATEWAY_HTTP_PWD=guest-password \\\n  -e LOG_LEVEL=DEBUG \\\n  -p 8888:8888 \\\n  -v ${HOME}/notebooks/:/tmp/notebooks \\\n  -w /tmp/notebooks \\\n  my-image\n```\n\nNotebook files residing in `${HOME}/notebooks` can then be accessed via `http://localhost:8888`.\n\n## Connection 
Timeouts\n\nSometimes, depending on the kind of cluster Enterprise Gateway is servicing, connection establishment and kernel startup can take a while (sometimes upwards of minutes). This is particularly true for managed clusters that perform scheduling like Hadoop YARN or Kubernetes. In these configurations it is important to configure both the connection and request timeout values.\n\nThese options are handled by the `GatewayClient.connect_timeout` (env: `JUPYTER_GATEWAY_CONNECT_TIMEOUT`) and `GatewayClient.request_timeout` (env: `JUPYTER_GATEWAY_REQUEST_TIMEOUT`) options and default to 40 seconds.\n\nThe `KERNEL_LAUNCH_TIMEOUT` environment variable will be set from these values or vice versa (whichever is greater). This value is used by EG to determine when it should give up on waiting for the kernel's startup to complete, while the other timeouts are used by Lab or Notebook when establishing the connection to EG.\n"
  },
  {
    "path": "docs/source/users/index.rst",
    "content": "Users Guide\n===========\n\nBecause Enterprise Gateway is a headless web server, it is typically accessed from other applications like JupyterLab and Jupyter Notebook.\n\n.. admonition:: Use cases\n\n    - *As a data scientist, I want to run my notebook using the Enterprise Gateway such that I can free up resources on my own laptop and leverage my company's large Hadoop YARN cluster to run my compute-intensive operations.*\n\n    - *As a student, my Data Science 101 course is leveraging GPUs in our experiments.  Since GPUs are expensive, we must share resources within the university's compute cluster and configure our Notebooks to leverage the department's Enterprise Gateway server, which can then spawn container-based kernels that have access to a GPU on Kubernetes.*\n\nThe following assumes an Enterprise Gateway server has been configured and deployed.  Please consult the `operators <../operators/index.html>`_ documentation to deploy and configure the Enterprise Gateway server.\n\n.. note::\n  There are two primary client applications that can use Enterprise Gateway, JupyterLab running on Jupyter Server and Jupyter Notebook.  When a reference to a *Jupyter server* (lowercase 'server') or *the server* is made, the reference applies to both Jupyter Server and Jupyter Notebook.  Generally speaking, the client-side behaviors are identical between the two, although references to Jupyter Server are preferred since it's more current.  If anything is different, that difference will be noted, otherwise, please assume discussion of the two are interchangeable.\n\n.. toctree::\n   :maxdepth: 1\n   :name: users\n\n   installation\n   connecting-to-eg\n   client-config\n   kernel-envs\n..\n   other clients (nbclient, papermill)\n"
  },
  {
    "path": "docs/source/users/installation.md",
    "content": "# Installing the client\n\nIn terms of Enterprise Gateway, the client application is typically Jupyter Server (hosting JupyterLab) or Jupyter Notebook. These applications are then configured to connect to Enterprise Gateway.\n\nTo install Jupyter Server via `pip`:\n\n```bash\npip install jupyter_server\n```\n\nor via `conda`:\n\n```bash\nconda install -c conda-forge jupyter_server\n```\n\nLikewise, for Jupyter Notebook via `pip`:\n\n```bash\npip install notebook\n```\n\nor via `conda`:\n\n```bash\nconda install -c conda-forge notebook\n```\n\nFor additional information regarding the installation of [Jupyter Server](https://jupyter-server.readthedocs.io/en/latest/index.html) or [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/latest/), please refer to their respective documentation (see embedded links).\n"
  },
  {
    "path": "docs/source/users/kernel-envs.md",
    "content": "# Kernel Environment Variables\n\nThe Enterprise Gateway client software will also include _any_ environment variables prefixed with `KERNEL_` in the start kernel request sent to the Enterprise Gateway Server. This enables the ability to _statically parameterize_ aspects of kernel start requests relative to other clients using the same Enterprise Gateway instance.\n\nThere are several supported `KERNEL_` variables that the Enterprise Gateway server looks for and uses, but others can be sent to customize behaviors. The following kernel-specific environment variables are used by Enterprise Gateway. As mentioned above, all `KERNEL_` variables submitted in the kernel startup request's JSON body will be available to the kernel for its launch.\n\n```text\n  KERNEL_GID=<from user> or 100\n    Containers only. This value represents the group id in which the container will run.\n    The default value is 100 representing the users group - which is how all kernel images\n    produced by Enterprise Gateway are built.  See also KERNEL_UID.\n    Kubernetes: Warning - If KERNEL_GID is set it is strongly recommended that feature-gate\n    RunAsGroup be enabled, otherwise, this value will be ignored and the pod will run as\n    the root group id.  As a result, the setting of this value into the Security Context\n    of the kernel pod is commented out in the kernel-pod.yaml file and must be enabled\n    by the administrator.\n    Docker: Warning - This value is only added to the supplemental group ids.  As a result,\n    if used with KERNEL_UID, the resulting container will run as the root group with this\n    value listed in its supplemental groups.\n\n  KERNEL_EXECUTOR_IMAGE=<from kernel.json process-proxy stanza> or KERNEL_IMAGE\n    Kubernetes Spark only. This indicates the image that Spark on Kubernetes will use\n    for its executors.  
Although this value could come from the user, it's strongly\n    recommended that the process-proxy stanza of the corresponding kernel's kernelspec\n    (kernel.json) file be updated to include the image name.  If no image name is\n    provided, the value of KERNEL_IMAGE will be used.\n\n  KERNEL_EXTRA_SPARK_OPTS=<from user>\n    Spark only. This variable allows users to add additional spark options to the\n    current set of options specified in the corresponding kernel.json file.  This\n    variable is purely optional with no default value.  In addition, it is the\n    responsibility of the user setting this value to ensure the options passed\n    are appropriate relative to the target environment.  Because this variable contains\n    space-separated values, it requires appropriate quotation.  For example, to use with\n    the notebook docker image jupyterhub/k8s-singleuser-sample, the environment variable would look something like\n    this:\n\n    docker run ... -e KERNEL_EXTRA_SPARK_OPTS=\"--conf spark.driver.memory=2g\n    --conf spark.executor.memory=2g\" ... jupyterhub/k8s-singleuser-sample\n\n  KERNEL_ID=<from user> or <system generated>\n    This value represents the identifier used by the Jupyter framework to identify\n    the kernel.  Although this value could be provided by the user, it is recommended\n    that it be generated by the system.\n\n  KERNEL_IMAGE=<from user> or <from kernel.json process-proxy stanza>\n    Containers only. This indicates the image to use for the kernel in containerized\n    environments - Kubernetes or Docker.  Although it can be provided by the user, it\n    is strongly recommended that the process-proxy stanza of the corresponding kernel's\n    kernelspec (kernel.json) file be updated to include the image name.\n\n  KERNEL_LAUNCH_TIMEOUT=<from user> or EG_KERNEL_LAUNCH_TIMEOUT\n    Indicates the time (in seconds) to allow for a kernel's launch.  
This value should\n    be submitted in the kernel startup if that particular kernel's startup time is\n    expected to exceed that of the EG_KERNEL_LAUNCH_TIMEOUT set when Enterprise\n    Gateway starts.\n\n  KERNEL_NAMESPACE=<from user> or KERNEL_POD_NAME or EG_NAMESPACE\n    Kubernetes only.  This indicates the name of the namespace to use or create on\n    Kubernetes in which the kernel pod will be located.  For users wishing to use a\n    pre-created namespace, this value should be submitted in the kernel startup\n    request.  In such cases, the user must also provide KERNEL_SERVICE_ACCOUNT_NAME.\n    If not provided, Enterprise Gateway will create a new namespace for the kernel\n    whose value is derived from KERNEL_POD_NAME.  In rare cases where\n    EG_SHARED_NAMESPACE is True, this value will be set to the value of EG_NAMESPACE.\n\n    Note that if the namespace is created by Enterprise Gateway, it will be removed\n    upon the kernel's termination.  Otherwise, the Enterprise Gateway will not\n    remove the namespace.\n\n  KERNEL_POD_NAME=<from user> or KERNEL_USERNAME-KERNEL_ID\n    Kubernetes only. By default, Enterprise Gateway will use a kernel pod name whose\n    value is derived from KERNEL_USERNAME and KERNEL_ID separated by a hyphen\n    ('-').  This variable is typically NOT provided by the user, but, in such\n    cases, Enterprise Gateway will honor that value.  However, when provided,\n    it is the user's responsibility that KERNEL_POD_NAME is unique relative to\n    any pods in the target namespace.  In addition, the pod must NOT exist -\n    unlike the case if KERNEL_NAMESPACE is provided. The KERNEL_POD_NAME can\n    also be provided as a template string using simple variable substitution\n    (e.g. \"{{ kernel_username }}-{{ kernel_id }}\"). 
Only simple\n    {{ variable_name }} references are supported -- Jinja2 filters and\n    expressions are NOT supported and will be rejected for security reasons.\n    Available variables include all KERNEL_* environment variables (lowercased,\n    e.g. kernel_username, kernel_namespace) plus kernel_id. Variable names\n    must start with a letter and contain only letters, digits, and underscores.\n    In case of invalid template syntax or missing variables, Enterprise Gateway\n    will fall back to the default pod name using KERNEL_USERNAME-KERNEL_ID.\n\n  KERNEL_REMOTE_HOST=<remote host name>\n    DistributedProcessProxy only.  When specified, this value will override the\n    configured load-balancing algorithm.\n\n  KERNEL_SERVICE_ACCOUNT_NAME=<from user> or EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME\n    Kubernetes only.  This value represents the name of the service account that\n    Enterprise Gateway should equate with the kernel pod.  If Enterprise Gateway\n    creates the kernel's namespace, it will be associated with the cluster role\n    identified by EG_KERNEL_CLUSTER_ROLE.  If not provided, it will be derived\n    from EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME.\n\n  KERNEL_SPARKAPP_CONFIG_MAP=<from user> or None\n    Spark k8s-operator only. The name of a Kubernetes ConfigMap which will be used to configure\n    the SparkApplication. See the SparkApplicationSpec\n    (https://googlecloudplatform.github.io/spark-on-k8s-operator/docs/api-docs.html#sparkoperator.k8s.io/v1beta2.SparkApplicationSpec)\n    sparkConfigMap for more information.\n\n  KERNEL_UID=<from user> or 1000\n    Containers only. This value represents the user id in which the container will run.\n    The default value is 1000 representing the jovyan user - which is how all kernel images\n    produced by Enterprise Gateway are built.  
See also KERNEL_GID.\n    Kubernetes: Warning - If KERNEL_UID is set it is strongly recommended that feature-gate\n    RunAsGroup be enabled and KERNEL_GID also be set, otherwise, the pod will run as\n    the root group id. As a result, the setting of this value into the Security Context\n    of the kernel pod is commented out in the kernel-pod.yaml file and must be enabled\n    by the administrator.\n\n  KERNEL_USERNAME=<from user> or <enterprise-gateway-user>\n    This value represents the logical name of the user submitting the request to\n    start the kernel. Of all the KERNEL_ variables, KERNEL_USERNAME is the one that\n    should be submitted in the request. In environments in which impersonation is\n    used it represents the target of the impersonation.\n\n  KERNEL_VOLUMES=<from user> or None\n    Kubernetes and Spark Operator only. A JSON-formatted string defining\n    Kubernetes volume specifications to mount into the kernel pod. The value\n    is parsed via yaml.safe_load and passed to the kernel pod or\n    SparkApplication template as the kernel_volumes variable. Example:\n    KERNEL_VOLUMES='[{\"name\": \"my-vol\", \"persistentVolumeClaim\": {\"claimName\": \"my-pvc\"}}]'\n    See the kernel-pod.yaml.j2 and sparkoperator templates for how volumes\n    are rendered.\n\n  KERNEL_VOLUME_MOUNTS=<from user> or None\n    Kubernetes and Spark Operator only. A JSON-formatted string defining\n    Kubernetes volumeMount specifications for the kernel container. The value\n    is parsed via yaml.safe_load and passed to the kernel pod or\n    SparkApplication template as the kernel_volume_mounts variable. Example:\n    KERNEL_VOLUME_MOUNTS='[{\"name\": \"my-vol\", \"mountPath\": \"/data\"}]'\n    Must correspond to volumes defined via KERNEL_VOLUMES.\n\n  KERNEL_WORKING_DIR=<from user> or None\n    Containers only.  This value should model the directory in which the active\n    notebook file is running.   
It is intended to be used in conjunction with appropriate volume\n    mounts in the kernel container such that the user's notebook filesystem exists\n    in the container and enables the sharing of resources used within the notebook.\n    As a result, the primary use case for this is for Jupyter Hub users running in\n    Kubernetes.  When a value is provided and EG_MIRROR_WORKING_DIRS=True, Enterprise\n    Gateway will set the container's working directory to the value specified in\n    KERNEL_WORKING_DIR.  If EG_MIRROR_WORKING_DIRS is False, KERNEL_WORKING_DIR will\n    not be available for use during the kernel's launch.  See also EG_MIRROR_WORKING_DIRS.\n```\n"
  },
  {
    "path": "enterprise_gateway/__init__.py",
    "content": "\"\"\"Lazy-loading entrypoint for the enterprise gateway package.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom ._version import __version__  # noqa\n\n\ndef launch_instance(*args, **kwargs):\n    from enterprise_gateway.enterprisegatewayapp import launch_instance\n\n    launch_instance(*args, **kwargs)\n"
  },
  {
    "path": "enterprise_gateway/__main__.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"CLI entrypoint for the enterprise gateway package.\"\"\"\n\nif __name__ == \"__main__\":\n    import enterprise_gateway.enterprisegatewayapp as app\n\n    app.launch_instance()\n"
  },
  {
    "path": "enterprise_gateway/_version.py",
    "content": "\"\"\"enterprise_gateway version info\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n__version__ = \"3.3.0.dev0\"\n"
  },
  {
    "path": "enterprise_gateway/base/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/base/handlers.py",
    "content": "\"\"\"Tornado handlers for the base of the API.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport json\nfrom typing import List\n\nimport jupyter_server._version\nfrom jupyter_server.base.handlers import APIHandler\nfrom tornado import web\n\nfrom .._version import __version__\nfrom ..mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin\n\n\nclass APIVersionHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler):\n    \"\"\" \"\n    Extends the jupyter_server base API handler with token auth, CORS, and\n    JSON errors to produce version information for jupyter_server and gateway.\n    \"\"\"\n\n    def get(self):\n        \"\"\"Get the API version.\"\"\"\n        # not authenticated, so give as few info as possible\n        # to be backwards compatibile, use only 'version' for the jupyter_server version\n        # and be more specific for gateway_version\n        self.finish(\n            json.dumps({\"version\": jupyter_server.__version__, \"gateway_version\": __version__})\n        )\n\n\nclass NotFoundHandler(JSONErrorsMixin, web.RequestHandler):\n    \"\"\"\n    Catches all requests and responds with 404 JSON messages.\n\n    Installed as the fallback error for all unhandled requests.\n\n    Raises\n    ------\n    tornado.web.HTTPError\n        Always 404 Not Found\n    \"\"\"\n\n    def prepare(self):\n        \"\"\"Prepare the response.\"\"\"\n        raise web.HTTPError(404)\n\n\ndefault_handlers: List[tuple] = [(r\"/api\", APIVersionHandler), (r\"/(.*)\", NotFoundHandler)]\n"
  },
  {
    "path": "enterprise_gateway/client/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/client/gateway_client.py",
    "content": "\"\"\"An Enterprise Gateway client.\"\"\"\n\nimport logging\nimport os\nimport queue\nimport time\nfrom threading import Thread\nfrom uuid import uuid4\n\nimport requests\nimport websocket\nfrom tornado.escape import json_decode, json_encode, utf8\n\nREQUEST_TIMEOUT = int(os.getenv(\"REQUEST_TIMEOUT\", 120))\nlog_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n\nlogging.basicConfig(format=\"[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s\")\n\n\nclass GatewayClient:\n    \"\"\"\n    *** E X P E R I M E N T A L *** *** E X P E R I M E N T A L ***\n\n    An experimental Gateway Client that is used for Enterprise Gateway\n    integration tests and can be leveraged for micro service type of\n    connections.\n    \"\"\"\n\n    DEFAULT_USERNAME = os.getenv(\"KERNEL_USERNAME\", \"bob\")\n    DEFAULT_GATEWAY_HOST = os.getenv(\"GATEWAY_HOST\", \"localhost:8888\")\n    KERNEL_LAUNCH_TIMEOUT = os.getenv(\"KERNEL_LAUNCH_TIMEOUT\", \"40\")\n\n    def __init__(self, host=DEFAULT_GATEWAY_HOST, use_secure_connection=False):\n        \"\"\"Initialize the client.\"\"\"\n        self.http_api_endpoint = (\n            f\"https://{host}/api/kernels\" if use_secure_connection else f\"http://{host}/api/kernels\"\n        )\n        self.ws_api_endpoint = (\n            f\"wss://{host}/api/kernels\" if use_secure_connection else f\"ws://{host}/api/kernels\"\n        )\n        self.log = logging.getLogger(\"GatewayClient\")\n        self.log.setLevel(log_level)\n\n    def start_kernel(\n        self, kernelspec_name, username=DEFAULT_USERNAME, timeout=REQUEST_TIMEOUT, extra_env=None\n    ):\n        \"\"\"Start a kernel.\"\"\"\n        self.log.info(f\"Starting a {kernelspec_name} kernel ....\")\n\n        if extra_env is None:\n            extra_env = {}\n\n        env = {\n            \"KERNEL_USERNAME\": username,\n            \"KERNEL_LAUNCH_TIMEOUT\": GatewayClient.KERNEL_LAUNCH_TIMEOUT,\n        }\n        env.update(extra_env)\n\n        json_data = 
{\n            \"name\": kernelspec_name,\n            \"env\": env,\n        }\n\n        response = requests.post(self.http_api_endpoint, data=json_encode(json_data), timeout=60)\n        if response.status_code == 201:\n            json_data = response.json()\n            kernel_id = json_data.get(\"id\")\n            self.log.info(f\"Started kernel with id {kernel_id}\")\n        else:\n            msg = \"Error starting kernel : {} response code \\n {}\".format(\n                response.status_code, response.content\n            )\n            raise RuntimeError(msg)\n\n        return KernelClient(\n            self.http_api_endpoint,\n            self.ws_api_endpoint,\n            kernel_id,\n            timeout=timeout,\n            logger=self.log,\n        )\n\n    def shutdown_kernel(self, kernel):\n        \"\"\"Shut down a kernel.\"\"\"\n        self.log.info(f\"Shutting down kernel : {kernel.kernel_id} ....\")\n\n        if not kernel:\n            return False\n\n        kernel.shutdown()\n\n\nclass KernelClient:\n    \"\"\"A kernel client class.\"\"\"\n\n    DEAD_MSG_ID = \"deadbeefdeadbeefdeadbeefdeadbeef\"\n    POST_IDLE_TIMEOUT = 0.5\n    DEFAULT_INTERRUPT_WAIT = 1\n\n    def __init__(\n        self, http_api_endpoint, ws_api_endpoint, kernel_id, timeout=REQUEST_TIMEOUT, logger=None\n    ):\n        \"\"\"Initialize the client.\"\"\"\n        self.shutting_down = False\n        self.restarting = False\n        self.http_api_endpoint = http_api_endpoint\n        self.kernel_http_api_endpoint = f\"{http_api_endpoint}/{kernel_id}\"\n        self.ws_api_endpoint = ws_api_endpoint\n        self.kernel_ws_api_endpoint = f\"{ws_api_endpoint}/{kernel_id}/channels\"\n        self.kernel_id = kernel_id\n        self.log = logger\n        self.kernel_socket = None\n        self.response_reader = Thread(target=self._read_responses)\n        self.response_queues = {}\n        self.interrupt_thread = None\n        self.log.debug(f\"Initializing kernel client 
({kernel_id}) to {self.kernel_ws_api_endpoint}\")\n\n        try:\n            self.kernel_socket = websocket.create_connection(\n                f\"{ws_api_endpoint}/{kernel_id}/channels\", timeout=timeout, enable_multithread=True\n            )\n        except Exception as e:\n            self.log.error(e)\n            self.shutdown()\n            raise e\n\n        # startup reader thread\n        self.response_reader.start()\n\n    def shutdown(self):\n        \"\"\"Shut down the client.\"\"\"\n        # Terminate thread, close socket and clear queues.\n        self.shutting_down = True\n\n        if self.kernel_socket:\n            self.kernel_socket.close()\n            self.kernel_socket = None\n\n        if self.response_queues:\n            self.response_queues.clear()\n            self.response_queues = None\n\n        if self.response_reader:\n            self.response_reader.join(timeout=2.0)\n            if self.response_reader.is_alive():\n                self.log.warning(\"Response reader thread is not terminated, continuing...\")\n            self.response_reader = None\n\n        url = f\"{self.http_api_endpoint}/{self.kernel_id}\"\n        response = requests.delete(url, timeout=60)\n        if response.status_code == 204:\n            self.log.info(f\"Kernel {self.kernel_id} shutdown\")\n            return True\n        else:\n            msg = f\"Error shutting down kernel {self.kernel_id}: {response.content}\"\n            raise RuntimeError(msg)\n\n    def execute(self, code, timeout=REQUEST_TIMEOUT):\n        \"\"\"\n        Executes the code provided and returns the result of that execution.\n        \"\"\"\n        response = []\n        has_error = False\n        try:\n            msg_id = self._send_request(code)\n\n            post_idle = False\n            while True:\n                response_message = self._get_response(msg_id, timeout, post_idle)\n                if response_message:\n                    response_message_type = 
response_message[\"msg_type\"]\n\n                    if response_message_type == \"error\" or (\n                        response_message_type == \"execute_reply\"\n                        and response_message[\"content\"][\"status\"] == \"error\"\n                    ):\n                        has_error = True\n                        response.append(\n                            \"{}:{}:{}\".format(\n                                response_message[\"content\"][\"ename\"],\n                                response_message[\"content\"][\"evalue\"],\n                                response_message[\"content\"][\"traceback\"],\n                            )\n                        )\n                    elif response_message_type == \"stream\":\n                        response.append(\n                            KernelClient._convert_raw_response(response_message[\"content\"][\"text\"])\n                        )\n\n                    elif (\n                        response_message_type == \"execute_result\"\n                        or response_message_type == \"display_data\"\n                    ):\n                        if \"text/plain\" in response_message[\"content\"][\"data\"]:\n                            response.append(\n                                KernelClient._convert_raw_response(\n                                    response_message[\"content\"][\"data\"][\"text/plain\"]\n                                )\n                            )\n                        elif \"text/html\" in response_message[\"content\"][\"data\"]:\n                            response.append(\n                                KernelClient._convert_raw_response(\n                                    response_message[\"content\"][\"data\"][\"text/html\"]\n                                )\n                            )\n                    elif response_message_type == \"status\":\n                        if response_message[\"content\"][\"execution_state\"] == 
\"idle\":\n                            post_idle = True  # indicate we're at the logical end and timeout poll for next message\n                            continue\n                    else:\n                        self.log.debug(\n                            \"Unhandled response for msg_id: {} of msg_type: {}\".format(\n                                msg_id, response_message_type\n                            )\n                        )\n\n                if (\n                    response_message is None\n                ):  # We timed out.  If post idle, its ok, else make mention of it\n                    if not post_idle:\n                        self.log.warning(\n                            f\"Unexpected timeout occurred for msg_id: {msg_id} - no 'idle' status received!\"\n                        )\n                    break\n\n        except Exception as e:\n            self.log.debug(e)\n\n        return \"\".join(response), has_error\n\n    def interrupt(self):\n        \"\"\"Interrupt the kernel.\"\"\"\n        url = \"{}/{}\".format(self.kernel_http_api_endpoint, \"interrupt\")\n        response = requests.post(url, timeout=60)\n        if response.status_code == 204:\n            self.log.debug(f\"Kernel {self.kernel_id} interrupted\")\n            return True\n        else:\n            msg = f\"Unexpected response interrupting kernel {self.kernel_id}: {response.content}\"\n            raise RuntimeError(msg)\n\n    def restart(self, timeout=REQUEST_TIMEOUT):\n        \"\"\"Restart the kernel.\"\"\"\n        self.restarting = True\n        self.kernel_socket.close()\n        self.kernel_socket = None\n        url = \"{}/{}\".format(self.kernel_http_api_endpoint, \"restart\")\n        response = requests.post(url, timeout=60)\n        if response.status_code == 200:\n            self.log.debug(f\"Kernel {self.kernel_id} restarted\")\n            self.kernel_socket = websocket.create_connection(\n                self.kernel_ws_api_endpoint, 
timeout=timeout, enable_multithread=True\n            )\n            self.restarting = False\n            return True\n        else:\n            self.restarting = False\n            msg = f\"Unexpected response restarting kernel {self.kernel_id}: {response.content}\"\n            self.log.debug(msg)\n            raise RuntimeError(msg)\n\n    def get_state(self):\n        \"\"\"Get the state of the client.\"\"\"\n        url = f\"{self.kernel_http_api_endpoint}\"\n        response = requests.get(url, timeout=60)\n        if response.status_code == 200:\n            json = response.json()\n            self.log.debug(f\"Kernel {self.kernel_id} state: {json}\")\n            return json[\"execution_state\"]\n        else:\n            msg = \"Unexpected response retrieving state for kernel {}: {}\".format(\n                self.kernel_id, response.content\n            )\n            raise RuntimeError(msg)\n\n    def start_interrupt_thread(self, wait_time=DEFAULT_INTERRUPT_WAIT):\n        \"\"\"Start the interrupt thread.\"\"\"\n        self.interrupt_thread = Thread(target=self.perform_interrupt, args=(wait_time,))\n        self.interrupt_thread.start()\n\n    def perform_interrupt(self, wait_time):\n        \"\"\"Perform an interrupt on the client.\"\"\"\n        time.sleep(wait_time)  # Allow parent to start executing cell to interrupt\n        self.interrupt()\n\n    def terminate_interrupt_thread(self):\n        \"\"\"Terminate the interrupt thread.\"\"\"\n        if self.interrupt_thread:\n            self.interrupt_thread.join()\n            self.interrupt_thread = None\n\n    def _send_request(self, code):\n        \"\"\"\n        Builds the request and submits it to the kernel.  Prior to sending the request it\n        creates an empty response queue and adds it to the dictionary using msg_id as the\n        key.  
The msg_id is returned in order to read responses.\n        \"\"\"\n        msg_id = uuid4().hex\n        message = KernelClient.__create_execute_request(msg_id, code)\n\n        # create response-queue and add to map for this msg_id\n        self.response_queues[msg_id] = queue.Queue()\n\n        self.kernel_socket.send(message)\n\n        return msg_id\n\n    def _get_response(self, msg_id, timeout, post_idle):\n        \"\"\"\n        Pulls the next response message from the queue corresponding to msg_id.  If post_idle is true,\n        the timeout parameter is set to a very short value since a majority of time, there won't be a\n        message in the queue.  However, in cases where a race condition occurs between the idle status\n        and the execute_result payload - where the two are out of order, then this will pickup the result.\n        \"\"\"\n\n        if post_idle and timeout > KernelClient.POST_IDLE_TIMEOUT:\n            timeout = (\n                KernelClient.POST_IDLE_TIMEOUT\n            )  # overwrite timeout to small value following idle messages.\n\n        msg_queue = self.response_queues.get(msg_id)\n        try:\n            self.log.debug(f\"Getting response for msg_id: {msg_id} with timeout: {timeout}\")\n            response = msg_queue.get(timeout=timeout)\n            self.log.debug(\n                \"Got response for msg_id: {}, msg_type: {}\".format(\n                    msg_id, response[\"msg_type\"] if response else \"null\"\n                )\n            )\n        except queue.Empty:\n            response = None\n\n        return response\n\n    def _read_responses(self):\n        \"\"\"\n        Reads responses from the websocket.  For each response read, it is added to the response queue based\n        on the messages parent_header.msg_id.  
It does this for the duration of the class's lifetime until its\n        shutdown method is called, at which time the socket is closed (unblocking the reader) and the thread\n        terminates.  If shutdown happens to occur while processing a response (unlikely), termination takes\n        place via the loop control boolean.\n        \"\"\"\n        try:\n            while not self.shutting_down:\n                try:\n                    raw_message = self.kernel_socket.recv()\n                    response_message = json_decode(utf8(raw_message))\n\n                    msg_id = KernelClient._get_msg_id(response_message, self.log)\n\n                    if msg_id not in self.response_queues:\n                        # this will happen when the msg_id is generated by the server\n                        self.response_queues[msg_id] = queue.Queue()\n\n                    # insert into queue\n                    self.log.debug(\n                        \"Inserting response for msg_id: {}, msg_type: {}\".format(\n                            msg_id, response_message[\"msg_type\"]\n                        )\n                    )\n                    self.response_queues.get(msg_id).put_nowait(response_message)\n                except BaseException as be1:\n                    if (\n                        self.restarting\n                    ):  # If restarting, wait until restart has completed - which includes new socket\n                        i = 1\n                        while self.restarting:\n                            if i >= 10 and i % 2 == 0:\n                                self.log.debug(f\"Still restarting after {i} secs...\")\n                            time.sleep(1)\n                            i += 1\n                        continue\n                    raise be1\n\n        except websocket.WebSocketConnectionClosedException:\n            pass  # websocket closure most likely due to shutdown\n\n        except BaseException as be2:\n            if not 
self.shutting_down:\n                self.log.warning(f\"Unexpected exception encountered ({be2})\")\n\n        self.log.debug(\"Response reader thread exiting...\")\n\n    @staticmethod\n    def _get_msg_id(message, logger):\n        msg_id = KernelClient.DEAD_MSG_ID\n        if message:\n            if \"msg_id\" in message[\"parent_header\"] and message[\"parent_header\"][\"msg_id\"]:\n                msg_id = message[\"parent_header\"][\"msg_id\"]\n            elif \"msg_id\" in message:\n                # msg_id may not be in the parent_header, see if present in response\n                # IPython kernel appears to do this after restarts with a 'starting' status\n                msg_id = message[\"msg_id\"]\n        else:  # Dump the \"dead\" message...\n            logger.debug(f\"+++++ Dumping dead message: {message}\")\n        return msg_id\n\n    @staticmethod\n    def _convert_raw_response(raw_response_message):\n        result = raw_response_message\n        if isinstance(raw_response_message, str) and \"u'\" in raw_response_message:\n            result = raw_response_message.replace(\"u'\", \"\")[:-1]\n\n        return result\n\n    @staticmethod\n    def __create_execute_request(msg_id, code):\n        return json_encode(\n            {\n                \"header\": {\n                    \"username\": \"\",\n                    \"version\": \"5.0\",\n                    \"session\": \"\",\n                    \"msg_id\": msg_id,\n                    \"msg_type\": \"execute_request\",\n                },\n                \"parent_header\": {},\n                \"channel\": \"shell\",\n                \"content\": {\n                    \"code\": \"\".join(code),\n                    \"silent\": False,\n                    \"store_history\": False,\n                    \"user_expressions\": {},\n                    \"allow_stdin\": False,\n                },\n                \"metadata\": {},\n                \"buffers\": {},\n            }\n        )\n"
  },
  {
    "path": "enterprise_gateway/enterprisegatewayapp.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Enterprise Gateway Jupyter application.\"\"\"\n\nimport asyncio\nimport errno\nimport getpass\nimport logging\nimport os\nimport signal\nimport ssl\nimport sys\nimport time\nimport weakref\nfrom typing import ClassVar, List, Optional\n\nfrom jupyter_client.kernelspec import KernelSpecManager\nfrom jupyter_core.application import JupyterApp, base_aliases\nfrom jupyter_server.serverapp import random_ports\nfrom jupyter_server.utils import url_path_join\nfrom tornado import httpserver, web\nfrom tornado.log import enable_pretty_logging\nfrom traitlets.config import Configurable\nfrom zmq.eventloop import ioloop\n\nfrom ._version import __version__\nfrom .base.handlers import default_handlers as default_base_handlers\nfrom .mixins import EnterpriseGatewayConfigMixin\nfrom .services.api.handlers import default_handlers as default_api_handlers\nfrom .services.kernels.handlers import default_handlers as default_kernel_handlers\nfrom .services.kernels.remotemanager import RemoteMappingKernelManager\nfrom .services.kernelspecs import KernelSpecCache\nfrom .services.kernelspecs.handlers import default_handlers as default_kernelspec_handlers\nfrom .services.sessions.handlers import default_handlers as default_session_handlers\nfrom .services.sessions.kernelsessionmanager import (\n    FileKernelSessionManager,\n    WebhookKernelSessionManager,\n)\nfrom .services.sessions.sessionmanager import SessionManager\n\n# Add additional command line aliases\naliases = dict(base_aliases)\naliases.update(\n    {\n        \"ip\": \"EnterpriseGatewayApp.ip\",\n        \"port\": \"EnterpriseGatewayApp.port\",\n        \"port_retries\": \"EnterpriseGatewayApp.port_retries\",\n        \"keyfile\": \"EnterpriseGatewayApp.keyfile\",\n        \"certfile\": \"EnterpriseGatewayApp.certfile\",\n        \"client-ca\": \"EnterpriseGatewayApp.client_ca\",\n        
\"ssl_version\": \"EnterpriseGatewayApp.ssl_version\",\n    }\n)\n\n\nclass EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp):\n    \"\"\"\n    Application that provisions Jupyter kernels and proxies HTTP/Websocket\n    traffic to the kernels.\n\n    - reads command line and environment variable settings\n    - initializes managers and routes\n    - creates a Tornado HTTP server\n    - starts the Tornado event loop\n    \"\"\"\n\n    name = \"jupyter-enterprise-gateway\"\n    version = __version__\n    description = \"\"\"\n        Jupyter Enterprise Gateway\n\n        Provisions remote Jupyter kernels and proxies HTTP/Websocket traffic to them.\n    \"\"\"\n\n    # Also include when generating help options\n    classes: ClassVar = [\n        KernelSpecCache,\n        FileKernelSessionManager,\n        WebhookKernelSessionManager,\n        RemoteMappingKernelManager,\n    ]\n\n    # Enable some command line shortcuts\n    aliases = aliases\n\n    def initialize(self, argv: Optional[List[str]] = None) -> None:\n        \"\"\"Initializes the base class, configurable manager instances, the\n        Tornado web app, and the tornado HTTP server.\n\n        Parameters\n        ----------\n        argv\n            Command line arguments\n        \"\"\"\n        super().initialize(argv)\n        self.init_configurables()\n        self.init_webapp()\n        self.init_http_server()\n\n    def init_configurables(self) -> None:\n        \"\"\"Initializes all configurable objects including a kernel manager, kernel\n        spec manager, session manager, and personality.\n        \"\"\"\n        self.kernel_spec_manager = KernelSpecManager(parent=self)\n\n        self.kernel_spec_manager = self.kernel_spec_manager_class(\n            parent=self,\n        )\n\n        self.kernel_spec_cache = self.kernel_spec_cache_class(\n            parent=self, kernel_spec_manager=self.kernel_spec_manager\n        )\n\n        # Only pass a default kernel name when one is 
provided. Otherwise,\n        # adopt whatever default the kernel manager wants to use.\n        kwargs = {}\n        if self.default_kernel_name:\n            kwargs[\"default_kernel_name\"] = self.default_kernel_name\n\n        self.kernel_manager = self.kernel_manager_class(\n            parent=self,\n            log=self.log,\n            connection_dir=self.runtime_dir,\n            kernel_spec_manager=self.kernel_spec_manager,\n            **kwargs,\n        )\n\n        self.session_manager = SessionManager(log=self.log, kernel_manager=self.kernel_manager)\n\n        self.kernel_session_manager = self.kernel_session_manager_class(\n            parent=self,\n            log=self.log,\n            kernel_manager=self.kernel_manager,\n            config=self.config,  # required to get command-line options visible\n        )\n\n        # For B/C purposes, check if session persistence is enabled.  If so, and availability\n        # mode is not enabled, go ahead and default availability mode to 'multi-instance'.\n        if self.kernel_session_manager.enable_persistence:\n            if self.availability_mode is None:\n                self.availability_mode = EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION\n                self.log.info(\n                    f\"Kernel session persistence is enabled but availability mode is not.  \"\n                    f\"Setting EnterpriseGatewayApp.availability_mode to '{self.availability_mode}'.\"\n                )\n        else:\n            # Persistence is not enabled, check if availability_mode is configured and, if so,\n            # auto-enable persistence\n            if self.availability_mode is not None:\n                self.kernel_session_manager.enable_persistence = True\n                self.log.info(\n                    f\"Availability mode is set to '{self.availability_mode}' yet kernel session \"\n                    \"persistence is not enabled.  
Enabling kernel session persistence.\"\n                )\n\n        # If we're using single-instance availability, attempt to start persisted sessions\n        if self.availability_mode == EnterpriseGatewayConfigMixin.AVAILABILITY_STANDALONE:\n            self.kernel_session_manager.start_sessions()\n\n        self.contents_manager = None  # Gateways don't use contents manager\n\n        self.init_dynamic_configs()\n\n    def _create_request_handlers(self) -> List[tuple]:\n        \"\"\"Create default Jupyter handlers and redefine them off of the\n        base_url path. Assumes init_configurables() has already been called.\n        \"\"\"\n        handlers = []\n\n        # append tuples for the standard kernel gateway endpoints\n        for handler in (\n            default_api_handlers\n            + default_kernel_handlers\n            + default_kernelspec_handlers\n            + default_session_handlers\n            + default_base_handlers\n        ):\n            # Create a new handler pattern rooted at the base_url\n            pattern = url_path_join(\"/\", self.base_url, handler[0])\n            # Some handlers take args, so retain those in addition to the\n            # handler class ref\n            new_handler = (pattern, *list(handler[1:]))\n            if self.authorized_origin:\n                self.__add_authorized_hostname_match(new_handler)\n\n            handlers.append(new_handler)\n        return handlers\n\n    def __add_authorized_hostname_match(self, handler: tuple) -> None:\n        base_prepare = handler[1].prepare\n        authorized_hostname = self.authorized_origin\n\n        def wrapped_prepare(self):\n            ssl_cert = self.request.get_ssl_certificate()\n            try:\n                ssl.match_hostname(ssl_cert, authorized_hostname)\n            except ssl.SSLCertVerificationError:\n                raise web.HTTPError(403, \"Forbidden\") from None\n            base_prepare(self)\n\n        handler[1].prepare = 
wrapped_prepare\n\n    def init_webapp(self) -> None:\n        \"\"\"Initializes Tornado web application with uri handlers.\n\n        Adds the various managers and web-front configuration values to the\n        Tornado settings for reference by the handlers.\n        \"\"\"\n        # Enable the same pretty logging the server uses\n        enable_pretty_logging()\n\n        # Configure the tornado logging level too\n        logging.getLogger().setLevel(self.log_level)\n\n        handlers = self._create_request_handlers()\n\n        # Instantiate the configured authorizer class\n        self.log.info(f\"Using authorizer: {self.authorizer_class}\")\n        authorizer = self.authorizer_class(parent=self, log=self.log)\n\n        self.web_app = web.Application(\n            handlers=handlers,\n            kernel_manager=self.kernel_manager,\n            session_manager=self.session_manager,\n            contents_manager=self.contents_manager,\n            kernel_spec_manager=self.kernel_spec_manager,\n            kernel_spec_cache=self.kernel_spec_cache,\n            eg_auth_token=self.auth_token,\n            eg_allow_credentials=self.allow_credentials,\n            eg_allow_headers=self.allow_headers,\n            eg_allow_methods=self.allow_methods,\n            eg_allow_origin=self.allow_origin,\n            eg_expose_headers=self.expose_headers,\n            eg_max_age=self.max_age,\n            eg_max_kernels=self.max_kernels,\n            eg_inherited_envs=self.inherited_envs,\n            eg_client_envs=self.client_envs,\n            eg_kernel_headers=self.kernel_headers,\n            eg_list_kernels=self.list_kernels,\n            eg_authorized_users=self.authorized_users,\n            eg_unauthorized_users=self.unauthorized_users,\n            # Also set the allow_origin setting used by jupyter_server so that the\n            # check_origin method used everywhere respects the value\n            allow_origin=self.allow_origin,\n            # Set base_url for 
use in request handlers\n            base_url=self.base_url,\n            # Always allow remote access (has been limited to localhost >= notebook 5.6)\n            allow_remote_access=True,\n            # setting ws_ping_interval value that can allow it to be modified for the purpose of toggling ping mechanism\n            # for zmq web-sockets or increasing/decreasing web socket ping interval/timeouts.\n            ws_ping_interval=self.ws_ping_interval * 1000,\n            # Use configurable authorizer\n            authorizer=authorizer,\n        )\n\n    def _build_ssl_options(self) -> Optional[ssl.SSLContext]:\n        \"\"\"Build an SSLContext for the tornado HTTP server.\"\"\"\n        if not any((self.certfile, self.keyfile, self.client_ca)):\n            # None indicates no SSL config\n            return None\n\n        ssl_context = ssl.SSLContext(protocol=self.ssl_version or self.ssl_version_default_value)\n        if self.certfile:\n            ssl_context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)\n        if self.client_ca:\n            ssl_context.load_verify_locations(cafile=self.client_ca)\n            ssl_context.verify_mode = ssl.CERT_REQUIRED\n\n        return ssl_context\n\n    def init_http_server(self) -> None:\n        \"\"\"Initializes an HTTP server for the Tornado web application on the\n        configured interface and port.\n\n        Tries to find an open port if the one configured is not available using\n        the same logic as the Jupyter Notebook server.\n        \"\"\"\n        ssl_options = self._build_ssl_options()\n        self.http_server = httpserver.HTTPServer(\n            self.web_app, xheaders=self.trust_xheaders, ssl_options=ssl_options\n        )\n\n        for port in random_ports(self.port, self.port_retries + 1):\n            try:\n                self.http_server.listen(port, self.ip)\n            except OSError as e:\n                if e.errno == errno.EADDRINUSE:\n                    
self.log.info(\"The port %i is already in use, trying another port.\" % port)\n                    continue\n                elif e.errno in (errno.EACCES, getattr(errno, \"WSAEACCES\", errno.EACCES)):\n                    self.log.warning(\"Permission to listen on port %i denied\" % port)\n                    continue\n                else:\n                    raise\n            else:\n                self.port = port\n                break\n        else:\n            self.log.critical(\n                \"ERROR: the gateway server could not be started because \"\n                \"no available port could be found.\"\n            )\n            self.exit(1)\n\n    def start(self) -> None:\n        \"\"\"Starts an IO loop for the application.\"\"\"\n\n        super().start()\n\n        self.log.info(\n            \"Jupyter Enterprise Gateway {} is available at http{}://{}:{}\".format(\n                EnterpriseGatewayApp.version, \"s\" if self.keyfile else \"\", self.ip, self.port\n            )\n        )\n        # If impersonation is enabled, issue a warning message if the gateway user is not in unauthorized_users.\n        if self.impersonation_enabled:\n            gateway_user = getpass.getuser()\n            if gateway_user.lower() not in self.unauthorized_users:\n                self.log.warning(\n                    \"Impersonation is enabled and gateway user '{}' is NOT specified in the set of \"\n                    \"unauthorized users!  
Kernels may execute as that user with elevated privileges.\".format(\n                        gateway_user\n                    )\n                )\n\n        self.io_loop = ioloop.IOLoop.current()\n\n        if sys.platform != \"win32\":\n            signal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n        signal.signal(signal.SIGTERM, self._signal_stop)\n\n        try:\n            self.io_loop.start()\n        except KeyboardInterrupt:\n            self.log.info(\"Interrupted...\")\n            # Ignore further interrupts (ctrl-c)\n            signal.signal(signal.SIGINT, signal.SIG_IGN)\n        finally:\n            self.shutdown()\n\n    def shutdown(self) -> None:\n        \"\"\"Shuts down all running kernels.\"\"\"\n        self.log.info(\"Jupyter Enterprise Gateway is shutting down all running kernels\")\n        kids = self.kernel_manager.list_kernel_ids()\n        for kid in kids:\n            try:\n                asyncio.get_event_loop().run_until_complete(\n                    self.kernel_manager.shutdown_kernel(kid, now=True)\n                )\n            except Exception as ex:\n                self.log.warning(f\"Failed to shut down kernel {kid}: {ex}\")\n        self.log.info(\"Shut down complete\")\n\n    def stop(self) -> None:\n        \"\"\"\n        Stops the HTTP server and IO loop associated with the application.\n        \"\"\"\n\n        def _stop():\n            self.http_server.stop()\n            self.io_loop.stop()\n\n        self.io_loop.add_callback(_stop)\n\n    def _signal_stop(self, sig, frame) -> None:\n        self.log.info(\"Received signal to terminate Enterprise Gateway.\")\n        self.io_loop.add_callback_from_signal(self.io_loop.stop)\n\n    _last_config_update = int(time.time())\n    _dynamic_configurables: ClassVar = {}\n\n    def update_dynamic_configurables(self) -> bool:\n        \"\"\"\n        Called periodically, this checks the set of loaded configuration files for updates.\n        If updates have been 
detected, reload the configuration files and update the list of\n        configurables participating in dynamic updates.\n        :return: True if updates were taken\n        \"\"\"\n        updated = False\n        configs = []\n        for file in self.loaded_config_files:\n            mod_time = int(os.path.getmtime(file))\n            if mod_time > self._last_config_update:\n                self.log.debug(f\"Config file was updated: {file}!\")\n                self._last_config_update = mod_time\n                updated = True\n\n        if updated:\n            # If config changes are present, reload the config files.  This will also update\n            # the Application's configuration, then update the config of each configurable\n            # from the newly loaded values.\n\n            self.load_config_file(self)\n\n            for config_name, configurable in self._dynamic_configurables.items():\n                # Since Application.load_config_file calls update_config on the Application, skip\n                # the configurable registered with self (i.e., the application).\n                if configurable is not self:\n                    configurable.update_config(self.config)\n                configs.append(config_name)\n\n            self.log.info(\n                \"Configuration file changes detected.  
Instances for the following \"\n                f\"configurables have been updated: {configs}\"\n            )\n        return updated\n\n    def add_dynamic_configurable(self, config_name: str, configurable: Configurable) -> None:\n        \"\"\"\n        Adds the configurable instance associated with the given name to the list of Configurables\n        that can have their configurations updated when configuration file updates are detected.\n        :param config_name: the name of the config within this application\n        :param configurable: the configurable instance corresponding to that config\n        \"\"\"\n        if not isinstance(configurable, Configurable):\n            msg = f\"'{configurable}' is not a subclass of Configurable!\"\n            raise RuntimeError(msg)\n\n        self._dynamic_configurables[config_name] = weakref.proxy(configurable)\n\n    def init_dynamic_configs(self) -> None:\n        \"\"\"\n        Initialize the set of configurables that should participate in dynamic updates.  We should\n        also log that we're performing dynamic configuration updates, along with the list of CLI\n        options - that are not privy to dynamic updates.\n        :return:\n        \"\"\"\n        if self.dynamic_config_interval > 0:\n            self.add_dynamic_configurable(\"EnterpriseGatewayApp\", self)\n            self.add_dynamic_configurable(\"MappingKernelManager\", self.kernel_manager)\n            self.add_dynamic_configurable(\"KernelSpecManager\", self.kernel_spec_manager)\n            self.add_dynamic_configurable(\"KernelSessionManager\", self.kernel_session_manager)\n\n            self.log.info(\n                \"Dynamic updates have been configured.  
Checking every {} seconds.\".format(\n                    self.dynamic_config_interval\n                )\n            )\n\n            self.log.info(\n                \"The following configuration options will not be subject to dynamic updates \"\n                \"(configured via CLI):\"\n            )\n            for config, options in self.cli_config.items():\n                for option, value in options.items():\n                    self.log.info(f\"    '{config}.{option}': '{value}'\")\n\n            if self.dynamic_config_poller is None:\n                self.dynamic_config_poller = ioloop.PeriodicCallback(\n                    self.update_dynamic_configurables, self.dynamic_config_interval * 1000\n                )\n            self.dynamic_config_poller.start()\n\n\nlaunch_instance = EnterpriseGatewayApp.launch_instance\n"
  },
  {
    "path": "enterprise_gateway/itests/__init__.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom tornado import ioloop\n\n\ndef teardown():\n    \"\"\"The test fixture appears to leak something on certain platforms that\n    endlessly tries an async socket connect and fails after the tests end.\n    As a stopgap, force a cleanup here.\n    \"\"\"\n    ioloop.IOLoop.current().stop()\n    ioloop.IOLoop.current().close(True)\n"
  },
  {
    "path": "enterprise_gateway/itests/kernels/authorization_test/kernel.json",
    "content": "{\n  \"display_name\": \"Authorization Testing\",\n  \"language\": \"python\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy\",\n      \"config\": {\n        \"authorized_users\": \"bob,alice,bad_guy\",\n        \"unauthorized_users\": \"bad_guy\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\"python\", \"-m\", \"ipykernel_launcher\", \"-f\", \"{connection_file}\"]\n}\n"
  },
  {
    "path": "enterprise_gateway/itests/test_authorization.py",
    "content": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\n\nclass TestAuthorization(unittest.TestCase):\n    KERNELSPEC = os.getenv(\"AUTHORIZATION_KERNEL_NAME\", \"authorization_test\")\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        # initialize environment\n        cls.gateway_client = GatewayClient()\n\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_authorized_users(self):\n        kernel = None\n        try:\n            kernel = self.gateway_client.start_kernel(TestAuthorization.KERNELSPEC, username=\"bob\")\n            result, has_error = kernel.execute(\"print('The cow jumped over the moon.')\")\n            self.assertEqual(result, \"The cow jumped over the moon.\\n\")\n            self.assertEqual(has_error, False)\n        finally:\n            if kernel:\n                self.gateway_client.shutdown_kernel(kernel)\n\n    def test_unauthorized_users(self):\n        kernel = None\n        try:\n            kernel = self.gateway_client.start_kernel(\n                TestAuthorization.KERNELSPEC, username=\"bad_guy\"\n            )\n            self.assertTrue(False, msg=\"Unauthorization exception expected!\")\n        except Exception as be:\n            self.assertRegex(be.args[0], \"403\")\n        finally:\n            if kernel:\n                self.gateway_client.shutdown_kernel(kernel)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "enterprise_gateway/itests/test_base.py",
    "content": "import os\n\nexpected_hostname = os.getenv(\"ITEST_HOSTNAME_PREFIX\", \"\") + \"*\"  # use ${KERNEL_USERNAME} on k8s\nexpected_application_id = os.getenv(\n    \"EXPECTED_APPLICATION_ID\", \"application_*\"\n)  # use 'spark-application-*' on k8s\nexpected_spark_version = os.getenv(\"EXPECTED_SPARK_VERSION\", \"3.2.*\")  # use '2.4.*' on k8s\nexpected_spark_master = os.getenv(\"EXPECTED_SPARK_MASTER\", \"yarn\")  # use 'k8s:*' on k8s\nexpected_deploy_mode = os.getenv(\"EXPECTED_DEPLOY_MODE\", \"(cluster|client)\")  # use 'client' on k8s\n\n\nclass TestBase:\n    def get_expected_application_id(self):\n        return expected_application_id\n\n    def get_expected_spark_version(self):\n        return expected_spark_version\n\n    def get_expected_spark_master(self):\n        return expected_spark_master\n\n    def get_expected_deploy_mode(self):\n        return expected_deploy_mode\n\n    def get_expected_hostname(self):\n        return expected_hostname\n"
  },
  {
    "path": "enterprise_gateway/itests/test_python_kernel.py",
    "content": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import TestBase\n\n\nclass PythonKernelBaseTestCase(TestBase):\n    \"\"\"\n    Python related test cases common to vanilla IPython kernels\n    \"\"\"\n\n    def test_get_hostname(self):\n        result, has_error = self.kernel.execute(\n            \"import subprocess; subprocess.check_output(['hostname'])\"\n        )\n        self.assertEqual(has_error, False)\n        self.assertRegex(result, self.get_expected_hostname())\n\n    def test_hello_world(self):\n        result, has_error = self.kernel.execute(\"print('Hello World')\")\n        self.assertEqual(has_error, False)\n        self.assertRegex(result, \"Hello World\")\n\n    def test_restart(self):\n        # 1. Set a variable to a known value.\n        # 2. Restart the kernel\n        # 3. Attempt to increment the variable, verify an error was received (due to undefined variable)\n\n        self.kernel.execute(\"x = 123\")\n        original_value, has_error = self.kernel.execute(\"print(x)\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        self.assertTrue(self.kernel.restart())\n\n        error_result, has_error = self.kernel.execute(\"y = x + 1\")\n        self.assertRegex(error_result, \"NameError\")\n        self.assertEqual(has_error, True)\n\n    def test_interrupt(self):\n        # 1. Set a variable to a known value.\n        # 2. Spawn a thread that will perform an interrupt after some number of seconds,\n        # 3. Issue a long-running command - that spans during of interrupt thread wait time,\n        # 4. Interrupt the kernel,\n        # 5. 
Attempt to increment the variable, verify expected result.\n\n        self.kernel.execute(\"x = 123\")\n        original_value, has_error = self.kernel.execute(\"print(x)\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        # Start a thread that performs the interrupt.  This thread must wait long enough to issue\n        # the next cell execution.\n        self.kernel.start_interrupt_thread()\n\n        # Build the code list to interrupt, in this case, its a sleep call.\n        interrupted_code = []\n        interrupted_code.append(\"import time\\n\")\n        interrupted_code.append(\"print('begin')\\n\")\n        interrupted_code.append(\"time.sleep(60)\\n\")\n        interrupted_code.append(\"print('end')\\n\")\n\n        interrupted_result, has_error = self.kernel.execute(interrupted_code)\n\n        # Ensure the result indicates an interrupt occurred\n        self.assertRegex(interrupted_result, \"KeyboardInterrupt\")\n        self.assertEqual(has_error, True)\n\n        # Wait for thread to terminate - should be terminated already\n        self.kernel.terminate_interrupt_thread()\n\n        # Increment the pre-interrupt variable and ensure its value is correct\n        self.kernel.execute(\"y = x + 1\")\n        interrupted_value, has_error = self.kernel.execute(\"print(y)\")\n        self.assertEqual(int(interrupted_value), 124)\n        self.assertEqual(has_error, False)\n\n    def test_scope(self):\n        # Ensure global variable is accessible in function.\n        # See https://github.com/jupyter-server/enterprise_gateway/issues/687\n        # Build the example code...\n        scope_code = []\n        scope_code.append(\"a = 42\\n\")\n        scope_code.append(\"def scope():\\n\")\n        scope_code.append(\"    return a\\n\")\n        scope_code.append(\"\\n\")\n        scope_code.append(\"scope()\\n\")\n        result, has_error = self.kernel.execute(scope_code)\n        
self.assertEqual(result, str(42))\n        self.assertEqual(has_error, False)\n\n\nclass PythonKernelBaseSparkTestCase(PythonKernelBaseTestCase):\n    \"\"\"\n    Python related tests cases common to Spark on Yarn\n    \"\"\"\n\n    def test_get_application_id(self):\n        result, has_error = self.kernel.execute(\"sc.getConf().get('spark.app.id')\")\n        self.assertRegex(result, self.get_expected_application_id())\n        self.assertEqual(has_error, False)\n\n    def test_get_deploy_mode(self):\n        result, has_error = self.kernel.execute(\"sc.getConf().get('spark.submit.deployMode')\")\n        self.assertRegex(result, self.get_expected_deploy_mode())\n        self.assertEqual(has_error, False)\n\n    def test_get_resource_manager(self):\n        result, has_error = self.kernel.execute(\"sc.getConf().get('spark.master')\")\n        self.assertRegex(result, self.get_expected_spark_master())\n        self.assertEqual(has_error, False)\n\n    def test_get_spark_version(self):\n        result, has_error = self.kernel.execute(\"sc.version\")\n        self.assertRegex(result, self.get_expected_spark_version())\n        self.assertEqual(has_error, False)\n\n    @unittest.skip(\"Temporarily disabled\")\n    def test_run_pi_example(self):\n        # Build the example code...\n        pi_code = []\n        pi_code.append(\"from random import random\\n\")\n        pi_code.append(\"from operator import add\\n\")\n        pi_code.append(\"partitions = 20\\n\")\n        pi_code.append(\"n = 100000 * partitions\\n\")\n        pi_code.append(\"def f(_):\\n\")\n        pi_code.append(\"    x = random() * 2 - 1\\n\")\n        pi_code.append(\"    y = random() * 2 - 1\\n\")\n        pi_code.append(\"    return 1 if x ** 2 + y ** 2 <= 1 else 0\\n\")\n        pi_code.append(\"count = sc.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\\n\")\n        pi_code.append('print(\"Pi is roughly %f\" % (4.0 * count / n))\\n')\n        result, has_error = 
self.kernel.execute(pi_code)\n        self.assertRegex(result, \"Pi is roughly 3.14*\")\n        self.assertEqual(has_error, False)\n\n\nclass TestPythonKernelLocal(unittest.TestCase, PythonKernelBaseTestCase):\n    KERNELSPEC = os.getenv(\"PYTHON_KERNEL_LOCAL_NAME\", \"python3\")\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestPythonKernelDistributed(unittest.TestCase, PythonKernelBaseTestCase):\n    KERNELSPEC = os.getenv(\n        \"PYTHON_KERNEL_DISTRIBUTED_NAME\", \"python_distributed\"\n    )  # python_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestPythonKernelClient(unittest.TestCase, PythonKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"PYTHON_KERNEL_CLIENT_NAME\", \"spark_python_yarn_client\"\n    )  # spark_python_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Python kernel using {cls.KERNELSPEC} 
kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestPythonKernelCluster(unittest.TestCase, PythonKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"PYTHON_KERNEL_CLUSTER_NAME\", \"spark_python_yarn_cluster\"\n    )  # spark_python_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "enterprise_gateway/itests/test_r_kernel.py",
    "content": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import TestBase\n\n\nclass RKernelBaseTestCase(TestBase):\n    \"\"\"\n    R related test cases common to vanilla IRKernel kernels\n    \"\"\"\n\n    def test_get_hostname(self):\n        result, has_error = self.kernel.execute('system(\"hostname\", intern=TRUE)')\n        self.assertRegex(result, self.get_expected_hostname())\n        self.assertEqual(has_error, False)\n\n    def test_hello_world(self):\n        result, has_error = self.kernel.execute('print(\"Hello World\", quote = FALSE)')\n        self.assertRegex(result, \"Hello World\")\n        self.assertEqual(has_error, False)\n\n    def test_restart(self):\n        # 1. Set a variable to a known value.\n        # 2. Restart the kernel\n        # 3. Attempt to increment the variable, verify an error was received (due to undefined variable)\n\n        self.kernel.execute(\"x = 123\")\n        original_value, has_error = self.kernel.execute(\"write(x,stdout())\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        self.assertTrue(self.kernel.restart())\n\n        error_result, has_error = self.kernel.execute(\"y = x + 1\")\n        self.assertRegex(error_result, \"Error in eval\")\n        self.assertEqual(has_error, True)\n\n    def test_interrupt(self):\n        # 1. Set a variable to a known value.\n        # 2. Spawn a thread that will perform an interrupt after some number of seconds,\n        # 3. Issue a long-running command - that spans during of interrupt thread wait time,\n        # 4. Interrupt the kernel,\n        # 5. 
Attempt to increment the variable, verify expected result.\n\n        self.kernel.execute(\"x = 123\")\n        original_value, has_error = self.kernel.execute(\"write(x,stdout())\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        # Start a thread that performs the interrupt.  This thread must wait long enough to issue\n        # the next cell execution.\n        self.kernel.start_interrupt_thread()\n\n        # Build the code list to interrupt, in this case, its a sleep call.\n        interrupted_code = []\n        interrupted_code.append('write(\"begin\",stdout())\\n')\n        interrupted_code.append(\"Sys.sleep(30)\\n\")\n        interrupted_code.append('write(\"end\",stdout())\\n')\n        interrupted_result, has_error = self.kernel.execute(interrupted_code)\n\n        # Ensure the result indicates an interrupt occurred\n        self.assertEqual(interrupted_result.strip(), \"begin\")\n        self.assertEqual(has_error, False)\n\n        # Wait for thread to terminate - should be terminated already\n        self.kernel.terminate_interrupt_thread()\n\n        # Increment the pre-interrupt variable and ensure its value is correct\n        self.kernel.execute(\"y = x + 1\")\n        interrupted_value, has_error = self.kernel.execute(\"write(y,stdout())\")\n        self.assertEqual(int(interrupted_value), 124)\n        self.assertEqual(has_error, False)\n\n\nclass RKernelBaseSparkTestCase(RKernelBaseTestCase):\n    \"\"\"\n    R related tests cases common to Spark on Yarn\n    \"\"\"\n\n    def test_get_application_id(self):\n        result, has_error = self.kernel.execute(\n            'SparkR:::callJMethod(SparkR:::callJMethod(sc, \"sc\"), \"applicationId\")'\n        )\n        self.assertRegex(result, self.get_expected_application_id())\n        self.assertEqual(has_error, False)\n\n    def test_get_spark_version(self):\n        result, has_error = self.kernel.execute(\"sparkR.version()\")\n        
self.assertRegex(result, self.get_expected_spark_version())\n        self.assertEqual(has_error, False)\n\n    def test_get_resource_manager(self):\n        result, has_error = self.kernel.execute('unlist(sparkR.conf(\"spark.master\"))')\n        self.assertRegex(result, self.get_expected_spark_master())\n        self.assertEqual(has_error, False)\n\n    def test_get_deploy_mode(self):\n        result, has_error = self.kernel.execute('unlist(sparkR.conf(\"spark.submit.deployMode\"))')\n        self.assertRegex(result, self.get_expected_deploy_mode())\n        self.assertEqual(has_error, False)\n\n\nclass TestRKernelLocal(unittest.TestCase, RKernelBaseTestCase):\n    KERNELSPEC = os.getenv(\"R_KERNEL_LOCAL_NAME\", \"ir\")  # R_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestRKernelClient(unittest.TestCase, RKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"R_KERNEL_CLIENT_NAME\", \"spark_R_yarn_client\"\n    )  # spark_R_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        
cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestRKernelCluster(unittest.TestCase, RKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"R_KERNEL_CLUSTER_NAME\", \"spark_R_yarn_cluster\"\n    )  # spark_R_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down R kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "enterprise_gateway/itests/test_scala_kernel.py",
    "content": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import TestBase\n\n\nclass ScalaKernelBaseTestCase(TestBase):\n    \"\"\"\n    Scala related test cases common to vanilla Scala kernels\n    \"\"\"\n\n    def test_get_hostname(self):\n        result, has_error = self.kernel.execute(\n            \"import java.net._; \\\n                                      val localhost: InetAddress = InetAddress.getLocalHost; \\\n                                      val localIpAddress: String = localhost.getHostName\"\n        )\n        self.assertRegex(result, self.get_expected_hostname())\n        self.assertEqual(has_error, False)\n\n    def test_hello_world(self):\n        result, has_error = self.kernel.execute('println(\"Hello World\")')\n        self.assertRegex(result, \"Hello World\")\n        self.assertEqual(has_error, False)\n\n    def test_restart(self):\n        # 1. Set a variable to a known value.\n        # 2. Restart the kernel\n        # 3. Attempt to increment the variable, verify an error was received (due to undefined variable)\n\n        self.kernel.execute(\"var x = 123\")\n        original_value, has_error = self.kernel.execute(\"x\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        self.assertTrue(self.kernel.restart())\n\n        error_result, has_error = self.kernel.execute(\"var y = x + 1\")\n        self.assertRegex(error_result, \"not found: value x\")\n        self.assertEqual(has_error, True)\n\n    def test_interrupt(self):\n        # 1. Set a variable to a known value.\n        # 2. Spawn a thread that will perform an interrupt after some number of seconds,\n        # 3. Issue a long-running command - that spans during of interrupt thread wait time,\n        # 4. Interrupt the kernel,\n        # 5. 
Attempt to increment the variable, verify expected result.\n\n        self.kernel.execute(\"var x = 123\")\n        original_value, has_error = self.kernel.execute(\"x\")\n        self.assertEqual(int(original_value), 123)\n        self.assertEqual(has_error, False)\n\n        # Start a thread that performs the interrupt.  This thread must wait long enough to issue\n        # the next cell execution.\n        self.kernel.start_interrupt_thread()\n\n        # Build the code list to interrupt, in this case, its a sleep call.\n        interrupted_code = []\n        interrupted_code.append('println(\"begin\")\\n')\n        interrupted_code.append(\"Thread.sleep(60000)\\n\")\n        interrupted_code.append('println(\"end\")\\n')\n        interrupted_result, has_error = self.kernel.execute(interrupted_code)\n\n        # Ensure the result indicates an interrupt occurred\n        self.assertRegex(interrupted_result, \"java.lang.InterruptedException\")\n        self.assertEqual(has_error, True)\n\n        # Wait for thread to terminate - should be terminated already\n        self.kernel.terminate_interrupt_thread()\n\n        # Increment the pre-interrupt variable and ensure its value is correct\n        self.kernel.execute(\"var y = x + 1\")\n        interrupted_value, has_error = self.kernel.execute(\"y\")\n        self.assertEqual(int(interrupted_value), 124)\n        self.assertEqual(has_error, False)\n\n\nclass ScalaKernelBaseSparkTestCase(ScalaKernelBaseTestCase):\n    \"\"\"\n    Scala related tests cases common to Spark (with Yarn the default RM)\n    \"\"\"\n\n    def test_get_application_id(self):\n        result, has_error = self.kernel.execute(\"sc.applicationId\")\n        self.assertRegex(result, self.get_expected_application_id())\n        self.assertEqual(has_error, False)\n\n    def test_get_spark_version(self):\n        result, has_error = self.kernel.execute(\"sc.version\")\n        self.assertRegex(result, self.get_expected_spark_version())\n        
self.assertEqual(has_error, False)\n\n    def test_get_resource_manager(self):\n        result, has_error = self.kernel.execute('sc.getConf.get(\"spark.master\")')\n        self.assertRegex(result, self.get_expected_spark_master())\n        self.assertEqual(has_error, False)\n\n    def test_get_deploy_mode(self):\n        result, has_error = self.kernel.execute('sc.getConf.get(\"spark.submit.deployMode\")')\n        self.assertRegex(result, self.get_expected_deploy_mode())\n        self.assertEqual(has_error, False)\n\n\nclass TestScalaKernelLocal(unittest.TestCase, ScalaKernelBaseTestCase):\n    SPARK_VERSION = os.getenv(\"SPARK_VERSION\")\n    DEFAULT_KERNELSPEC = f\"spark_{SPARK_VERSION}_scala\"\n    KERNELSPEC = os.getenv(\n        \"SCALA_KERNEL_LOCAL_NAME\", DEFAULT_KERNELSPEC\n    )  # scala_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestScalaKernelClient(unittest.TestCase, ScalaKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"SCALA_KERNEL_CLIENT_NAME\", \"spark_scala_yarn_client\"\n    )  # spark_scala_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        
super().tearDownClass()\n        print(f\"\\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nclass TestScalaKernelCluster(unittest.TestCase, ScalaKernelBaseSparkTestCase):\n    KERNELSPEC = os.getenv(\n        \"SCALA_KERNEL_CLUSTER_NAME\", \"spark_scala_yarn_cluster\"\n    )  # spark_scala_kubernetes for k8s\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        print(f\"\\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # initialize environment\n        cls.gatewayClient = GatewayClient()\n        cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC)\n\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        print(f\"\\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec\")\n\n        # shutdown environment\n        cls.gatewayClient.shutdown_kernel(cls.kernel)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "enterprise_gateway/mixins.py",
    "content": "\"\"\"Mixins for Tornado handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport json\nimport os\nimport ssl\nimport traceback\nfrom distutils.util import strtobool\nfrom http.client import responses\nfrom typing import Any, Awaitable, ClassVar, Dict, List, Optional, Set\n\nfrom tornado import web\nfrom tornado.log import LogFormatter\nfrom traitlets import (\n    Bool,\n    CaselessStrEnum,\n    CBool,\n    Instance,\n    Integer,\n    TraitError,\n    Type,\n    Unicode,\n    default,\n    observe,\n    validate,\n)\nfrom traitlets import List as ListTrait\nfrom traitlets import Set as SetTrait\nfrom traitlets.config import Configurable\n\n\nclass CORSMixin:\n    \"\"\"\n    Mixes CORS headers into tornado.web.RequestHandlers.\n    \"\"\"\n\n    SETTINGS_TO_HEADERS: ClassVar = {\n        \"eg_allow_credentials\": \"Access-Control-Allow-Credentials\",\n        \"eg_allow_headers\": \"Access-Control-Allow-Headers\",\n        \"eg_allow_methods\": \"Access-Control-Allow-Methods\",\n        \"eg_allow_origin\": \"Access-Control-Allow-Origin\",\n        \"eg_expose_headers\": \"Access-Control-Expose-Headers\",\n        \"eg_max_age\": \"Access-Control-Max-Age\",\n    }\n\n    def set_default_headers(self) -> None:\n        \"\"\"\n        Sets the CORS headers as the default for all responses.\n\n        Disables CSP configured by the notebook package. 
It's not necessary\n        for a programmatic API.\n        \"\"\"\n        super().set_default_headers()\n        # Add CORS headers after default if they have a non-blank value\n        for settings_name, header_name in self.SETTINGS_TO_HEADERS.items():\n            header_value = self.settings.get(settings_name)\n            if header_value:\n                self.set_header(header_name, header_value)\n\n        # Don't set CSP: we're not serving frontend media types, only JSON\n        self.clear_header(\"Content-Security-Policy\")\n\n    def options(self) -> None:\n        \"\"\"\n        Override the notebook implementation to return the headers\n        configured in `set_default_headers instead of the hardcoded set\n        supported by the handler base class in the notebook project.\n        \"\"\"\n        self.finish()\n\n\nclass TokenAuthorizationMixin:\n    \"\"\"Mixes token auth into tornado.web.RequestHandlers and\n    tornado.websocket.WebsocketHandlers.\n    \"\"\"\n\n    header_prefix = \"token \"\n    header_prefix_len = len(header_prefix)\n\n    def prepare(self) -> Optional[Awaitable[None]]:\n        \"\"\"Ensures the correct auth token is present, either as a parameter\n        `token=<value>` or as a header `Authorization: token <value>`.\n        Does nothing unless an auth token is configured in eg_auth_token.\n\n        If eg_auth_token is set and the token is not present, responds\n        with 401 Unauthorized.\n\n        Notes\n        -----\n        Implemented in prepare rather than in `get_user` to avoid interaction\n        with the `@web.authenticated` decorated methods in the notebook\n        package.\n        \"\"\"\n        server_token = self.settings.get(\"eg_auth_token\")\n        if server_token and self.request.method != \"OPTIONS\":\n            client_token = self.get_argument(\"token\", None)\n            if client_token is None:\n                client_token = self.request.headers.get(\"Authorization\")\n               
 if client_token and client_token.startswith(self.header_prefix):\n                    client_token = client_token[self.header_prefix_len :]\n                else:\n                    client_token = None\n            if client_token != server_token:\n                return self.send_error(401)\n        return super().prepare()\n\n\nclass JSONErrorsMixin:\n    \"\"\"Mixes `write_error` into tornado.web.RequestHandlers to respond with\n    JSON format errors.\n    \"\"\"\n\n    def write_error(self, status_code: int, **kwargs) -> None:\n        \"\"\"Responds with an application/json error object.\n\n        Overrides the APIHandler.write_error in the notebook server until it\n        properly sets the 'reason' field.\n\n        Parameters\n        ----------\n        status_code\n            HTTP status code to set\n        **kwargs\n            Arbitrary keyword args. Only uses `exc_info[1]`, if it exists,\n            to get a `log_message`, `args`, and `reason` from a raised\n            exception that triggered this method\n\n        Examples\n        --------\n        {\"401\", reason=\"Unauthorized\", message=\"Invalid auth token\"}\n        \"\"\"\n        exc_info = kwargs.get(\"exc_info\")\n        message = \"\"\n        reason = responses.get(status_code, \"Unknown HTTP Error\")\n        reply = {\n            \"reason\": reason,\n            \"message\": message,\n        }\n        if exc_info:\n            exception = exc_info[1]\n            # Get the custom message, if defined\n            if isinstance(exception, web.HTTPError):\n                reply[\"message\"] = exception.log_message or message\n            else:\n                reply[\"message\"] = \"Unknown server error\"\n                reply[\"traceback\"] = \"\".join(traceback.format_exception(*exc_info))\n\n            # Construct the custom reason, if defined\n            custom_reason = getattr(exception, \"reason\", \"\")\n            if custom_reason:\n                
reply[\"reason\"] = custom_reason\n\n        self.set_header(\"Content-Type\", \"application/json\")\n        self.set_status(status_code, reason=reply[\"reason\"])\n        self.finish(json.dumps(reply))\n\n\nclass EnterpriseGatewayConfigMixin(Configurable):\n    \"\"\"A mixin for enterprise gateway config.\"\"\"\n\n    # Server IP / PORT binding\n    port_env = \"EG_PORT\"\n    port_default_value = 8888\n    port = Integer(\n        port_default_value, config=True, help=\"Port on which to listen (EG_PORT env var)\"\n    )\n\n    @default(\"port\")\n    def _port_default(self) -> int:\n        return int(os.getenv(self.port_env, os.getenv(\"KG_PORT\", self.port_default_value)))\n\n    port_retries_env = \"EG_PORT_RETRIES\"\n    port_retries_default_value = 50\n    port_retries = Integer(\n        port_retries_default_value,\n        config=True,\n        help=\"\"\"Number of ports to try if the specified port is not available\n                           (EG_PORT_RETRIES env var)\"\"\",\n    )\n\n    @default(\"port_retries\")\n    def _port_retries_default(self) -> int:\n        return int(\n            os.getenv(\n                self.port_retries_env, os.getenv(\"KG_PORT_RETRIES\", self.port_retries_default_value)\n            )\n        )\n\n    ip_env = \"EG_IP\"\n    ip_default_value = \"127.0.0.1\"\n    ip = Unicode(\n        ip_default_value, config=True, help=\"IP address on which to listen (EG_IP env var)\"\n    )\n\n    @default(\"ip\")\n    def _ip_default(self) -> str:\n        return os.getenv(self.ip_env, os.getenv(\"KG_IP\", self.ip_default_value))\n\n    # Base URL\n    base_url_env = \"EG_BASE_URL\"\n    base_url_default_value = \"/\"\n    base_url = Unicode(\n        base_url_default_value,\n        config=True,\n        help=\"The base path for mounting all API resources (EG_BASE_URL env var)\",\n    )\n\n    @default(\"base_url\")\n    def _base_url_default(self) -> str:\n        return os.getenv(self.base_url_env, os.getenv(\"KG_BASE_URL\", 
self.base_url_default_value))\n\n    # Token authorization\n    auth_token_env = \"EG_AUTH_TOKEN\"  # noqa\n    auth_token = Unicode(\n        config=True, help=\"Authorization token required for all requests (EG_AUTH_TOKEN env var)\"\n    )\n\n    @default(\"auth_token\")\n    def _auth_token_default(self) -> str:\n        return os.getenv(self.auth_token_env, os.getenv(\"KG_AUTH_TOKEN\", \"\"))\n\n    # Begin CORS headers\n    allow_credentials_env = \"EG_ALLOW_CREDENTIALS\"\n    allow_credentials = Unicode(\n        config=True,\n        help=\"Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var)\",\n    )\n\n    @default(\"allow_credentials\")\n    def _allow_credentials_default(self) -> str:\n        return os.getenv(self.allow_credentials_env, os.getenv(\"KG_ALLOW_CREDENTIALS\", \"\"))\n\n    allow_headers_env = \"EG_ALLOW_HEADERS\"\n    allow_headers = Unicode(\n        config=True, help=\"Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)\"\n    )\n\n    @default(\"allow_headers\")\n    def _allow_headers_default(self) -> str:\n        return os.getenv(self.allow_headers_env, os.getenv(\"KG_ALLOW_HEADERS\", \"\"))\n\n    allow_methods_env = \"EG_ALLOW_METHODS\"\n    allow_methods = Unicode(\n        config=True, help=\"Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)\"\n    )\n\n    @default(\"allow_methods\")\n    def _allow_methods_default(self) -> str:\n        return os.getenv(self.allow_methods_env, os.getenv(\"KG_ALLOW_METHODS\", \"\"))\n\n    allow_origin_env = \"EG_ALLOW_ORIGIN\"\n    allow_origin = Unicode(\n        config=True, help=\"Sets the Access-Control-Allow-Origin header. 
(EG_ALLOW_ORIGIN env var)\"\n    )\n\n    @default(\"allow_origin\")\n    def _allow_origin_default(self) -> str:\n        return os.getenv(self.allow_origin_env, os.getenv(\"KG_ALLOW_ORIGIN\", \"\"))\n\n    expose_headers_env = \"EG_EXPOSE_HEADERS\"\n    expose_headers = Unicode(\n        config=True,\n        help=\"Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)\",\n    )\n\n    @default(\"expose_headers\")\n    def _expose_headers_default(self) -> str:\n        return os.getenv(self.expose_headers_env, os.getenv(\"KG_EXPOSE_HEADERS\", \"\"))\n\n    trust_xheaders_env = \"EG_TRUST_XHEADERS\"\n    trust_xheaders = CBool(\n        False,\n        config=True,\n        help=\"\"\"Use x-* header values for overriding the remote-ip, useful when\n                           application is behind a proxy. (EG_TRUST_XHEADERS env var)\"\"\",\n    )\n\n    @default(\"trust_xheaders\")\n    def _trust_xheaders_default(self) -> bool:\n        return strtobool(\n            os.getenv(self.trust_xheaders_env, os.getenv(\"KG_TRUST_XHEADERS\", \"False\"))\n        )\n\n    certfile_env = \"EG_CERTFILE\"\n    certfile = Unicode(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)\",\n    )\n\n    @default(\"certfile\")\n    def _certfile_default(self) -> Optional[str]:\n        return os.getenv(self.certfile_env, os.getenv(\"KG_CERTFILE\"))\n\n    keyfile_env = \"EG_KEYFILE\"\n    keyfile = Unicode(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"The full path to a private key file for usage with SSL/TLS. 
(EG_KEYFILE env var)\",\n    )\n\n    @default(\"keyfile\")\n    def _keyfile_default(self) -> Optional[str]:\n        return os.getenv(self.keyfile_env, os.getenv(\"KG_KEYFILE\"))\n\n    client_ca_env = \"EG_CLIENT_CA\"\n    client_ca = Unicode(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"\"\"The full path to a certificate authority certificate for SSL/TLS\n                        client authentication. (EG_CLIENT_CA env var)\"\"\",\n    )\n\n    @default(\"client_ca\")\n    def _client_ca_default(self) -> Optional[str]:\n        return os.getenv(self.client_ca_env, os.getenv(\"KG_CLIENT_CA\"))\n\n    ssl_version_env = \"EG_SSL_VERSION\"\n    ssl_version_default_value = ssl.PROTOCOL_TLSv1_2\n    ssl_version = Integer(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"\"\"Sets the SSL version to use for the web socket\n                          connection. (EG_SSL_VERSION env var)\"\"\",\n    )\n\n    @default(\"ssl_version\")\n    def _ssl_version_default(self) -> Optional[int]:\n        ssl_from_env = os.getenv(self.ssl_version_env, os.getenv(\"KG_SSL_VERSION\"))\n        return ssl_from_env if ssl_from_env is None else int(ssl_from_env)\n\n    max_age_env = \"EG_MAX_AGE\"\n    max_age = Unicode(\n        config=True, help=\"Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)\"\n    )\n\n    @default(\"max_age\")\n    def _max_age_default(self) -> str:\n        return os.getenv(self.max_age_env, os.getenv(\"KG_MAX_AGE\", \"\"))\n\n    # End CORS headers\n\n    max_kernels_env = \"EG_MAX_KERNELS\"\n    max_kernels = Integer(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"\"\"Limits the number of kernel instances allowed to run by this gateway.\n                          Unbounded by default. 
(EG_MAX_KERNELS env var)\"\"\",\n    )\n\n    @default(\"max_kernels\")\n    def _max_kernels_default(self) -> Optional[int]:\n        val = os.getenv(self.max_kernels_env, os.getenv(\"KG_MAX_KERNELS\"))\n        return val if val is None else int(val)\n\n    default_kernel_name_env = \"EG_DEFAULT_KERNEL_NAME\"\n    default_kernel_name = Unicode(\n        config=True,\n        help=\"Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)\",\n    )\n\n    @default(\"default_kernel_name\")\n    def _default_kernel_name_default(self) -> str:\n        # defaults to Jupyter's default kernel name on empty string\n        return os.getenv(self.default_kernel_name_env, os.getenv(\"KG_DEFAULT_KERNEL_NAME\", \"\"))\n\n    list_kernels_env = \"EG_LIST_KERNELS\"\n    list_kernels = Bool(\n        config=True,\n        help=\"\"\"Permits listing of the running kernels using API endpoints /api/kernels\n                        and /api/sessions. (EG_LIST_KERNELS env var) Note: Jupyter Notebook\n                        allows this by default but Jupyter Enterprise Gateway does not.\"\"\",\n    )\n\n    @default(\"list_kernels\")\n    def _list_kernels_default(self) -> bool:\n        return (\n            os.getenv(self.list_kernels_env, os.getenv(\"KG_LIST_KERNELS\", \"False\")).lower()\n            == \"true\"\n        )\n\n    env_whitelist = ListTrait(\n        config=True,\n        help=\"\"\"DEPRECATED, use client_envs.\"\"\",\n    )\n\n    @observe(\"env_whitelist\")\n    def _update_env_whitelist(self, change):\n        self.log.warning(\"env_whitelist is deprecated, use client_envs\")\n        self.client_envs = change[\"new\"]\n\n    client_envs_env = \"EG_CLIENT_ENVS\"\n    client_envs = ListTrait(\n        config=True,\n        help=\"\"\"Environment variables allowed to be set when a client requests a\n                               new kernel. 
(EG_CLIENT_ENVS env var)\"\"\",\n    )\n\n    @default(\"client_envs\")\n    def _client_envs_default(self):\n        return os.getenv(self.client_envs_env, os.getenv(\"EG_ENV_WHITELIST\", \"\")).split(\",\")\n\n    env_process_whitelist = ListTrait(\n        config=True,\n        help=\"\"\"DEPRECATED, use inherited_envs\"\"\",\n    )\n\n    @observe(\"env_process_whitelist\")\n    def _update_env_process_whitelist(self, change):\n        self.log.warning(\"env_process_whitelist is deprecated, use inherited_envs\")\n        self.inherited_envs = change[\"new\"]\n\n    inherited_envs_env = \"EG_INHERITED_ENVS\"\n    inherited_envs = ListTrait(\n        config=True,\n        help=\"\"\"Environment variables allowed to be inherited\n                                from the spawning process by the kernel. (EG_INHERITED_ENVS env var)\"\"\",\n    )\n\n    @default(\"inherited_envs\")\n    def _inherited_envs_default(self) -> List[str]:\n        return os.getenv(self.inherited_envs_env, os.getenv(\"EG_ENV_PROCESS_WHITELIST\", \"\")).split(\n            \",\"\n        )\n\n    kernel_headers_env = \"EG_KERNEL_HEADERS\"\n    kernel_headers = ListTrait(\n        config=True,\n        help=\"\"\"Request headers to make available to kernel launch framework.\n                          (EG_KERNEL_HEADERS env var)\"\"\",\n    )\n\n    @default(\"kernel_headers\")\n    def _kernel_headers_default(self) -> List[str]:\n        default_headers = os.getenv(self.kernel_headers_env)\n        return default_headers.split(\",\") if default_headers else []\n\n    # Remote hosts\n    remote_hosts_env = \"EG_REMOTE_HOSTS\"\n    remote_hosts_default_value = \"localhost\"\n    remote_hosts = ListTrait(\n        default_value=[remote_hosts_default_value],\n        config=True,\n        help=\"\"\"Bracketed comma-separated list of hosts on which DistributedProcessProxy\n                        kernels will be launched e.g., ['host1','host2']. 
(EG_REMOTE_HOSTS env var\n                        - non-bracketed, just comma-separated)\"\"\",\n    )\n\n    @default(\"remote_hosts\")\n    def _remote_hosts_default(self) -> List[str]:\n        return os.getenv(self.remote_hosts_env, self.remote_hosts_default_value).split(\",\")\n\n    # load_balancing_algorithm\n    load_balancing_algorithm_env = \"EG_LOAD_BALANCING_ALGORITHM\"\n    load_balancing_algorithm_default_value = \"round-robin\"\n    load_balancing_algorithm = Unicode(\n        load_balancing_algorithm_default_value,\n        config=True,\n        help=\"\"\"Specifies which load balancing algorithm DistributedProcessProxy should use.\n            Must be one of \"round-robin\" or \"least-connection\".  (EG_LOAD_BALANCING_ALGORITHM\n            env var)\n            \"\"\",\n    )\n\n    @default(\"load_balancing_algorithm\")\n    def _load_balancing_algorithm_default(self) -> str:\n        return os.getenv(\n            self.load_balancing_algorithm_env, self.load_balancing_algorithm_default_value\n        )\n\n    @validate(\"load_balancing_algorithm\")\n    def _validate_load_balancing_algorithm(self, proposal: Dict[str, str]) -> str:\n        value = proposal[\"value\"]\n        try:\n            if value not in [\"round-robin\", \"least-connection\"]:\n                msg = f\"Unrecognized proposal value {value}\"\n                raise AssertionError(msg)\n        except ValueError:\n            msg = f\"Invalid load_balancing_algorithm value {value}, not in [round-robin,least-connection]\"\n            raise TraitError(msg) from None\n        return value\n\n    # Yarn endpoint\n    yarn_endpoint_env = \"EG_YARN_ENDPOINT\"\n    yarn_endpoint = Unicode(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"\"\"The http url specifying the YARN Resource Manager. 
Note: If this value is NOT set,\n                            the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the\n                            active resource manager. (EG_YARN_ENDPOINT env var)\"\"\",\n    )\n\n    @default(\"yarn_endpoint\")\n    def _yarn_endpoint_default(self) -> Optional[str]:\n        return os.getenv(self.yarn_endpoint_env)\n\n    # Alt Yarn endpoint\n    alt_yarn_endpoint_env = \"EG_ALT_YARN_ENDPOINT\"\n    alt_yarn_endpoint = Unicode(\n        None,\n        config=True,\n        allow_none=True,\n        help=\"\"\"The http url specifying the alternate YARN Resource Manager.  This value should\n                                be set when YARN Resource Managers are configured for high availability.  Note: If both\n                                YARN endpoints are NOT set, the YARN library will use the files within the local\n                                HADOOP_CONFIG_DIR to determine the active resource manager.\n                                (EG_ALT_YARN_ENDPOINT env var)\"\"\",\n    )\n\n    @default(\"alt_yarn_endpoint\")\n    def _alt_yarn_endpoint_default(self) -> Optional[str]:\n        return os.getenv(self.alt_yarn_endpoint_env)\n\n    yarn_endpoint_security_enabled_env = \"EG_YARN_ENDPOINT_SECURITY_ENABLED\"\n    yarn_endpoint_security_enabled_default_value = False\n    yarn_endpoint_security_enabled = Bool(\n        yarn_endpoint_security_enabled_default_value,\n        config=True,\n        help=\"\"\"Is YARN Kerberos/SPNEGO Security enabled (True/False).\n                                          (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)\"\"\",\n    )\n\n    @default(\"yarn_endpoint_security_enabled\")\n    def _yarn_endpoint_security_enabled_default(self) -> bool:\n        return bool(\n            os.getenv(\n                self.yarn_endpoint_security_enabled_env,\n                self.yarn_endpoint_security_enabled_default_value,\n            )\n        )\n\n    # Conductor 
endpoint\n    conductor_endpoint_env = \"EG_CONDUCTOR_ENDPOINT\"\n    conductor_endpoint_default_value = None\n    conductor_endpoint = Unicode(\n        conductor_endpoint_default_value,\n        allow_none=True,\n        config=True,\n        help=\"\"\"The http url for accessing the Conductor REST API.\n                                 (EG_CONDUCTOR_ENDPOINT env var)\"\"\",\n    )\n\n    @default(\"conductor_endpoint\")\n    def _conductor_endpoint_default(self) -> Optional[str]:\n        return os.getenv(self.conductor_endpoint_env, self.conductor_endpoint_default_value)\n\n    _log_formatter_cls = LogFormatter  # traitlet default is LevelFormatter\n\n    @default(\"log_format\")\n    def _default_log_format(self) -> str:\n        \"\"\"override default log format to include milliseconds\"\"\"\n        return (\n            \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s\"\n        )\n\n    # Impersonation enabled\n    impersonation_enabled_env = \"EG_IMPERSONATION_ENABLED\"\n    impersonation_enabled = Bool(\n        False,\n        config=True,\n        help=\"\"\"Indicates whether impersonation will be performed during kernel launch.\n                                 (EG_IMPERSONATION_ENABLED env var)\"\"\",\n    )\n\n    @default(\"impersonation_enabled\")\n    def _impersonation_enabled_default(self) -> bool:\n        return bool(os.getenv(self.impersonation_enabled_env, \"false\").lower() == \"true\")\n\n    # Unauthorized users\n    unauthorized_users_env = \"EG_UNAUTHORIZED_USERS\"\n    unauthorized_users_default_value = \"root\"\n    unauthorized_users = SetTrait(\n        default_value={unauthorized_users_default_value},\n        config=True,\n        help=\"\"\"Comma-separated list of user names (e.g., ['root','admin']) against which\n                             KERNEL_USERNAME will be compared.  
Any match (case-sensitive) will prevent the\n                             kernel's launch and result in an HTTP 403 (Forbidden) error.\n                             (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)\"\"\",\n    )\n\n    @default(\"unauthorized_users\")\n    def _unauthorized_users_default(self) -> Set[str]:\n        return os.getenv(self.unauthorized_users_env, self.unauthorized_users_default_value).split(\n            \",\"\n        )\n\n    # Authorized users\n    authorized_users_env = \"EG_AUTHORIZED_USERS\"\n    authorized_users = SetTrait(\n        config=True,\n        help=\"\"\"Comma-separated list of user names (e.g., ['bob','alice']) against which\n                           KERNEL_USERNAME will be compared.  Any match (case-sensitive) will allow the kernel's\n                           launch, otherwise an HTTP 403 (Forbidden) error will be raised.  The set of unauthorized\n                           users takes precedence. This option should be used carefully as it can dramatically limit\n                           who can launch kernels.  (EG_AUTHORIZED_USERS env var - non-bracketed,\n                           just comma-separated)\"\"\",\n    )\n\n    @default(\"authorized_users\")\n    def _authorized_users_default(self) -> Set[str]:\n        au_env = os.getenv(self.authorized_users_env)\n        return au_env.split(\",\") if au_env is not None else []\n\n    # Authorized origin\n    authorized_origin_env = \"EG_AUTHORIZED_ORIGIN\"\n    authorized_origin = Unicode(\n        config=True,\n        help=\"\"\"Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match\n                                against the request's SSL certificate.  An HTTP 403 (Forbidden) error will be raised on\n                                a failed match.  This option requires TLS to be enabled.  It does not support IP\n                                addresses. 
(EG_AUTHORIZED_ORIGIN env var)\"\"\",\n    )\n\n    # Port range\n    port_range_env = \"EG_PORT_RANGE\"\n    port_range_default_value = \"0..0\"\n    port_range = Unicode(\n        port_range_default_value,\n        config=True,\n        help=\"\"\"Specifies the lower and upper port numbers from which ports are created.\n                         The bounded values are separated by '..' (e.g., 33245..34245 specifies a range of 1000 ports\n                         to be randomly selected). A range of zero (e.g., 33245..33245 or 0..0) disables port-range\n                         enforcement.  (EG_PORT_RANGE env var)\"\"\",\n    )\n\n    @default(\"port_range\")\n    def _port_range_default(self) -> str:\n        return os.getenv(self.port_range_env, self.port_range_default_value)\n\n    # Max Kernels per User\n    max_kernels_per_user_env = \"EG_MAX_KERNELS_PER_USER\"\n    max_kernels_per_user_default_value = -1\n    max_kernels_per_user = Integer(\n        max_kernels_per_user_default_value,\n        config=True,\n        help=\"\"\"Specifies the maximum number of kernels a user can have active\n                                   simultaneously.  A value of -1 disables enforcement.\n                                   (EG_MAX_KERNELS_PER_USER env var)\"\"\",\n    )\n\n    @default(\"max_kernels_per_user\")\n    def _max_kernels_per_user_default(self) -> int:\n        return int(\n            os.getenv(self.max_kernels_per_user_env, self.max_kernels_per_user_default_value)\n        )\n\n    ws_ping_interval_env = \"EG_WS_PING_INTERVAL_SECS\"\n    ws_ping_interval_default_value = 30\n    ws_ping_interval = Integer(\n        ws_ping_interval_default_value,\n        config=True,\n        help=\"\"\"Specifies the ping interval(in seconds) that should be used by zmq port\n                                     associated with spawned kernels. 
Set this variable to 0 to disable ping mechanism.\n                                    (EG_WS_PING_INTERVAL_SECS env var)\"\"\",\n    )\n\n    @default(\"ws_ping_interval\")\n    def _ws_ping_interval_default(self) -> int:\n        return int(os.getenv(self.ws_ping_interval_env, self.ws_ping_interval_default_value))\n\n    # Dynamic Update Interval\n    dynamic_config_interval_env = \"EG_DYNAMIC_CONFIG_INTERVAL\"\n    dynamic_config_interval_default_value = 0\n    dynamic_config_interval = Integer(\n        dynamic_config_interval_default_value,\n        min=0,\n        config=True,\n        help=\"\"\"Specifies the number of seconds configuration files are polled for\n                                      changes.  A value of 0 or less disables dynamic config updates.\n                                      (EG_DYNAMIC_CONFIG_INTERVAL env var)\"\"\",\n    )\n\n    @default(\"dynamic_config_interval\")\n    def _dynamic_config_interval_default(self) -> int:\n        return int(\n            os.getenv(self.dynamic_config_interval_env, self.dynamic_config_interval_default_value)\n        )\n\n    @observe(\"dynamic_config_interval\")\n    def _dynamic_config_interval_changed(self, event: Dict[str, Any]) -> None:\n        prev_val = event[\"old\"]\n        self.dynamic_config_interval = event[\"new\"]\n        if self.dynamic_config_interval != prev_val:\n            # Values are different.  Stop the current poller.  
If new value is > 0, start a poller.\n            if self.dynamic_config_poller:\n                self.dynamic_config_poller.stop()\n                self.dynamic_config_poller = None\n\n            if self.dynamic_config_interval <= 0:\n                self.log.warning(\n                    \"Dynamic configuration updates have been disabled and cannot be re-enabled \"\n                    \"without restarting Enterprise Gateway!\"\n                )\n            # The interval has been changed, but still positive\n            elif prev_val > 0 and hasattr(self, \"init_dynamic_configs\"):\n                self.init_dynamic_configs()  # Restart the poller\n\n    dynamic_config_poller = None\n\n    # Availability Mode\n    AVAILABILITY_STANDALONE = \"standalone\"\n    AVAILABILITY_REPLICATION = \"replication\"\n    availability_mode_env = \"EG_AVAILABILITY_MODE\"\n    availability_mode_default_value = None\n    availability_mode = CaselessStrEnum(\n        allow_none=True,\n        values=[AVAILABILITY_REPLICATION, AVAILABILITY_STANDALONE],\n        config=True,\n        help=\"\"\"Specifies the type of availability.  Values must be one of \"standalone\" or \"replication\".\n                (EG_AVAILABILITY_MODE env var)\"\"\",\n    )\n\n    @default(\"availability_mode\")\n    def _availability_mode_env_default(self):\n        return os.getenv(self.availability_mode_env, self.availability_mode_default_value)\n\n    kernel_spec_manager = Instance(\"jupyter_client.kernelspec.KernelSpecManager\", allow_none=True)\n\n    kernel_spec_manager_class = Type(\n        default_value=\"jupyter_client.kernelspec.KernelSpecManager\",\n        config=True,\n        help=\"\"\"\n        The kernel spec manager class to use. 
Must be a subclass\n        of `jupyter_client.kernelspec.KernelSpecManager`.\n        \"\"\",\n    )\n\n    kernel_spec_cache_class = Type(\n        default_value=\"enterprise_gateway.services.kernelspecs.KernelSpecCache\",\n        config=True,\n        help=\"\"\"\n        The kernel spec cache class to use. Must be a subclass\n        of `enterprise_gateway.services.kernelspecs.KernelSpecCache`.\n        \"\"\",\n    )\n\n    kernel_manager_class = Type(\n        klass=\"enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager\",\n        default_value=\"enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager\",\n        config=True,\n        help=\"\"\"\n        The kernel manager class to use. Must be a subclass\n        of `enterprise_gateway.services.kernels.RemoteMappingKernelManager`.\n        \"\"\",\n    )\n\n    kernel_session_manager_class = Type(\n        klass=\"enterprise_gateway.services.sessions.kernelsessionmanager.KernelSessionManager\",\n        default_value=\"enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager\",\n        config=True,\n        help=\"\"\"\n        The kernel session manager class to use. Must be a subclass\n        of `enterprise_gateway.services.sessions.KernelSessionManager`.\n        \"\"\",\n    )\n\n    authorizer_class = Type(\n        klass=\"jupyter_server.auth.authorizer.Authorizer\",\n        default_value=\"jupyter_server.auth.authorizer.AllowAllAuthorizer\",\n        config=True,\n        help=\"\"\"\n        The authorizer class to use for authenticating and authorizing requests.\n\n        By default, Enterprise Gateway uses AllowAllAuthorizer which allows all\n        authenticated requests. 
You can configure a custom authorizer to implement\n        authentication and authorization logic.\n\n        Example usage:\n            c.EnterpriseGatewayApp.authorizer_class = 'my_module.MyAuthorizer'\n\n        Environment variable: EG_AUTHORIZER_CLASS\n        \"\"\",\n    )\n\n    authorizer_class_env = \"EG_AUTHORIZER_CLASS\"\n\n    @default(\"authorizer_class\")\n    def _authorizer_class_default(self):\n        return os.getenv(\n            self.authorizer_class_env, \"jupyter_server.auth.authorizer.AllowAllAuthorizer\"\n        )\n"
  },
  {
    "path": "enterprise_gateway/services/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/services/api/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/services/api/handlers.py",
    "content": "\"\"\"Tornado handlers for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nfrom typing import List\n\nfrom jupyter_server.utils import ensure_async\nfrom tornado import web\n\nfrom ...mixins import CORSMixin\n\n\nclass BaseSpecHandler(CORSMixin, web.StaticFileHandler):\n    \"\"\"Exposes the ability to return specifications from static files\"\"\"\n\n    @staticmethod\n    def get_resource_metadata() -> tuple:\n        \"\"\"Returns the (resource, mime-type) for the handlers spec.\"\"\"\n        pass\n\n    def initialize(self) -> None:\n        \"\"\"Initializes the instance of this class to serve files.\n\n        The handler is initialized to serve files from the directory\n        where this module is defined.  `path` parameter will be overridden.\n        \"\"\"\n        web.StaticFileHandler.initialize(self, path=os.path.dirname(__file__))\n\n    async def get(self) -> None:\n        \"\"\"Handler for a get on a specific handler\"\"\"\n        resource_name, content_type = self.get_resource_metadata()\n        self.set_header(\"Content-Type\", content_type)\n        res = web.StaticFileHandler.get(self, resource_name)\n        await ensure_async(res)\n\n    def options(self, **kwargs) -> None:\n        \"\"\"Method for properly handling CORS pre-flight\"\"\"\n        self.finish()\n\n\nclass SpecJsonHandler(BaseSpecHandler):\n    \"\"\"Exposes a JSON swagger specification\"\"\"\n\n    @staticmethod\n    def get_resource_metadata() -> tuple:\n        \"\"\"Get the resource metadata.\"\"\"\n        return \"swagger.json\", \"application/json\"\n\n\nclass APIYamlHandler(BaseSpecHandler):\n    \"\"\"Exposes a YAML swagger specification\"\"\"\n\n    @staticmethod\n    def get_resource_metadata() -> tuple:\n        \"\"\"Get the resource metadata.\"\"\"\n        return \"swagger.yaml\", \"text/x-yaml\"\n\n\ndefault_handlers: List[tuple] = [\n    
(f\"/api/{SpecJsonHandler.get_resource_metadata()[0]}\", SpecJsonHandler),\n    (f\"/api/{APIYamlHandler.get_resource_metadata()[0]}\", APIYamlHandler),\n]\n"
  },
  {
    "path": "enterprise_gateway/services/api/swagger.json",
    "content": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"Jupyter Enterprise Gateway API\",\n    \"description\": \"The API for the Jupyter Enterprise Gateway\",\n    \"version\": \"6\",\n    \"contact\": {\n      \"name\": \"Jupyter Project\",\n      \"url\": \"https://jupyter.org\"\n    }\n  },\n  \"produces\": [\"application/json\"],\n  \"consumes\": [\"application/json\"],\n  \"parameters\": {\n    \"kernel\": {\n      \"name\": \"kernel_id\",\n      \"required\": true,\n      \"in\": \"path\",\n      \"description\": \"kernel uuid\",\n      \"type\": \"string\",\n      \"format\": \"uuid\"\n    },\n    \"session\": {\n      \"name\": \"session\",\n      \"required\": true,\n      \"in\": \"path\",\n      \"description\": \"session uuid\",\n      \"type\": \"string\",\n      \"format\": \"uuid\"\n    }\n  },\n  \"securityDefinitions\": {\n    \"tokenHeader\": {\n      \"type\": \"apiKey\",\n      \"name\": \"Authorization\",\n      \"in\": \"header\",\n      \"description\": \"The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token {value}` where `{value}` is the value of the token. Alternatively, the token can be passed as a query parameter.\"\n    },\n    \"tokenParam\": {\n      \"type\": \"apiKey\",\n      \"name\": \"token\",\n      \"in\": \"query\",\n      \"description\": \"The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token={value}` where `{value}` is the value of the token. 
Alternatively, the token can be passed as a header.\"\n    }\n  },\n  \"security\": [\n    {\n      \"tokenHeader\": []\n    },\n    {\n      \"tokenParam\": []\n    }\n  ],\n  \"paths\": {\n    \"/api\": {\n      \"get\": {\n        \"summary\": \"Get API info\",\n        \"tags\": [\"api\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Returns information about the API\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/ApiInfo\"\n            }\n          }\n        }\n      }\n    },\n    \"/api/swagger.yaml\": {\n      \"get\": {\n        \"produces\": [\"text/x-yaml\"],\n        \"summary\": \"Get API info\",\n        \"tags\": [\"api\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Returns a swagger specification in yaml\"\n          }\n        }\n      }\n    },\n    \"/api/swagger.json\": {\n      \"get\": {\n        \"summary\": \"Get API info\",\n        \"tags\": [\"api\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Returns a swagger specification in json\"\n          }\n        }\n      }\n    },\n    \"/api/kernelspecs\": {\n      \"get\": {\n        \"summary\": \"Get kernel specs\",\n        \"tags\": [\"kernelspecs\"],\n        \"parameters\": [\n          {\n            \"name\": \"user\",\n            \"required\": false,\n            \"in\": \"query\",\n            \"description\": \"When present, kernelspec results will be filtered based on the configured authorization of specified value.\",\n            \"type\": \"string\"\n          }\n        ],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"If no query parameter is specified, all kernel specs will be returned; otherwise the result set is filtered based on the query parameter.\",\n            \"schema\": {\n              \"type\": \"object\",\n              \"properties\": {\n                \"default\": {\n                  \"type\": \"string\",\n                  
\"description\": \"The name of the default kernel.\"\n                },\n                \"kernelspecs\": {\n                  \"type\": \"object\",\n                  \"additionalProperties\": {\n                    \"$ref\": \"#/definitions/KernelSpec\"\n                  }\n                }\n              }\n            }\n          }\n        }\n      }\n    },\n    \"/api/kernels\": {\n      \"get\": {\n        \"summary\": \"List the JSON data for all currently running kernels\",\n        \"tags\": [\"kernels\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"List of running kernels\",\n            \"schema\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/Kernel\"\n              }\n            }\n          },\n          \"403\": {\n            \"description\": \"This method is not accessible when `EnterpriseGatewayApp.list_kernels` is `False`.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Error\"\n            }\n          }\n        }\n      },\n      \"post\": {\n        \"summary\": \"Start a kernel and return the uuid\",\n        \"tags\": [\"kernels\"],\n        \"parameters\": [\n          {\n            \"name\": \"start_kernel_body\",\n            \"in\": \"body\",\n            \"schema\": {\n              \"type\": \"object\",\n              \"properties\": {\n                \"name\": {\n                  \"type\": \"string\",\n                  \"description\": \"Kernel spec name (defaults to default kernel spec for server)\"\n                },\n                \"env\": {\n                  \"type\": \"object\",\n                  \"description\": \"A dictionary of environment variables and values to include in the kernel process - subject to filtering.\",\n                  \"additionalProperties\": {\n                    \"type\": \"string\"\n                  }\n                }\n              }\n            }\n          
}\n        ],\n        \"responses\": {\n          \"201\": {\n            \"description\": \"The metadata about the newly created kernel.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Kernel\"\n            },\n            \"headers\": {\n              \"Location\": {\n                \"description\": \"Model for started kernel\",\n                \"type\": \"string\",\n                \"format\": \"url\"\n              }\n            }\n          },\n          \"403\": {\n            \"description\": \"The maximum number of kernels have been created.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Error\"\n            }\n          }\n        }\n      }\n    },\n    \"/api/kernels/{kernel_id}\": {\n      \"parameters\": [\n        {\n          \"$ref\": \"#/parameters/kernel\"\n        }\n      ],\n      \"get\": {\n        \"summary\": \"Get kernel information\",\n        \"tags\": [\"kernels\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Information about the kernel\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Kernel\"\n            }\n          }\n        }\n      },\n      \"delete\": {\n        \"summary\": \"Kill a kernel and delete the kernel id\",\n        \"tags\": [\"kernels\"],\n        \"responses\": {\n          \"204\": {\n            \"description\": \"Kernel deleted\"\n          }\n        }\n      }\n    },\n    \"/api/kernels/{kernel_id}/channels\": {\n      \"parameters\": [\n        {\n          \"$ref\": \"#/parameters/kernel\"\n        }\n      ],\n      \"get\": {\n        \"summary\": \"Upgrades the connection to a websocket connection.\",\n        \"tags\": [\"channels\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"The connection will be upgraded to a websocket.\"\n          }\n        }\n      }\n    },\n    \"/kernels/{kernel_id}/interrupt\": {\n      \"parameters\": [\n        {\n          
\"$ref\": \"#/parameters/kernel\"\n        }\n      ],\n      \"post\": {\n        \"summary\": \"Interrupt a kernel\",\n        \"tags\": [\"kernels\"],\n        \"responses\": {\n          \"204\": {\n            \"description\": \"Kernel interrupted\"\n          }\n        }\n      }\n    },\n    \"/kernels/{kernel_id}/restart\": {\n      \"parameters\": [\n        {\n          \"$ref\": \"#/parameters/kernel\"\n        }\n      ],\n      \"post\": {\n        \"summary\": \"Restart a kernel\",\n        \"tags\": [\"kernels\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Kernel restarted\",\n            \"headers\": {\n              \"Location\": {\n                \"description\": \"URL for kernel commands\",\n                \"type\": \"string\",\n                \"format\": \"url\"\n              }\n            },\n            \"schema\": {\n              \"$ref\": \"#/definitions/Kernel\"\n            }\n          }\n        }\n      }\n    },\n    \"/api/sessions\": {\n      \"get\": {\n        \"summary\": \"List available sessions\",\n        \"tags\": [\"sessions\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"List of current sessions\",\n            \"schema\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/definitions/Session\"\n              }\n            }\n          },\n          \"403\": {\n            \"description\": \"This method is not accessible when the `list_kernels` option is `False`.\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Error\"\n            }\n          }\n        }\n      },\n      \"post\": {\n        \"summary\": \"Create a new session, or return an existing session if a session of the same name already exists.\",\n        \"tags\": [\"sessions\"],\n        \"parameters\": [\n          {\n            \"name\": \"session\",\n            \"in\": \"body\",\n    
        \"schema\": {\n              \"$ref\": \"#/definitions/Session\"\n            }\n          }\n        ],\n        \"responses\": {\n          \"201\": {\n            \"description\": \"Session created or returned\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Session\"\n            },\n            \"headers\": {\n              \"Location\": {\n                \"description\": \"URL for session commands\",\n                \"type\": \"string\",\n                \"format\": \"url\"\n              }\n            }\n          },\n          \"501\": {\n            \"description\": \"Session not available\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Error\"\n            }\n          }\n        }\n      }\n    },\n    \"/api/sessions/{session}\": {\n      \"parameters\": [\n        {\n          \"$ref\": \"#/parameters/session\"\n        }\n      ],\n      \"get\": {\n        \"summary\": \"Get session\",\n        \"tags\": [\"sessions\"],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Session\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Session\"\n            }\n          }\n        }\n      },\n      \"patch\": {\n        \"summary\": \"This can be used to rename the session.\",\n        \"tags\": [\"sessions\"],\n        \"parameters\": [\n          {\n            \"name\": \"model\",\n            \"in\": \"body\",\n            \"required\": true,\n            \"schema\": {\n              \"$ref\": \"#/definitions/Session\"\n            }\n          }\n        ],\n        \"responses\": {\n          \"200\": {\n            \"description\": \"Session\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Session\"\n            }\n          },\n          \"400\": {\n            \"description\": \"No data provided\",\n            \"schema\": {\n              \"$ref\": \"#/definitions/Error\"\n            }\n          }\n        }\n      },\n 
     \"delete\": {\n        \"summary\": \"Delete a session\",\n        \"tags\": [\"sessions\"],\n        \"responses\": {\n          \"204\": {\n            \"description\": \"Session (and kernel) were deleted\"\n          },\n          \"410\": {\n            \"description\": \"Kernel was deleted before the session, and the session was *not* deleted\"\n          }\n        }\n      }\n    }\n  },\n  \"definitions\": {\n    \"Error\": {\n      \"description\": \"An error response from the server\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"reason\": {\n          \"type\": \"string\",\n          \"description\": \"The reason for the failure\"\n        },\n        \"message\": {\n          \"type\": \"string\",\n          \"description\": \"The message logged when the error occurred\"\n        }\n      }\n    },\n    \"KernelSpec\": {\n      \"description\": \"Kernel spec (contents of kernel.json)\",\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\",\n          \"description\": \"Unique name for kernel\"\n        },\n        \"KernelSpecFile\": {\n          \"$ref\": \"#/definitions/KernelSpecFile\",\n          \"description\": \"Kernel spec json file\"\n        },\n        \"resources\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"kernel.js\": {\n              \"type\": \"string\",\n              \"format\": \"filename\",\n              \"description\": \"path for kernel.js file\"\n            },\n            \"kernel.css\": {\n              \"type\": \"string\",\n              \"format\": \"filename\",\n              \"description\": \"path for kernel.css file\"\n            },\n            \"logo-*\": {\n              \"type\": \"string\",\n              \"format\": \"filename\",\n              \"description\": \"path for logo file.  
Logo filenames are of the form `logo-widthxheight`\"\n            }\n          }\n        }\n      }\n    },\n    \"KernelSpecFile\": {\n      \"description\": \"Kernel spec json file\",\n      \"required\": [\"argv\", \"display_name\", \"language\"],\n      \"properties\": {\n        \"language\": {\n          \"type\": \"string\",\n          \"description\": \"The programming language which this kernel runs. This will be stored in notebook metadata.\"\n        },\n        \"argv\": {\n          \"type\": \"array\",\n          \"description\": \"A list of command line arguments used to start the kernel. The text `{connection_file}` in any argument will be replaced with the path to the connection file.\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"display_name\": {\n          \"type\": \"string\",\n          \"description\": \"The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API, this can contain arbitrary unicode characters.\"\n        },\n        \"codemirror_mode\": {\n          \"type\": \"string\",\n          \"description\": \"Codemirror mode.  Can be a string *or* an valid Codemirror mode object.  This defaults to the string from the `language` property.\"\n        },\n        \"env\": {\n          \"type\": \"object\",\n          \"description\": \"A dictionary of environment variables to set for the kernel. 
These will be added to the current environment variables.\",\n          \"additionalProperties\": {\n            \"type\": \"string\"\n          }\n        },\n        \"metadata\": {\n          \"type\": \"object\",\n          \"description\": \"A free-form dictionary consisting of additional information about the kernel and its environment.\",\n          \"additionalProperties\": true\n        },\n        \"help_links\": {\n          \"type\": \"array\",\n          \"description\": \"Help items to be displayed in the help menu in the notebook UI.\",\n          \"items\": {\n            \"type\": \"object\",\n            \"required\": [\"text\", \"url\"],\n            \"properties\": {\n              \"text\": {\n                \"type\": \"string\",\n                \"description\": \"menu item link text\"\n              },\n              \"url\": {\n                \"type\": \"string\",\n                \"format\": \"URL\",\n                \"description\": \"menu item link url\"\n              }\n            }\n          }\n        }\n      }\n    },\n    \"Kernel\": {\n      \"description\": \"Kernel information\",\n      \"required\": [\"id\", \"name\"],\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uuid\",\n          \"description\": \"uuid of kernel\"\n        },\n        \"name\": {\n          \"type\": \"string\",\n          \"description\": \"kernel spec name\"\n        },\n        \"last_activity\": {\n          \"type\": \"string\",\n          \"description\": \"ISO 8601 timestamp for the last-seen activity on this kernel.\\nUse this in combination with execution_state == 'idle' to identify\\nwhich kernels have been idle since a given time.\\nTimestamps will be UTC, indicated 'Z' suffix.\\nAdded in notebook server 5.0.\\n\"\n        },\n        \"connections\": {\n          \"type\": \"number\",\n          \"description\": \"The number of active connections to this kernel.\\n\"\n        },\n       
 \"execution_state\": {\n          \"type\": \"string\",\n          \"description\": \"Current execution state of the kernel (typically 'idle' or 'busy', but may be other values, such as 'starting').\\nAdded in notebook server 5.0.\\n\"\n        }\n      }\n    },\n    \"Session\": {\n      \"description\": \"A session\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"id\": {\n          \"type\": \"string\",\n          \"format\": \"uuid\"\n        },\n        \"path\": {\n          \"type\": \"string\",\n          \"description\": \"path to the session\"\n        },\n        \"name\": {\n          \"type\": \"string\",\n          \"description\": \"name of the session\"\n        },\n        \"type\": {\n          \"type\": \"string\",\n          \"description\": \"session type\"\n        },\n        \"kernel\": {\n          \"$ref\": \"#/definitions/Kernel\"\n        }\n      }\n    },\n    \"ApiInfo\": {\n      \"description\": \"Information about the api\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"version\": {\n          \"type\": \"string\"\n        },\n        \"gateway_version\": {\n          \"type\": \"string\"\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "enterprise_gateway/services/api/swagger.yaml",
    "content": "swagger: \"2.0\"\n\ninfo:\n  title: Jupyter Enterprise Gateway API\n  description: The API for the Jupyter Enterprise Gateway\n  version: \"6\"\n  contact:\n    name: Jupyter Project\n    url: https://jupyter.org\n\nproduces:\n  - application/json\nconsumes:\n  - application/json\nparameters:\n  kernel:\n    name: kernel_id\n    required: true\n    in: path\n    description: kernel uuid\n    type: string\n    format: uuid\n  session:\n    name: session\n    required: true\n    in: path\n    description: session uuid\n    type: string\n    format: uuid\n\nsecurityDefinitions:\n  tokenHeader:\n    type: apiKey\n    name: Authorization\n    in: header\n    description: |\n      The authorization token to verify authorization. This is only needed\n      when `EnterpriseGatewayApp.auth_token` is set. This should take the\n      form of `token {value}` where `{value}` is the value of the token.\n      Alternatively, the token can be passed as a query parameter.\n  tokenParam:\n    type: apiKey\n    name: token\n    in: query\n    description: |\n      The authorization token to verify authorization. This is only needed\n      when `EnterpriseGatewayApp.auth_token` is set. 
This should take the\n      form of `token={value}` where `{value}` is the value of the token.\n      Alternatively, the token can be passed as a header.\n\nsecurity:\n  - tokenHeader: []\n  - tokenParam: []\n\npaths:\n  /api:\n    get:\n      summary: Get API info\n      tags:\n        - api\n      responses:\n        200:\n          description: Returns information about the API\n          schema:\n            $ref: \"#/definitions/ApiInfo\"\n  /api/swagger.yaml:\n    get:\n      produces:\n        - text/x-yaml\n      summary: Get API info\n      tags:\n        - api\n      responses:\n        200:\n          description: Returns a swagger specification in yaml\n  /api/swagger.json:\n    get:\n      summary: Get API info\n      tags:\n        - api\n      responses:\n        200:\n          description: Returns a swagger specification in json\n  /api/kernelspecs:\n    get:\n      summary: Get kernel specs\n      tags:\n        - kernelspecs\n      parameters:\n        - name: user\n          in: query\n          description: When present, kernelspec results will be filtered based on the configured authorization of specified value.\n          required: false\n          type: string\n      responses:\n        200:\n          description: |\n            If no query parameter is specified, all kernel specs will be returned;\n            otherwise the result set is filtered based on the query parameter.\n          schema:\n            type: object\n            properties:\n              default:\n                type: string\n                description: The name of the default kernel.\n              kernelspecs:\n                type: object\n                additionalProperties:\n                  $ref: \"#/definitions/KernelSpec\"\n  /api/kernels:\n    get:\n      summary: List the JSON data for all currently running kernels\n      tags:\n        - kernels\n      responses:\n        200:\n          description: List of running kernels\n          schema:\n          
  type: array\n            items:\n              $ref: \"#/definitions/Kernel\"\n        403:\n          description: |\n            This method is not accessible when `EnterpriseGatewayApp.list_kernels` is `False`.\n          schema:\n            $ref: \"#/definitions/Error\"\n    post:\n      summary: Start a kernel and return the uuid\n      tags:\n        - kernels\n      parameters:\n        - name: start_kernel_body\n          in: body\n          schema:\n            type: object\n            properties:\n              name:\n                type: string\n                description: Kernel spec name (defaults to default kernel spec for server)\n              env:\n                type: object\n                description: |\n                  A dictionary of environment variables and values to include in the\n                  kernel process - subject to filtering.\n                additionalProperties:\n                  type: string\n      responses:\n        201:\n          description: The metadata about the newly created kernel.\n          schema:\n            $ref: \"#/definitions/Kernel\"\n          headers:\n            Location:\n              description: Model for started kernel\n              type: string\n              format: url\n        403:\n          description: The maximum number of kernels have been created.\n          schema:\n            $ref: \"#/definitions/Error\"\n  /api/kernels/{kernel_id}:\n    parameters:\n      - $ref: \"#/parameters/kernel\"\n    get:\n      summary: Get kernel information\n      tags:\n        - kernels\n      responses:\n        200:\n          description: Information about the kernel\n          schema:\n            $ref: \"#/definitions/Kernel\"\n    delete:\n      summary: Kill a kernel and delete the kernel id\n      tags:\n        - kernels\n      responses:\n        204:\n          description: Kernel deleted\n  /api/kernels/{kernel_id}/channels:\n    parameters:\n      - $ref: 
\"#/parameters/kernel\"\n    get:\n      summary: Upgrades the connection to a websocket connection.\n      tags:\n        - channels\n      responses:\n        200:\n          description: The connection will be upgraded to a websocket.\n  /kernels/{kernel_id}/interrupt:\n    parameters:\n      - $ref: \"#/parameters/kernel\"\n    post:\n      summary: Interrupt a kernel\n      tags:\n        - kernels\n      responses:\n        204:\n          description: Kernel interrupted\n  /kernels/{kernel_id}/restart:\n    parameters:\n      - $ref: \"#/parameters/kernel\"\n    post:\n      summary: Restart a kernel\n      tags:\n        - kernels\n      responses:\n        200:\n          description: Kernel restarted\n          headers:\n            Location:\n              description: URL for kernel commands\n              type: string\n              format: url\n          schema:\n            $ref: \"#/definitions/Kernel\"\n  /api/sessions:\n    get:\n      summary: List available sessions\n      tags:\n        - sessions\n      responses:\n        200:\n          description: List of current sessions\n          schema:\n            type: array\n            items:\n              $ref: \"#/definitions/Session\"\n        403:\n          description: |\n            This method is not accessible when\n            the `list_kernels` option is `False`.\n          schema:\n            $ref: \"#/definitions/Error\"\n    post:\n      summary: |\n        Create a new session, or return an existing session if a session\n        of the same name already exists.\n      tags:\n        - sessions\n      parameters:\n        - name: session\n          in: body\n          schema:\n            $ref: \"#/definitions/Session\"\n      responses:\n        201:\n          description: Session created or returned\n          schema:\n            $ref: \"#/definitions/Session\"\n          headers:\n            Location:\n              description: URL for session 
commands\n              type: string\n              format: url\n        501:\n          description: Session not available\n          schema:\n            $ref: \"#/definitions/Error\"\n\n  /api/sessions/{session}:\n    parameters:\n      - $ref: \"#/parameters/session\"\n    get:\n      summary: Get session\n      tags:\n        - sessions\n      responses:\n        200:\n          description: Session\n          schema:\n            $ref: \"#/definitions/Session\"\n    patch:\n      summary: This can be used to rename the session.\n      tags:\n        - sessions\n      parameters:\n        - name: model\n          in: body\n          required: true\n          schema:\n            $ref: \"#/definitions/Session\"\n      responses:\n        200:\n          description: Session\n          schema:\n            $ref: \"#/definitions/Session\"\n        400:\n          description: No data provided\n          schema:\n            $ref: \"#/definitions/Error\"\n    delete:\n      summary: Delete a session\n      tags:\n        - sessions\n      responses:\n        204:\n          description: Session (and kernel) were deleted\n        410:\n          description: |\n            Kernel was deleted before the session, and the session\n            was *not* deleted\n\ndefinitions:\n  Error:\n    description: An error response from the server\n    type: object\n    properties:\n      reason:\n        type: string\n        description: The reason for the failure\n      message:\n        type: string\n        description: The message logged when the error occurred\n  KernelSpec:\n    description: Kernel spec (contents of kernel.json)\n    properties:\n      name:\n        type: string\n        description: Unique name for kernel\n      KernelSpecFile:\n        $ref: \"#/definitions/KernelSpecFile\"\n        description: Kernel spec json file\n      resources:\n        type: object\n        properties:\n          kernel.js:\n            type: string\n            format: 
filename\n            description: path for kernel.js file\n          kernel.css:\n            type: string\n            format: filename\n            description: path for kernel.css file\n          logo-*:\n            type: string\n            format: filename\n            description: |\n              path for logo file.  Logo filenames are of the form `logo-widthxheight`\n  KernelSpecFile:\n    description: Kernel spec json file\n    required:\n      - argv\n      - display_name\n      - language\n    properties:\n      language:\n        type: string\n        description: The programming language which this kernel runs. This will be stored in notebook metadata.\n      argv:\n        type: array\n        description: |\n          A list of command line arguments used to start the kernel. The text `{connection_file}` in any\n          argument will be replaced with the path to the connection file.\n        items:\n          type: string\n      display_name:\n        type: string\n        description: |\n          The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API,\n          this can contain arbitrary unicode characters.\n      codemirror_mode:\n        type: string\n        description: |\n          Codemirror mode.  Can be a string *or* an valid Codemirror mode object.  This defaults to the\n          string from the `language` property.\n      env:\n        type: object\n        description: |\n          A dictionary of environment variables to set for the kernel. 
These will be added to the current\n          environment variables.\n        additionalProperties:\n          type: string\n      metadata:\n        type: object\n        description: |\n          A free-form dictionary consisting of additional information about the kernel and its environment.\n        additionalProperties: true\n      help_links:\n        type: array\n        description: Help items to be displayed in the help menu in the notebook UI.\n        items:\n          type: object\n          required:\n            - text\n            - url\n          properties:\n            text:\n              type: string\n              description: menu item link text\n            url:\n              type: string\n              format: URL\n              description: menu item link url\n  Kernel:\n    description: Kernel information\n    required:\n      - id\n      - name\n    properties:\n      id:\n        type: string\n        format: uuid\n        description: uuid of kernel\n      name:\n        type: string\n        description: kernel spec name\n      last_activity:\n        type: string\n        description: |\n          ISO 8601 timestamp for the last-seen activity on this kernel.\n          Use this in combination with execution_state == 'idle' to identify\n          which kernels have been idle since a given time.\n          Timestamps will be UTC, indicated 'Z' suffix.\n          Added in notebook server 5.0.\n      connections:\n        type: number\n        description: |\n          The number of active connections to this kernel.\n      execution_state:\n        type: string\n        description: |\n          Current execution state of the kernel (typically 'idle' or 'busy', but may be other values, such as 'starting').\n          Added in notebook server 5.0.\n  Session:\n    description: A session\n    type: object\n    properties:\n      id:\n        type: string\n        format: uuid\n      path:\n        type: string\n        description: path 
to the session\n      name:\n        type: string\n        description: name of the session\n      type:\n        type: string\n        description: session type\n      kernel:\n        $ref: \"#/definitions/Kernel\"\n  ApiInfo:\n    description: Information about the api\n    type: object\n    properties:\n      version:\n        type: string\n      gateway_version:\n        type: string\n"
  },
  {
    "path": "enterprise_gateway/services/kernels/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/services/kernels/handlers.py",
    "content": "\"\"\"Tornado handlers for kernel CRUD and communication.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nfrom __future__ import annotations\n\nimport json\nimport os\nfrom functools import partial\nfrom typing import Any\n\nimport jupyter_server.services.kernels.handlers as jupyter_server_handlers\nimport tornado\nfrom jupyter_client.jsonutil import date_default\nfrom tornado import web\n\nfrom ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin\n\nMAX_ENV_VALUE_LENGTH = 4096\n\n\nclass MainKernelHandler(\n    TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.MainKernelHandler\n):\n    \"\"\"Extends the jupyter_server main kernel handler with token auth, CORS, and\n    JSON errors.\n    \"\"\"\n\n    @property\n    def client_envs(self):\n        return self.settings[\"eg_client_envs\"]\n\n    @property\n    def inherited_envs(self):\n        return self.settings[\"eg_inherited_envs\"]\n\n    def _build_kernel_env(self, model_env: dict[str, Any]) -> dict[str, str]:\n        \"\"\"Build the kernel environment from the request model and server settings.\"\"\"\n        env = {key: value for key, value in os.environ.items() if key in self.inherited_envs}\n\n        allowed_envs: list[str]\n        allowed_envs = list(model_env.keys()) if self.client_envs == [\"*\"] else self.client_envs\n        for key, value in model_env.items():\n            if key.startswith(\"KERNEL_\") or key in allowed_envs:\n                if not isinstance(value, str):\n                    raise tornado.web.HTTPError(\n                        400, f\"Environment variable '{key}' value must be a string\"\n                    )\n                if len(value) > MAX_ENV_VALUE_LENGTH:\n                    raise tornado.web.HTTPError(\n                        400, f\"Environment variable '{key}' exceeds maximum length\"\n                    )\n                env[key] = 
value\n        return env\n\n    def _build_kernel_headers(self) -> dict[str, str]:\n        \"\"\"Build kernel headers from the request based on server settings.\"\"\"\n        kernel_headers = {}\n        missing_headers = []\n        kernel_header_names = self.settings[\"eg_kernel_headers\"]\n        for name in kernel_header_names:\n            if name:\n                value = self.request.headers.get(name)\n                if value:\n                    kernel_headers[name] = value\n                else:\n                    missing_headers.append(name)\n\n        if missing_headers:\n            self.log.warning(\n                \"The following headers specified in 'kernel-headers' were not found: {}\".format(\n                    missing_headers\n                )\n            )\n        return kernel_headers\n\n    async def post(self):\n        \"\"\"Overrides the super class method to manage env in the request body.\n\n        Max kernel limits are now enforced in RemoteMappingKernelManager.start_kernel().\n\n        Raises\n        ------\n        tornado.web.HTTPError\n            403 Forbidden if either max kernel limit is reached (total or per user, if configured)\n        \"\"\"\n        max_kernels = self.settings[\"eg_max_kernels\"]\n        if max_kernels is not None:\n            km = self.settings[\"kernel_manager\"]\n            kernels = km.list_kernels()\n            if len(kernels) >= max_kernels:\n                raise tornado.web.HTTPError(403, \"Resource Limit\")\n\n        model = self.get_json_body()\n        if model is not None and \"env\" in model:\n            if not isinstance(model[\"env\"], dict):\n                raise tornado.web.HTTPError(400)\n\n            env = self._build_kernel_env(model[\"env\"])\n            kernel_headers = self._build_kernel_headers()\n\n            # No way to override the call to start_kernel on the kernel manager\n            # so do a temporary partial (ugh)\n            orig_start = 
self.kernel_manager.start_kernel\n            self.kernel_manager.start_kernel = partial(\n                self.kernel_manager.start_kernel, env=env, kernel_headers=kernel_headers\n            )\n            try:\n                await super().post()\n            finally:\n                self.kernel_manager.start_kernel = orig_start\n        else:\n            await super().post()\n\n    async def get(self):\n        \"\"\"Overrides the super class method to honor the kernel listing\n        configuration setting.\n\n        Allows the request to reach the super class if listing is enabled.\n\n        Raises\n        ------\n        tornado.web.HTTPError\n            403 Forbidden if kernel listing is disabled\n        \"\"\"\n        if not self.settings.get(\"eg_list_kernels\"):\n            raise tornado.web.HTTPError(403, \"Forbidden\")\n        else:\n            await super().get()\n\n    def options(self, **kwargs: dict[str, Any] | None):\n        \"\"\"Method for properly handling CORS pre-flight\"\"\"\n        self.finish()\n\n\nclass KernelHandler(\n    TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.KernelHandler\n):\n    \"\"\"Extends the jupyter_server kernel handler with token auth, CORS, and\n    JSON errors.\n    \"\"\"\n\n    def options(self, **kwargs: dict[str, Any] | None):\n        \"\"\"Method for properly handling CORS pre-flight\"\"\"\n        self.finish()\n\n    @web.authenticated\n    def get(self, kernel_id: str):\n        \"\"\"Get the model for a kernel.\"\"\"\n        km = self.kernel_manager\n        km.check_kernel_id(kernel_id)\n        model = km.kernel_model(kernel_id)\n        self.finish(json.dumps(model, default=date_default))\n\n    @web.authenticated\n    async def delete(self, kernel_id):\n        \"\"\"Remove a kernel.\"\"\"\n        self.kernel_manager.check_kernel_id(kernel_id=kernel_id)\n        await super().delete(kernel_id=kernel_id)\n\n\nclass ZMQChannelsHandler(\n    
TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.ZMQChannelsHandler\n):\n    \"\"\"Extends the kernel websocket handler.\"\"\"\n\n    async def get(self, kernel_id):\n        \"\"\"Handle a get request for a kernel.\"\"\"\n        # Synchronize Kernel and check if it exists.\n        self.kernel_manager.check_kernel_id(kernel_id=kernel_id)\n        await super().get(kernel_id=kernel_id)\n\n\ndefault_handlers: list[tuple] = []\nfor path, cls in jupyter_server_handlers.default_handlers:\n    if cls.__name__ in globals():\n        # Use the same named class from here if it exists\n        default_handlers.append((path, globals()[cls.__name__]))\n    else:\n        # Gen a new type with CORS and token auth\n        bases = (TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, cls)\n        default_handlers.append((path, type(cls.__name__, bases, {})))\n"
  },
  {
    "path": "enterprise_gateway/services/kernels/remotemanager.py",
    "content": "\"\"\"Kernel managers that operate against a remote process.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport os\nimport re\nimport signal\nimport time\nimport uuid\nfrom typing import Any, ClassVar\n\nfrom jupyter_client.ioloop.manager import AsyncIOLoopKernelManager\nfrom jupyter_client.kernelspec import KernelSpec\nfrom jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager\nfrom tornado import web\nfrom traitlets import directional_link\nfrom traitlets import log as traitlets_log\nfrom zmq import IO_THREADS, MAX_SOCKETS, Context\n\nfrom enterprise_gateway.mixins import EnterpriseGatewayConfigMixin\n\nfrom ..processproxies.processproxy import BaseProcessProxyABC, LocalProcessProxy, RemoteProcessProxy\nfrom ..sessions.kernelsessionmanager import KernelSessionManager\n\ndefault_kernel_launch_timeout = float(os.getenv(\"EG_KERNEL_LAUNCH_TIMEOUT\", \"30\"))\nkernel_restart_status_poll_interval = float(os.getenv(\"EG_RESTART_STATUS_POLL_INTERVAL\", 1.0))\n\n\ndef import_item(name: str):\n    \"\"\"Import and return ``bar`` given the string ``foo.bar``.\n    Calling ``bar = import_item(\"foo.bar\")`` is the functional equivalent of\n    executing the code ``from foo import bar``.\n    Parameters\n    ----------\n    name : string\n      The fully qualified name of the module/package being imported.\n    Returns\n    -------\n    mod : module object\n       The module that was imported.\n    \"\"\"\n\n    parts = name.rsplit(\".\", 1)\n    if len(parts) == 2:\n        # called with 'foo.bar....'\n        package, obj = parts\n        module = __import__(package, fromlist=[obj])\n        try:\n            pak = getattr(module, obj)\n        except AttributeError:\n            raise ImportError(\"No module named %s\" % obj) from None\n        return pak\n    else:\n        # called with un-dotted string\n    
    return __import__(parts[0])\n\n\ndef get_process_proxy_config(kernelspec: KernelSpec) -> dict[str, Any]:\n    \"\"\"\n    Return the process-proxy stanza from the kernelspec.\n    Checks the kernelspec's metadata dictionary for a process proxy entry.\n    If found, it is returned, else one is created relative to the LocalProcessProxy and returns.\n    Parameters\n    ----------\n    kernelspec : obj\n        The kernel specification object from which the process-proxy dictionary is derived.\n    Returns\n    -------\n    process_proxy : dict\n        The process proxy portion of the kernelspec.  If one does not exist, it will contain the default\n        information.  If no `config` sub-dictionary exists, an empty `config` dictionary will be present.\n    \"\"\"\n    if \"process_proxy\" in kernelspec.metadata:\n        process_proxy = kernelspec.metadata.get(\"process_proxy\")\n        if \"class_name\" in process_proxy:  # If no class_name, return default\n            if \"config\" not in process_proxy:  # if class_name, but no config stanza, add one\n                process_proxy.update({\"config\": {}})\n            return process_proxy  # Return what we found (plus config stanza if necessary)\n    return {\n        \"class_name\": \"enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy\",\n        \"config\": {},\n    }\n\n\ndef new_kernel_id(**kwargs: dict[str, Any] | None) -> str:\n    \"\"\"\n    This method provides a mechanism by which clients can specify a kernel's id.  In this case\n    that mechanism is via the per-kernel environment variable: KERNEL_ID.  If specified, its value\n    will be validated and returned, otherwise the result from the provided method is returned.\n    NOTE: This method exists in jupyter_client.multikernelmanager.py for releases > 5.2.3.  If you\n    find that this method is not getting invoked, then you likely need to update the version of\n    jupyter_client.  
The Enterprise Gateway dependency will be updated once new releases of\n    jupyter_client are more prevalent.\n    Returns\n    -------\n    kernel_id : str\n        The uuid string to associate with the new kernel\n    \"\"\"\n    log = kwargs.pop(\"log\", None) or traitlets_log.get_logger()\n    kernel_id_fn = kwargs.pop(\"kernel_id_fn\", None) or (lambda: str(uuid.uuid4()))\n\n    env = kwargs.get(\"env\")\n    if env and env.get(\"KERNEL_ID\"):  # If there's a KERNEL_ID in the env, check it out\n        # convert string back to UUID - validating string in the process.\n        str_kernel_id = env.get(\"KERNEL_ID\")\n        try:\n            str_v4_kernel_id = str(uuid.UUID(str_kernel_id, version=4))\n            if str_kernel_id != str_v4_kernel_id:  # Given string is not uuid v4 compliant\n                msg = \"value is not uuid v4 compliant\"\n                raise ValueError(msg)\n        except ValueError as ve:\n            log.error(\n                \"Invalid v4 UUID value detected in ['env']['KERNEL_ID']: '{}'!  Error: {}\".format(\n                    str_kernel_id, ve\n                )\n            )\n            raise ve\n        # user-provided id is valid, use it\n        kernel_id = str(str_kernel_id)\n        log.debug(f\"Using user-provided kernel_id: {kernel_id}\")\n    else:\n        kernel_id = kernel_id_fn(**kwargs)\n\n    return kernel_id\n\n\nclass TrackPendingRequests:\n    \"\"\"\n     Simple class to track (increment/decrement) pending kernel start requests, both total and per user.\n    This tracking is necessary due to an inherent race condition that occurs now that kernel startup is\n    asynchronous.  
As a result, multiple/simultaneous requests must be considered, in addition all existing\n    kernel sessions.\n    \"\"\"\n\n    _pending_requests_all = 0\n    _pending_requests_user: ClassVar = {}\n\n    def increment(self, username: str) -> None:\n        \"\"\"Increment the requests for a username.\"\"\"\n        self._pending_requests_all += 1\n        cur_val = int(self._pending_requests_user.get(username, 0))\n        self._pending_requests_user[username] = cur_val + 1\n\n    def decrement(self, username: str) -> None:\n        \"\"\"Decrement the requests for a username.\"\"\"\n        self._pending_requests_all -= 1\n        cur_val = int(self._pending_requests_user.get(username))\n        self._pending_requests_user[username] = cur_val - 1\n\n    def get_counts(self, username: str) -> tuple[int, int]:\n        \"\"\"Get the counts for a username.\"\"\"\n        return self._pending_requests_all, int(self._pending_requests_user.get(username, 0))\n\n\nclass RemoteMappingKernelManager(AsyncMappingKernelManager):\n    \"\"\"\n    Extends the AsyncMappingKernelManager with support for managing remote kernels via the process-proxy.\n    \"\"\"\n\n    def _context_default(self) -> Context:\n        \"\"\"\n        We override the _context_default method in\n        \"\"\"\n        zmq_context = super()._context_default()\n        if self.shared_context:  # this should be True by default\n            # pyzmq currently does not expose defaults for these values, so we replicate them here\n            # libzmq/zmq.h: ZMQ_MAX_SOCKETS_DLFT = 1023; zmq.Context.MAX_SOCKETS\n            # libzmq/zmq.h: ZMQ_IO_THREADS_DFLT = 1; zmq.Context.IO_THREADS\n            zmq_max_sock_desired = int(os.getenv(\"EG_ZMQ_MAX_SOCKETS\", zmq_context.MAX_SOCKETS))\n            if zmq_max_sock_desired != zmq_context.MAX_SOCKETS:\n                zmq_context.set(MAX_SOCKETS, zmq_max_sock_desired)\n                self.log.info(f\"Set ZMQ_MAX_SOCKETS to {zmq_context.MAX_SOCKETS}\")\n\n      
      zmq_io_threads_desired = int(os.getenv(\"EG_ZMQ_IO_THREADS\", zmq_context.IO_THREADS))\n            if zmq_io_threads_desired != zmq_context.IO_THREADS:\n                zmq_context.set(IO_THREADS, zmq_io_threads_desired)\n                self.log.info(f\"Set ZMQ_IO_THREADS to {zmq_context.IO_THREADS}\")\n\n        return zmq_context\n\n    pending_requests: TrackPendingRequests = (\n        TrackPendingRequests()\n    )  # Used to enforce max-kernel limits\n\n    def _kernel_manager_class_default(self) -> str:\n        return \"enterprise_gateway.services.kernels.remotemanager.RemoteKernelManager\"\n\n    def check_kernel_id(self, kernel_id: str) -> None:\n        \"\"\"Check that a kernel_id exists and raise 404 if not.\"\"\"\n        if kernel_id not in self and not self._refresh_kernel(kernel_id):\n            self.parent.kernel_session_manager.delete_session(kernel_id)\n            raise web.HTTPError(404, \"Kernel does not exist: %s\" % kernel_id)\n\n    def _refresh_kernel(self, kernel_id: str) -> bool:\n        if self.parent.availability_mode == EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION:\n            try:\n                self.parent.kernel_session_manager.load_session(kernel_id)\n            except Exception as e:\n                self.log.error(f\"Failed to load session, kernel_id:{kernel_id}\", e)\n                return False\n            return self.parent.kernel_session_manager.start_session(kernel_id)\n        # else we should throw 404 when not using an availability mode of 'replication'\n        return False\n\n    async def start_kernel(self, *args: list[Any] | None, **kwargs: dict[str, Any] | None) -> str:\n        \"\"\"\n        Starts a kernel for a session and return its kernel_id.\n        Returns\n        -------\n        kernel_id : str\n            The uuid associated with the new kernel.  
This string will equal the value\n            of the input parameter `kernel_id` if one was provided.\n        \"\"\"\n        username = KernelSessionManager.get_kernel_username(**kwargs)\n        self.log.debug(\n            \"RemoteMappingKernelManager.start_kernel: {kernel_name}, kernel_username: {username}\".format(\n                kernel_name=kwargs[\"kernel_name\"], username=username\n            )\n        )\n\n        # Check max kernel limits\n        self._enforce_kernel_limits(username)\n\n        RemoteMappingKernelManager.pending_requests.increment(username)\n        try:\n            kernel_id = await super().start_kernel(*args, **kwargs)\n        finally:\n            RemoteMappingKernelManager.pending_requests.decrement(username)\n        self.parent.kernel_session_manager.create_session(kernel_id, **kwargs)\n        return kernel_id\n\n    async def restart_kernel(self, kernel_id: str, now: bool = False) -> None:\n        \"\"\"Restart a kernel.\"\"\"\n        kernel = self.get_kernel(kernel_id)\n        if kernel.restarting:  # assuming duplicate request.\n            await self.wait_for_restart_finish(kernel_id, \"restart\")\n            self.log.info(\"Skipping kernel restart as this was duplicate request.\")\n            return\n        try:\n            kernel.restarting = True  # Moved in out of RemoteKernelManager\n            await super().restart_kernel(kernel_id)\n        finally:\n            kernel.restarting = False\n\n    async def shutdown_kernel(\n        self, kernel_id: str, now: bool = False, restart: bool = False\n    ) -> None:\n        \"\"\"Shut down a kernel.\"\"\"\n        kernel = self.get_kernel(kernel_id)\n        if kernel.restarting:\n            await self.wait_for_restart_finish(kernel_id, \"shutdown\")\n        try:\n            await super().shutdown_kernel(kernel_id, now, restart)\n        except KeyError as ke:  # this is hint for multiple shutdown request.\n            self.log.exception(f\"Exception while 
shutting down kernel: '{kernel_id}': {ke}\")\n            raise web.HTTPError(404, \"Kernel does not exist: %s\" % kernel_id) from None\n\n    async def wait_for_restart_finish(self, kernel_id: str, action: str = \"shutdown\") -> None:\n        \"\"\"Wait for a kernel restart to finish.\"\"\"\n        kernel = self.get_kernel(kernel_id)\n        start_time = float(time.time())  # epoch time\n        timeout = kernel.kernel_launch_timeout\n        poll_time = kernel_restart_status_poll_interval\n        self.log.info(\n            f\"Kernel '{kernel_id}' was restarting when {action} request received. Polling every {poll_time} \"\n            f\"seconds for next {timeout} seconds for kernel to complete its restart.\"\n        )\n        while kernel.restarting:\n            now = float(time.time())\n            if (now - start_time) > timeout:\n                self.log.info(\n                    f\"Timeout: Exiting restart wait loop in order to {action} kernel '{kernel_id}'.\"\n                )\n                break\n            await asyncio.sleep(poll_time)\n        return\n\n    def _enforce_kernel_limits(self, username: str) -> None:\n        \"\"\"\n        If MaxKernels or MaxKernelsPerUser are configured, enforce the respective values.\n        \"\"\"\n\n        if self.parent.max_kernels is not None or self.parent.max_kernels_per_user >= 0:\n            (\n                pending_all,\n                pending_user,\n            ) = RemoteMappingKernelManager.pending_requests.get_counts(username)\n\n            # Enforce overall limit...\n            if self.parent.max_kernels is not None:\n                active_and_pending = len(self.list_kernels()) + pending_all\n                if active_and_pending >= self.parent.max_kernels:\n                    error_message = (\n                        \"A max kernels limit has been set to {} and there are \"\n                        \"currently {} active and pending {}.\".format(\n                            
self.parent.max_kernels,\n                            active_and_pending,\n                            \"kernel\" if active_and_pending == 1 else \"kernels\",\n                        )\n                    )\n                    self.log.error(error_message)\n                    raise web.HTTPError(403, error_message)\n\n            # Enforce per-user limit...\n            if self.parent.max_kernels_per_user >= 0 and self.parent.kernel_session_manager:\n                active_and_pending = (\n                    self.parent.kernel_session_manager.active_sessions(username) + pending_user\n                )\n                if active_and_pending >= self.parent.max_kernels_per_user:\n                    error_message = (\n                        \"A max kernels per user limit has been set to {} and user '{}' \"\n                        \"currently has {} active and pending {}.\".format(\n                            self.parent.max_kernels_per_user,\n                            username,\n                            active_and_pending,\n                            \"kernel\" if active_and_pending == 1 else \"kernels\",\n                        )\n                    )\n                    self.log.error(error_message)\n                    raise web.HTTPError(403, error_message)\n        return\n\n    def remove_kernel(self, kernel_id: str) -> None:\n        \"\"\"\n        Removes the kernel associated with `kernel_id` from the internal map and deletes the kernel session.\n        \"\"\"\n        try:\n            super().remove_kernel(kernel_id)\n        except KeyError:  # this is hint for multiple shutdown request.\n            self.log.debug(f\"Exception while removing kernel {kernel_id}: kernel not found.\")\n\n        self.parent.kernel_session_manager.delete_session(kernel_id)\n\n    def start_kernel_from_session(\n        self,\n        kernel_id: str,\n        kernel_name: str,\n        connection_info: dict[str, Any],\n        process_info: dict[str, Any],\n 
       launch_args: dict[str, Any],\n    ) -> bool:\n        \"\"\"\n        Starts a kernel from a persisted kernel session.\n        This method is used in HA situations when a previously running Enterprise Gateway instance has\n        terminated and a new instance - with access to the persisted kernel sessions is starting up.\n        It attempts to \"revive\" the persisted kernel session by instantiating the necessary class instances\n        to re-establish communication with the currently active kernel.\n        Note that this method is typically only successful when kernel instances are remote from the\n        previously running Enterprise Gateway server - since the need to re-establish communications\n        won't work if the kernels were also local to the (probably) terminated server.\n        Parameters\n        ----------\n        kernel_id : str\n            The uuid string corresponding to the kernel to start\n        kernel_name : str\n            The name of kernel to start\n        connection_info : dict\n            The connection information for the kernel loaded from persistent storage\n        process_info : dict\n            The process information corresponding to the process-proxy used by the kernel and loaded\n            from persistent storage\n        launch_args : dict\n            The arguments used for the initial launch of the kernel\n        Returns\n        -------\n            True if kernel could be located and started, False otherwise.\n        \"\"\"\n        # Create a KernelManger instance and load connection and process info, then confirm the kernel is still\n        # alive.\n        constructor_kwargs = {}\n        if self.kernel_spec_manager:\n            constructor_kwargs[\"kernel_spec_manager\"] = self.kernel_spec_manager\n\n        # Construct a kernel manager...\n        km = self.kernel_manager_factory(\n            connection_file=os.path.join(self.connection_dir, \"kernel-%s.json\" % kernel_id),\n            
parent=self,\n            log=self.log,\n            kernel_name=kernel_name,\n            **constructor_kwargs,\n        )\n\n        # Load connection info into member vars - no need to write out connection file\n        km.load_connection_info(connection_info)\n\n        km._launch_args = launch_args\n\n        # Construct a process-proxy\n        process_proxy = get_process_proxy_config(km.kernel_spec)\n        process_proxy_class = import_item(process_proxy.get(\"class_name\"))\n        km.process_proxy = process_proxy_class(km, proxy_config=process_proxy.get(\"config\"))\n        km.process_proxy.load_process_info(process_info)\n\n        # Confirm we can even poll the process.  If not, remove the persisted session.\n        if km.process_proxy.poll() is False:\n            return False\n\n        km.kernel = km.process_proxy\n        km.start_restarter()\n        km._connect_control_socket()\n        self._kernels[kernel_id] = km\n        self._kernel_connections[kernel_id] = 0\n        self.start_watching_activity(kernel_id)\n        self.add_restart_callback(\n            kernel_id,\n            lambda: self._handle_kernel_died(kernel_id),\n            \"dead\",\n        )\n        # Only initialize culling if available.  
Warning message will be issued in gatewayapp at startup.\n        func = getattr(self, \"initialize_culler\", None)\n        if func:\n            func()\n        return True\n\n    def new_kernel_id(self, **kwargs: dict[str, Any] | None) -> str:\n        \"\"\"\n        Determines the kernel_id to use for a new kernel.\n        \"\"\"\n\n        return new_kernel_id(kernel_id_fn=super().new_kernel_id, log=self.log, **kwargs)\n\n\nclass RemoteKernelManager(EnterpriseGatewayConfigMixin, AsyncIOLoopKernelManager):\n    \"\"\"\n    Extends the AsyncIOLoopKernelManager used by the RemoteMappingKernelManager.\n    This class is responsible for detecting that a remote kernel is desired, then launching the\n    appropriate class (previously pulled from the kernel spec).  The process 'proxy' is\n    returned - upon which methods of poll(), wait(), send_signal(), and kill() can be called.\n    \"\"\"\n\n    def __init__(self, **kwargs: dict[str, Any] | None):\n        \"\"\"Initialize the remote kernel manager.\"\"\"\n        super().__init__(**kwargs)\n        self.process_proxy = None\n        self.response_address = None\n        self.public_key = None\n        self.sigint_value = None\n        self.kernel_id = None\n        self.user_overrides = {}\n        self.kernel_launch_timeout = default_kernel_launch_timeout\n        self.restarting = False  # need to track whether we're in a restart situation or not\n        self._activity_stream = None\n\n        # If this instance supports port caching, then disable cache_ports since we don't need this\n        # for remote kernels and it breaks the ability to support port ranges for local kernels (which\n        # is viewed as more important for EG).\n        # Note: This check MUST remain in this method since cache_ports is used immediately\n        # following construction.\n        if hasattr(self, \"cache_ports\"):\n            self.cache_ports = False\n\n        if not self.connection_file:\n            self.kernel_id = 
new_kernel_id(log=self.log)\n\n        self._link_dependent_props()\n\n        if self.kernel_spec_manager is None:\n            self.kernel_spec_manager = self.kernel_spec_manager_class(\n                parent=self,\n            )\n\n    def _link_dependent_props(self) -> None:\n        \"\"\"\n        Ensure that RemoteKernelManager, when used as part of an EnterpriseGatewayApp,\n        has certain necessary configuration stay in sync with the app's configuration.\n        When RemoteKernelManager is used independently, this function is a no-op, and\n        default values or configuration set on this class is used.\n        \"\"\"\n        try:\n            eg_instance = self.parent.parent\n        except AttributeError:\n            return\n        dependent_props = [\n            \"authorized_users\",\n            \"unauthorized_users\",\n            \"port_range\",\n            \"impersonation_enabled\",\n            \"max_kernels_per_user\",\n            \"client_envs\",\n            \"inherited_envs\",\n            \"yarn_endpoint\",\n            \"alt_yarn_endpoint\",\n            \"yarn_endpoint_security_enabled\",\n            \"conductor_endpoint\",\n            \"remote_hosts\",\n            \"load_balancing_algorithm\",\n        ]\n        self._links = [\n            directional_link((eg_instance, prop), (self, prop)) for prop in dependent_props\n        ]\n\n    async def start_kernel(self, **kwargs: dict[str, Any] | None):\n        \"\"\"\n        Starts a kernel in a separate process.\n        Where the started kernel resides depends on the configured process proxy.\n        Parameters\n        ----------\n        `**kwargs` : optional\n             keyword arguments that are passed down to build the kernel_cmd\n             and launching the kernel (e.g. 
Popen kwargs).\n        \"\"\"\n        self._get_process_proxy()\n        self._capture_user_overrides(**kwargs)\n        await super().start_kernel(**kwargs)\n\n    def _capture_user_overrides(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Make a copy of any allowed or KERNEL_ env values provided by user.  These will be injected\n        back into the env after the kernelspec env has been applied.  This enables defaulting behavior\n        of the kernelspec env stanza that would have otherwise overridden the user-provided values.\n        \"\"\"\n        env = kwargs.get(\"env\", {})\n        # If KERNEL_LAUNCH_TIMEOUT is passed in the payload, override it.\n        self.kernel_launch_timeout = float(\n            env.get(\"KERNEL_LAUNCH_TIMEOUT\", default_kernel_launch_timeout)\n        )\n        self.user_overrides.update(\n            {\n                key: value\n                for key, value in env.items()\n                if key.startswith(\"KERNEL_\")\n                or key in self.inherited_envs\n                or key in self.client_envs\n            }\n        )\n\n    def format_kernel_cmd(self, extra_arguments: list[str] | None = None) -> list[str]:\n        \"\"\"\n        Replace templated args (e.g. 
{response_address}, {port_range}, or {kernel_id}).\n        \"\"\"\n        cmd = super().format_kernel_cmd(extra_arguments)\n\n        if self.response_address or self.port_range or self.kernel_id or self.public_key:\n            ns = self._launch_args.copy()\n            if self.response_address:\n                ns[\"response_address\"] = self.response_address\n            if self.public_key:\n                ns[\"public_key\"] = self.public_key\n            if self.port_range:\n                ns[\"port_range\"] = self.port_range\n            if self.kernel_id:\n                ns[\"kernel_id\"] = self.kernel_id\n\n            pat = re.compile(r\"\\{([A-Za-z0-9_]+)\\}\")\n\n            def from_ns(match):\n                \"\"\"Get the key out of ns if it's there, otherwise no change.\"\"\"\n                return ns.get(match.group(1), match.group())\n\n            return [pat.sub(from_ns, arg) for arg in cmd]\n        return cmd\n\n    async def _launch_kernel(\n        self, kernel_cmd: list[str], **kwargs: dict[str, Any] | None\n    ) -> BaseProcessProxyABC:\n        # Note: despite the under-bar prefix to this method, the jupyter_client comment says that\n        # this method should be \"[overridden] in a subclass to launch kernel subprocesses differently\".\n        # So that's what we've done.\n\n        env = kwargs[\"env\"]\n\n        # Apply user_overrides to enable defaulting behavior from kernelspec.env stanza.  
Note that we do this\n        # BEFORE setting KERNEL_GATEWAY and removing {EG,KG}_AUTH_TOKEN so those operations cannot be overridden.\n        env.update(self.user_overrides)\n\n        # No longer using Kernel Gateway, but retain references of B/C purposes\n        env[\"KERNEL_GATEWAY\"] = \"1\"\n        if \"EG_AUTH_TOKEN\" in env:\n            del env[\"EG_AUTH_TOKEN\"]\n        if \"KG_AUTH_TOKEN\" in env:\n            del env[\"KG_AUTH_TOKEN\"]\n\n        self.log.debug(\n            f\"Launching kernel: '{self.kernel_spec.display_name}' with command: {kernel_cmd}\"\n        )\n\n        proxy = await self.process_proxy.launch_process(kernel_cmd, **kwargs)\n        return proxy\n\n    def request_shutdown(self, restart: bool = False) -> None:\n        \"\"\"\n        Send a shutdown request via control channel and process proxy (if remote).\n        \"\"\"\n        super().request_shutdown(restart)\n\n        # If we're using a remote proxy, we need to send the launcher indication that we're\n        # shutting down so it can exit its listener thread, if its using one.\n        if isinstance(self.process_proxy, RemoteProcessProxy):\n            self.process_proxy.shutdown_listener()\n\n    async def restart_kernel(self, now: bool = False, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Restarts a kernel with the arguments that were used to launch it.\n        This is an automatic restart request (now=True) AND this is associated with a\n        remote kernel, check the active connection count.  If there are zero connections, do\n        not restart the kernel.\n        Parameters\n        ----------\n        now : bool, optional\n            If True, the kernel is forcefully restarted *immediately*, without\n            having a chance to do any cleanup action.  
Otherwise the kernel is\n            given 1s to clean up before a forceful restart is issued.\n            In all cases the kernel is restarted, the only difference is whether\n            it is given a chance to perform a clean shutdown or not.\n        `**kwargs` : optional\n            Any options specified here will overwrite those used to launch the\n            kernel.\n        \"\"\"\n        kernel_id = self.kernel_id or os.path.basename(self.connection_file).replace(\n            \"kernel-\", \"\"\n        ).replace(\".json\", \"\")\n        # Check if this is a remote process proxy and if now = True. If so, check its connection count. If no\n        # connections, shutdown else perform the restart.  Note: auto-restart sets now=True, but handlers use\n        # the default value (False).\n        if (  # noqa\n            isinstance(self.process_proxy, RemoteProcessProxy)\n            and now\n            and self.mapping_kernel_manager\n        ):\n            if self.mapping_kernel_manager._kernel_connections.get(kernel_id, 0) == 0:\n                self.log.warning(\n                    \"Remote kernel ({}) will not be automatically restarted since there are no \"\n                    \"clients connected at this time.\".format(kernel_id)\n                )\n                # Use the parent mapping kernel manager so activity monitoring and culling is also shutdown\n                await self.mapping_kernel_manager.shutdown_kernel(kernel_id, now=now)\n                return\n\n        if now:  # if auto-restarting (when now is True), indicate we're restarting.\n            self.restarting = True\n\n        await super().restart_kernel(now, **kwargs)\n        if isinstance(self.process_proxy, RemoteProcessProxy):  # for remote kernels...\n            # Re-establish activity watching...\n            if self._activity_stream:\n                self._activity_stream.close()\n                self._activity_stream = None\n            if 
self.mapping_kernel_manager:\n                self.mapping_kernel_manager.start_watching_activity(kernel_id)\n        # Refresh persisted state.\n        if self.kernel_session_manager:\n            self.kernel_session_manager.refresh_session(kernel_id)\n        if now:\n            self.restarting = False\n\n    async def signal_kernel(self, signum: int) -> None:\n        \"\"\"\n        Sends signal `signum` to the kernel process.\n        \"\"\"\n        if self.has_kernel:\n            if signum == signal.SIGINT:\n                if self.sigint_value is None:\n                    # If we're interrupting the kernel, check if kernelspec's env defines\n                    # an alternate interrupt signal.  We'll do this once per interrupted kernel.\n                    # This is required for kernels whose language may prevent signals across\n                    # process/user boundaries (Scala, for example).\n                    self.sigint_value = signum  # use default\n                    alt_sigint = self.kernel_spec.env.get(\"EG_ALTERNATE_SIGINT\")\n                    if alt_sigint:\n                        try:\n                            sig_value = getattr(signal, alt_sigint)\n                            if isinstance(sig_value, int):  # Python 2\n                                self.sigint_value = sig_value\n                            else:  # Python 3\n                                self.sigint_value = sig_value.value\n                            self.log.debug(\n                                \"Converted EG_ALTERNATE_SIGINT '{}' to value '{}' to use as interrupt signal.\".format(\n                                    alt_sigint, self.sigint_value\n                                )\n                            )\n                        except AttributeError:\n                            self.log.warning(\n                                \"Error received when attempting to convert EG_ALTERNATE_SIGINT of \"\n                                \"'{}' to a 
value. Check kernelspec entry for kernel '{}' - using \"\n                                \"default 'SIGINT'\".format(alt_sigint, self.kernel_spec.display_name)\n                            )\n                self.kernel.send_signal(self.sigint_value)\n            else:\n                self.kernel.send_signal(signum)\n        else:\n            msg = \"Cannot signal kernel. No kernel is running!\"\n            raise RuntimeError(msg)\n\n    def cleanup(self, connection_file: bool = True) -> None:\n        \"\"\"\n        Clean up resources when the kernel is shut down\n        \"\"\"\n\n        # Note This method has been deprecated in jupyter_client 6.1.5 and\n        # remains here for pre-6.2.0 jupyter_client installations.\n\n        # Note we must use `process_proxy` here rather than `kernel`, although they're the same value.\n        # The reason is because if the kernel shutdown sequence has triggered its \"forced kill\" logic\n        # then that method (jupyter_client/manager.py/_kill_kernel()) will set `self.kernel` to None,\n        # which then prevents process proxy cleanup.\n        if self.process_proxy:\n            self.process_proxy.cleanup()\n            self.process_proxy = None\n        return super().cleanup(connection_file)\n\n    def cleanup_resources(self, restart: bool = False) -> None:\n        \"\"\"\n        Clean up resources when the kernel is shut down\n        \"\"\"\n\n        # Note This method was introduced in jupyter_client 6.1.5 and\n        # will not be called until jupyter_client 6.2.0 has been released.\n\n        # Note we must use `process_proxy` here rather than `kernel`, although they're the same value.\n        # The reason is because if the kernel shutdown sequence has triggered its \"forced kill\" logic\n        # then that method (jupyter_client/manager.py/_kill_kernel()) will set `self.kernel` to None,\n        # which then prevents process proxy cleanup.\n        if self.process_proxy:\n            
self.process_proxy.cleanup()\n            self.process_proxy = None\n\n        return super().cleanup_resources(restart)\n\n    def write_connection_file(self) -> None:\n        \"\"\"\n        Write connection info to JSON dict in self.connection_file if the kernel is local.\n        If this is a remote kernel that's using a response address or we're restarting, we should skip the\n        write_connection_file since it will create 5 useless ports that would not adhere to port-range\n        restrictions if configured.\n        \"\"\"\n        if (\n            isinstance(self.process_proxy, LocalProcessProxy) or not self.response_address\n        ) and not self.restarting:\n            # However, since we *may* want to limit the selected ports, go ahead and get the ports using\n            # the process proxy (will be LocalProcessProxy for default case) since the port selection will\n            # handle the default case when the member ports aren't set anyway.\n            ports = self.process_proxy.select_ports(5)\n            self.shell_port = ports[0]\n            self.iopub_port = ports[1]\n            self.stdin_port = ports[2]\n            self.hb_port = ports[3]\n            self.control_port = ports[4]\n            super().write_connection_file()\n        return None\n\n    def _get_process_proxy(self) -> None:\n        \"\"\"\n        Reads the associated kernelspec and to see if has a process proxy stanza.\n        If one exists, it instantiates an instance.  
If a process proxy is not\n        specified in the kernelspec, a LocalProcessProxy stanza is fabricated and\n        instantiated.\n        \"\"\"\n        process_proxy_cfg = get_process_proxy_config(self.kernel_spec)\n        process_proxy_class_name = process_proxy_cfg.get(\"class_name\")\n        self.log.debug(\n            \"Instantiating kernel '{}' with process proxy: {}\".format(\n                self.kernel_spec.display_name, process_proxy_class_name\n            )\n        )\n        process_proxy_class = import_item(process_proxy_class_name)\n        self.process_proxy = process_proxy_class(\n            kernel_manager=self, proxy_config=process_proxy_cfg.get(\"config\")\n        )\n\n    # When this class is used by an EnterpriseGatewayApp instance, it will be able to\n    # access the app's configuration using the traitlet parent chain.\n    # When it's used independently, it should fall back to safe defaults.\n    @property\n    def kernel_session_manager(self) -> KernelSessionManager | None:\n        try:\n            return self.parent.parent.kernel_session_manager\n        except AttributeError:\n            return None\n\n    @property\n    def cull_idle_timeout(self) -> int:\n        try:\n            return self.parent.cull_idle_timeout\n        except AttributeError:\n            return 0\n\n    @property\n    def mapping_kernel_manager(self) -> RemoteMappingKernelManager | None:\n        try:\n            return self.parent\n        except AttributeError:\n            return None\n"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/__init__.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom .kernelspec_cache import KernelSpecCache  # noqa\n"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/handlers.py",
    "content": "\"\"\"Tornado handlers for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\nfrom typing import Dict, List, Optional\n\nfrom jupyter_server.base.handlers import JupyterHandler\nfrom jupyter_server.services.kernelspecs.handlers import is_kernelspec_model, kernelspec_model\nfrom jupyter_server.utils import ensure_async, url_unescape\nfrom tornado import web\nfrom traitlets import Set\n\nfrom ...base.handlers import APIHandler\nfrom ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin\nfrom .kernelspec_cache import KernelSpecCache\n\n\ndef apply_user_filter(\n    kernelspec_model: Dict[str, object],\n    global_authorized_list: Set,\n    global_unauthorized_list: Set,\n    kernel_user: Optional[str] = None,\n) -> Optional[Dict[str, object]]:\n    \"\"\"\n    If authorization lists are configured - either within the kernelspec or globally, ensure\n    the user is authorized for the given kernelspec.\n    \"\"\"\n    if kernel_user:\n        # Check the unauthorized list of the kernelspec, then the globally-configured unauthorized list - the\n        # semantics of which are a union of the two lists.\n        try:\n            # Check if kernel_user in kernelspec_model\n            unauthorized_list = kernelspec_model[\"spec\"][\"metadata\"][\"process_proxy\"][\"config\"][\n                \"unauthorized_users\"\n            ]\n        except KeyError:\n            pass\n        else:\n            if kernel_user in unauthorized_list:\n                return None\n        if kernel_user in global_unauthorized_list:\n            return None\n\n        # Check the authorized list of the kernelspec, then the globally-configured authorized list -\n        # but only if the kernelspec list doesn't exist.  
This is because the kernelspec set of authorized\n        # users may be a subset of globally authorized users and is, essentially, used as a denial to those\n        # not defined in the kernelspec's list.\n        try:\n            authorized_list = kernelspec_model[\"spec\"][\"metadata\"][\"process_proxy\"][\"config\"][\n                \"authorized_users\"\n            ]\n        except KeyError:\n            if global_authorized_list and kernel_user not in global_authorized_list:\n                return None\n        else:\n            if authorized_list and kernel_user not in authorized_list:\n                return None\n\n    return kernelspec_model\n\n\nclass MainKernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler):\n    \"\"\"The root kernel spec handler.\"\"\"\n\n    @property\n    def kernel_spec_cache(self) -> KernelSpecCache:\n        return self.settings[\"kernel_spec_cache\"]\n\n    @web.authenticated\n    async def get(self) -> None:\n        \"\"\"Get the kernel spec models.\"\"\"\n        ksc = self.kernel_spec_cache\n        km = self.kernel_manager\n        model = {}\n        model[\"default\"] = km.default_kernel_name\n        model[\"kernelspecs\"] = specs = {}\n\n        kernel_user_filter = self.request.query_arguments.get(\"user\")\n        kernel_user = None\n        if kernel_user_filter:\n            kernel_user = kernel_user_filter[0].decode(\"utf-8\")\n            if kernel_user:\n                self.log.debug(\"Searching kernels for user '%s' \" % kernel_user)\n\n        kspecs = await ensure_async(ksc.get_all_specs())\n\n        list_kernels_found = []\n        for kernel_name, kernel_info in kspecs.items():\n            try:\n                if is_kernelspec_model(kernel_info):\n                    d = kernel_info\n                else:\n                    d = kernelspec_model(\n                        self, kernel_name, kernel_info[\"spec\"], kernel_info[\"resource_dir\"]\n                    )\n   
             d = apply_user_filter(\n                    d,\n                    self.settings[\"eg_authorized_users\"],\n                    self.settings[\"eg_unauthorized_users\"],\n                    kernel_user,\n                )\n                if d is not None:\n                    specs[kernel_name] = d\n                    list_kernels_found.append(d[\"name\"])\n                else:\n                    self.log.debug(\n                        f\"User {kernel_user} is not authorized to use kernel spec {kernel_name}\"\n                    )\n            except Exception:\n                self.log.error(\"Failed to load kernel spec: '%s'\", kernel_name)\n                continue\n\n        self.set_header(\"Content-Type\", \"application/json\")\n        self.finish(json.dumps(model))\n\n\nclass KernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler):\n    \"\"\"A handler for a specific kernel spec.\"\"\"\n\n    @property\n    def kernel_spec_cache(self) -> KernelSpecCache:\n        return self.settings[\"kernel_spec_cache\"]\n\n    @web.authenticated\n    async def get(self, kernel_name: str) -> None:\n        \"\"\"Get a kernel spec by name.\"\"\"\n        ksc = self.kernel_spec_cache\n        kernel_name = url_unescape(kernel_name)\n        kernel_user_filter = self.request.query_arguments.get(\"user\")\n        kernel_user = None\n        if kernel_user_filter:\n            kernel_user = kernel_user_filter[0].decode(\"utf-8\")\n        try:\n            spec = await ensure_async(ksc.get_kernel_spec(kernel_name))\n        except KeyError:\n            raise web.HTTPError(404, \"Kernel spec %s not found\" % kernel_name) from None\n        if is_kernelspec_model(spec):\n            model = spec\n        else:\n            model = kernelspec_model(self, kernel_name, spec.to_dict(), spec.resource_dir)\n        d = apply_user_filter(\n            model,\n            self.settings[\"eg_authorized_users\"],\n            
self.settings[\"eg_unauthorized_users\"],\n            kernel_user,\n        )\n\n        if d is None:\n            raise web.HTTPError(\n                403, f\"User {kernel_user} is not authorized to use kernel spec {kernel_name}\"\n            )\n\n        self.set_header(\"Content-Type\", \"application/json\")\n        self.finish(json.dumps(model))\n\n\nclass KernelSpecResourceHandler(\n    TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, web.StaticFileHandler, JupyterHandler\n):\n    \"\"\"A handler for kernel spec resources.\"\"\"\n\n    SUPPORTED_METHODS = (\"GET\", \"HEAD\")\n\n    @property\n    def kernel_spec_cache(self) -> KernelSpecCache:\n        return self.settings[\"kernel_spec_cache\"]\n\n    def initialize(self) -> None:\n        \"\"\"Initialize the handler.\"\"\"\n        web.StaticFileHandler.initialize(self, path=\"\")\n\n    @web.authenticated\n    async def get(self, kernel_name: str, path: str, include_body: bool = True) -> None:\n        \"\"\"Get a resource for a kernel.\"\"\"\n        ksc = self.kernel_spec_cache\n        try:\n            kernelspec = await ensure_async(ksc.get_kernel_spec(kernel_name))\n            self.root = kernelspec.resource_dir\n        except KeyError as e:\n            raise web.HTTPError(404, \"Kernel spec %s not found\" % kernel_name) from e\n        self.log.debug(\"Serving kernel resource from: %s\", self.root)\n        return await web.StaticFileHandler.get(self, path, include_body=include_body)\n\n    @web.authenticated\n    def head(self, kernel_name: str, path: str) -> None:\n        \"\"\"Get the head for a kernel resource.\"\"\"\n        return self.get(kernel_name, path, include_body=False)\n\n\nkernel_name_regex: str = r\"(?P<kernel_name>[\\w\\.\\-%]+)\"\n\n# Extends the default handlers from the jupyter_server package with token auth, CORS\n# and JSON errors.\ndefault_handlers: List[tuple] = [\n    (r\"/api/kernelspecs\", MainKernelSpecHandler),\n    (r\"/api/kernelspecs/%s\" % 
kernel_name_regex, KernelSpecHandler),\n    (r\"/kernelspecs/%s/(?P<path>.*)\" % kernel_name_regex, KernelSpecResourceHandler),\n]\n"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/kernelspec_cache.py",
    "content": "\"\"\"Cache handling for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport os\nfrom typing import ClassVar, Dict, Optional, Union\n\nfrom jupyter_client.kernelspec import KernelSpec\nfrom jupyter_server.utils import ensure_async\nfrom traitlets.config import SingletonConfigurable\nfrom traitlets.traitlets import CBool, default\nfrom watchdog.events import FileMovedEvent, FileSystemEventHandler\nfrom watchdog.observers import Observer\n\n# Simplify the typing.  Cache items are essentially dictionaries of strings\n# to either strings or dictionaries.  The items themselves are indexed by\n# the kernel_name (case-insensitive).\nCacheItemType = Dict[str, Union[str, Dict]]\n\n\nclass KernelSpecCache(SingletonConfigurable):\n    \"\"\"The primary (singleton) instance for managing KernelSpecs.\n\n    This class contains the configured KernelSpecManager instance upon\n    which it uses to populate the cache (when enabled) or as a pass-thru\n    (when disabled).\n\n    Note that the KernelSpecManager returns different formats from methods\n    get_all_specs() and get_kernel_spec().  The format in which cache entries\n    are stored is that of the get_all_specs() results.  As a result, some\n    conversion between formats is necessary, depending on which method is called.\n    \"\"\"\n\n    cache_enabled_env = \"EG_KERNELSPEC_CACHE_ENABLED\"\n    cache_enabled = CBool(\n        True,\n        config=True,\n        help=\"\"\"Enable Kernel Specification caching. 
(EG_KERNELSPEC_CACHE_ENABLED env var)\"\"\",\n    )\n\n    @default(\"cache_enabled\")\n    def _cache_enabled_default(self):\n        return os.getenv(self.cache_enabled_env, \"false\").lower() in (\"true\", \"1\")\n\n    def __init__(self, kernel_spec_manager, **kwargs) -> None:\n        \"\"\"Initialize the cache.\"\"\"\n        super().__init__(**kwargs)\n        self.kernel_spec_manager = kernel_spec_manager\n        self._initialize()\n\n    async def get_kernel_spec(self, kernel_name: str) -> KernelSpec:\n        \"\"\"Get the named kernel specification.\n\n        This method is equivalent to calling KernelSpecManager.get_kernel_spec().  If\n        caching is enabled, it will pull the item from the cache.  If no item is\n        returned (as will be the case if caching is disabled) it will defer to the\n        currently configured KernelSpecManager.  If an item is returned (and caching\n        is enabled), it will be added to the cache.\n        \"\"\"\n        kernelspec = self.get_item(kernel_name)\n        if not kernelspec:\n            kernelspec = await ensure_async(self.kernel_spec_manager.get_kernel_spec(kernel_name))\n            if kernelspec:\n                self.put_item(kernel_name, kernelspec)\n        return kernelspec\n\n    async def get_all_specs(self) -> Dict[str, CacheItemType]:\n        \"\"\"Get all available kernel specifications.\n\n        This method is equivalent to calling KernelSpecManager.get_all_specs().  If\n        caching is enabled, it will pull all items from the cache.  If no items are\n        returned (as will be the case if caching is disabled) it will defer to the\n        currently configured KernelSpecManager.  
If items are returned (and caching\n        is enabled), they will be added to the cache.\n\n        Note that the return type of this method is not a dictionary or list of\n        KernelSpec instances, but rather a dictionary of kernel-name to kernel-info\n        dictionaries are returned - as is the case with the respective return values\n        of the KernelSpecManager methods.\n        \"\"\"\n        kernelspecs = self.get_all_items()\n        if not kernelspecs:\n            kernelspecs = await ensure_async(self.kernel_spec_manager.get_all_specs())\n            if kernelspecs:\n                self.put_all_items(kernelspecs)\n        return kernelspecs\n\n    # Cache-related methods\n    def get_item(self, kernel_name: str) -> Optional[KernelSpec]:\n        \"\"\"Retrieves a named kernel specification from the cache.\n\n        If cache is disabled or the item is not in the cache, None is returned;\n        otherwise, a KernelSpec instance of the item is returned.\n        \"\"\"\n        kernelspec = None\n        if self.cache_enabled:\n            cache_item = self.cache_items.get(kernel_name.lower())\n            if cache_item:  # Convert to KernelSpec\n                # In certain conditions, like when the kernelspec is fetched prior to its removal from the cache,\n                # we can encounter a FileNotFoundError.  
In those cases, treat as a cache miss as well.\n                try:\n                    kernelspec = KernelSpecCache.cache_item_to_kernel_spec(cache_item)\n                except FileNotFoundError:\n                    pass\n            if not kernelspec:\n                self.cache_misses += 1\n                self.log.debug(f\"Cache miss ({self.cache_misses}) for kernelspec: {kernel_name}\")\n        return kernelspec\n\n    def get_all_items(self) -> Dict[str, CacheItemType]:\n        \"\"\"Retrieves all kernel specification from the cache.\n\n        If cache is disabled or no items are in the cache, an empty dictionary is returned;\n        otherwise, a dictionary of kernel-name to specifications (kernel infos) are returned.\n        \"\"\"\n        items = {}\n        if self.cache_enabled:\n            for kernel_name in self.cache_items:\n                cache_item = self.cache_items.get(kernel_name)\n                items[kernel_name] = cache_item\n            if not items:\n                self.cache_misses += 1\n        return items\n\n    def put_item(self, kernel_name: str, cache_item: Union[KernelSpec, CacheItemType]) -> None:\n        \"\"\"Adds or updates a kernel specification in the cache.\n\n        This method can take either a KernelSpec (if called directly from the `get_kernel_spec()`\n        method, or a CacheItemItem (if called from a cache-related method) as that is the type\n        in which the cache items are stored.\n\n        If it determines the cache entry corresponds to a currently unwatched directory,\n        that directory will be added to list of observed directories and scheduled accordingly.\n        \"\"\"\n        if self.cache_enabled:\n            self.log.info(f\"KernelSpecCache: adding/updating kernelspec: {kernel_name}\")\n            if type(cache_item) is KernelSpec:\n                cache_item = KernelSpecCache.kernel_spec_to_cache_item(cache_item)\n\n            resource_dir = cache_item[\"resource_dir\"]\n       
     self.cache_items[kernel_name.lower()] = cache_item\n            observed_dir = os.path.dirname(resource_dir)\n            if observed_dir not in self.observed_dirs:\n                # New directory to watch, schedule it...\n                self.log.debug(f\"KernelSpecCache: observing directory: {observed_dir}\")\n                self.observed_dirs.add(observed_dir)\n                self.observer.schedule(KernelSpecChangeHandler(self), observed_dir, recursive=True)\n\n    def put_all_items(self, kernelspecs: Dict[str, CacheItemType]) -> None:\n        \"\"\"Adds or updates a dictionary of kernel specification in the cache.\"\"\"\n        for kernel_name, cache_item in kernelspecs.items():\n            self.put_item(kernel_name, cache_item)\n\n    def remove_item(self, kernel_name: str) -> Optional[CacheItemType]:\n        \"\"\"Removes the cache item corresponding to kernel_name from the cache.\"\"\"\n        cache_item = None\n        if self.cache_enabled and kernel_name.lower() in self.cache_items:\n            cache_item = self.cache_items.pop(kernel_name.lower())\n            self.log.info(f\"KernelSpecCache: removed kernelspec: {kernel_name}\")\n        return cache_item\n\n    def _initialize(self):\n        \"\"\"Initializes the cache and starts the observer.\"\"\"\n\n        # The kernelspec cache consists of a dictionary mapping the kernel name to the actual\n        # kernelspec data (CacheItemType).\n        self.cache_items = {}  # Maps kernel name to kernelspec\n        self.observed_dirs = set()  # Tracks which directories are being watched\n        self.cache_misses = 0\n\n        # Seed the cache and start the observer\n        if self.cache_enabled:\n            self.observer = Observer()\n            kernelspecs = self.kernel_spec_manager.get_all_specs()\n            self.put_all_items(kernelspecs)\n            # Following adds, see if any of the manager's kernel dirs are not observed and add them\n            for kernel_dir in 
self.kernel_spec_manager.kernel_dirs:\n                if kernel_dir not in self.observed_dirs:\n                    if os.path.exists(kernel_dir):\n                        self.log.info(f\"KernelSpecCache: observing directory: {kernel_dir}\")\n                        self.observed_dirs.add(kernel_dir)\n                        self.observer.schedule(\n                            KernelSpecChangeHandler(self), kernel_dir, recursive=True\n                        )\n                    else:\n                        self.log.warning(\n                            f\"KernelSpecCache: kernel_dir '{kernel_dir}' does not exist\"\n                            \" and will not be observed.\"\n                        )\n            self.observer.start()\n\n    @staticmethod\n    def kernel_spec_to_cache_item(kernelspec: KernelSpec) -> CacheItemType:\n        \"\"\"Converts a KernelSpec instance to a CacheItemType for storage into the cache.\"\"\"\n        cache_item = {}\n        cache_item[\"spec\"] = kernelspec.to_dict()\n        cache_item[\"resource_dir\"] = kernelspec.resource_dir\n        return cache_item\n\n    @staticmethod\n    def cache_item_to_kernel_spec(cache_item: CacheItemType) -> KernelSpec:\n        \"\"\"Converts a CacheItemType to a KernelSpec instance for user consumption.\"\"\"\n        kernel_spec = KernelSpec(resource_dir=cache_item[\"resource_dir\"], **cache_item[\"spec\"])\n        return kernel_spec\n\n\nclass KernelSpecChangeHandler(FileSystemEventHandler):\n    \"\"\"Watchdog handler that filters on specific files deemed representative of a kernel specification.\"\"\"\n\n    # Events related to these files trigger the management of the KernelSpec cache.  
Should we find\n    # other files qualify as indicators of a kernel specification's state (like perhaps detached parameter\n    # files in the future) should be added to this list - at which time it should become configurable.\n    watched_files: ClassVar = [\"kernel.json\"]\n\n    def __init__(self, kernel_spec_cache: KernelSpecCache, **kwargs):\n        \"\"\"Initialize the handler.\"\"\"\n        super().__init__(**kwargs)\n        self.kernel_spec_cache = kernel_spec_cache\n        self.log = kernel_spec_cache.log\n\n    def dispatch(self, event):\n        \"\"\"Dispatches events pertaining to kernelspecs to the appropriate methods.\n\n\n        The primary purpose of this method is to ensure the action is occurring against\n        the a file in the list of watched files and adds some additional attributes to\n        the event instance to make the actual event handling method easier.\n        :param event:\n            The event object representing the file system event.\n        :type event:\n            :class:`FileSystemEvent`\n        \"\"\"\n        if os.path.basename(event.src_path) in self.watched_files:\n            src_resource_dir = os.path.dirname(event.src_path)\n            event.src_resource_dir = src_resource_dir\n            event.src_kernel_name = os.path.basename(src_resource_dir)\n            if type(event) is FileMovedEvent:\n                dest_resource_dir = os.path.dirname(event.dest_path)\n                event.dest_resource_dir = dest_resource_dir\n                event.dest_kernel_name = os.path.basename(dest_resource_dir)\n\n            super().dispatch(event)\n\n    def on_created(self, event):\n        \"\"\"Fires when a watched file is created.\n\n        This will trigger a call to the configured KernelSpecManager to fetch the instance\n        associated with the created file, which is then added to the cache.\n        \"\"\"\n        kernel_name = event.src_kernel_name\n        try:\n            kernelspec = 
self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name)\n            self.kernel_spec_cache.put_item(kernel_name, kernelspec)\n        except Exception as e:\n            self.log.warning(\n                \"The following exception occurred creating cache entry for: {src_resource_dir} \"\n                \"- continuing...  ({e})\".format(src_resource_dir=event.src_resource_dir, e=e)\n            )\n\n    def on_deleted(self, event):\n        \"\"\"Fires when a watched file is deleted, triggering a removal of the corresponding item from the cache.\"\"\"\n        kernel_name = event.src_kernel_name\n        self.kernel_spec_cache.remove_item(kernel_name)\n\n    def on_modified(self, event):\n        \"\"\"Fires when a watched file is modified.\n\n        This will trigger a call to the configured KernelSpecManager to fetch the instance\n        associated with the modified file, which is then replaced in the cache.\n        \"\"\"\n        kernel_name = event.src_kernel_name\n        try:\n            kernelspec = self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name)\n            self.kernel_spec_cache.put_item(kernel_name, kernelspec)\n        except Exception as e:\n            self.log.warning(\n                \"The following exception occurred updating cache entry for: {src_resource_dir} \"\n                \"- continuing...  
({e})\".format(src_resource_dir=event.src_resource_dir, e=e)\n            )\n\n    def on_moved(self, event):\n        \"\"\"Fires when a watched file is moved.\n\n        This will trigger the update of the existing cached item, replacing its resource_dir entry\n        with that of the new destination.\n        \"\"\"\n        src_kernel_name = event.src_kernel_name\n        dest_kernel_name = event.dest_kernel_name\n        cache_item = self.kernel_spec_cache.remove_item(src_kernel_name)\n        cache_item[\"resource_dir\"] = event.dest_resource_dir\n        self.kernel_spec_cache.put_item(dest_kernel_name, cache_item)\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/services/processproxies/conductor.py",
    "content": "\"\"\"Code related to managing kernels running in Conductor clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport os\nimport re\nimport signal\nimport socket\nimport subprocess\nimport time\nfrom random import randint\nfrom typing import Any, ClassVar\n\nfrom jupyter_client import localinterfaces\nfrom jupyter_server.utils import url_unescape\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .processproxy import RemoteProcessProxy\n\npjoin = os.path.join\nlocal_ip = localinterfaces.public_ips()[0]\npoll_interval = float(os.getenv(\"EG_POLL_INTERVAL\", \"0.5\"))\nmax_poll_attempts = int(os.getenv(\"EG_MAX_POLL_ATTEMPTS\", \"10\"))\n\n\nclass ConductorClusterProcessProxy(RemoteProcessProxy):\n    \"\"\"\n    Kernel lifecycle management for Conductor clusters.\n    \"\"\"\n\n    initial_states: ClassVar = {\"SUBMITTED\", \"WAITING\", \"RUNNING\"}\n    final_states: ClassVar = {\"FINISHED\", \"KILLED\", \"RECLAIMED\"}  # Don't include FAILED state\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.application_id = None\n        self.driver_id = None\n        self.env = None\n        self.rest_credential = None\n        self.jwt_token = None\n        self.conductor_endpoint = proxy_config.get(\n            \"conductor_endpoint\", kernel_manager.conductor_endpoint\n        )\n        self.ascd_endpoint = self.conductor_endpoint\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> ConductorClusterProcessProxy:\n        \"\"\"\n        Launches the specified process within a Conductor cluster environment.\n        \"\"\"\n        await super().launch_process(kernel_cmd, **kwargs)\n\n        self.env = 
kwargs.get(\"env\")\n        self.kernel_headers = kwargs.get(\"kernel_headers\")\n\n        # Get Conductor cred from process env\n        env_dict = dict(os.environ.copy())\n        if env_dict and \"EGO_SERVICE_CREDENTIAL\" in env_dict:\n            self.rest_credential = env_dict[\"EGO_SERVICE_CREDENTIAL\"]\n        elif self.kernel_headers and \"Jwt-Auth-User-Payload\" in self.kernel_headers:\n            kwargs.get(\"env\")[\"KERNEL_NOTEBOOK_COOKIE_JAR\"] = \"kernelcookie\" + str(randint(0, 1000))\n            jsonKH = json.loads(self.kernel_headers[\"Jwt-Auth-User-Payload\"])\n            self.jwt_token = jsonKH[\"accessToken\"]\n            await asyncio.get_event_loop().run_in_executor(\n                None, self._performConductorJWTLogonAndRetrieval, self.jwt_token, kwargs.get(\"env\")\n            )\n        else:\n            error_message = (\n                \"ConductorClusterProcessProxy failed to obtain the Conductor credential.\"\n            )\n            self.log_and_raise(http_status_code=500, reason=error_message)\n\n        # dynamically update Spark submit parameters\n        await asyncio.get_event_loop().run_in_executor(\n            None, self._update_launch_info, kernel_cmd, kwargs.get(\"env\")\n        )\n        # Enable stderr PIPE for the run command\n        kwargs.update({\"stderr\": subprocess.PIPE})\n        self.local_proc = self.launch_kernel(kernel_cmd, **kwargs)\n        self.pid = self.local_proc.pid\n        self.ip = local_ip\n\n        self.log.debug(\n            \"Conductor cluster kernel launched using Conductor endpoint: {}, pid: {}, Kernel ID: {}, \"\n            \"cmd: '{}'\".format(\n                self.conductor_endpoint, self.local_proc.pid, self.kernel_id, kernel_cmd\n            )\n        )\n        await self.confirm_remote_startup()\n        return self\n\n    def _update_launch_info(self, kernel_cmd: list[str], env_dict: dict) -> None:\n        \"\"\"\n        Dynamically assemble the spark-submit 
configuration passed from NB2KG.\n        \"\"\"\n        if any(arg.endswith(\".sh\") for arg in kernel_cmd):\n            self.log.debug(\"kernel_cmd contains execution script\")\n        else:\n            kernel_dir = self.kernel_manager.kernel_spec_manager._find_spec_directory(\n                self.kernel_manager.kernel_name\n            )\n            cmd = pjoin(kernel_dir, \"bin/run.sh\")\n            kernel_cmd.insert(0, cmd)\n\n        # add SPARK_HOME, PYSPARK_PYTHON, update SPARK_OPT to contain SPARK_MASTER and EGO_SERVICE_CREDENTIAL\n        env_dict[\"SPARK_HOME\"] = env_dict[\"KERNEL_SPARK_HOME\"]\n        env_dict[\"PYSPARK_PYTHON\"] = env_dict[\"KERNEL_PYSPARK_PYTHON\"]\n        # add KERNEL_SPARK_OPTS to append user configured Spark configuration\n        user_defined_spark_opts = \"\"\n        if \"KERNEL_SPARK_OPTS\" in env_dict:\n            user_defined_spark_opts = env_dict[\"KERNEL_SPARK_OPTS\"]\n\n        # Get updated one_notebook_master_rest_url for KERNEL_NOTEBOOK_MASTER_REST and SPARK_OPTS.\n        if self.jwt_token is None:\n            self._update_notebook_master_rest_url(env_dict)\n\n        if \"--master\" not in env_dict[\"SPARK_OPTS\"]:\n            env_dict[\"SPARK_OPTS\"] = (\n                \"--master {master} --conf spark.ego.credential={rest_cred} \"\n                \"--conf spark.pyspark.python={pyspark_python} {spark_opts} \"\n                \"{user_defined_spark_opts}\".format(\n                    master=env_dict[\"KERNEL_NOTEBOOK_MASTER_REST\"],\n                    rest_cred=\"'\" + self.rest_credential + \"'\",\n                    pyspark_python=env_dict[\"PYSPARK_PYTHON\"],\n                    spark_opts=env_dict[\"SPARK_OPTS\"],\n                    user_defined_spark_opts=user_defined_spark_opts,\n                )\n            )\n\n    def _update_notebook_master_rest_url(self, env_dict: dict) -> None:\n        \"\"\"\n        Updates the notebook master rest url to update KERNEL_NOTEBOOK_MASTER_REST,\n     
   conductor_endpoint, and SPARK_OPTS.\n        \"\"\"\n\n        self.log.debug(\"Updating notebook master rest urls.\")\n        response = None\n        # Assemble REST call\n        header = \"Accept: application/json\"\n        authorization = \"Authorization: %s\" % self.rest_credential\n        if (\n            \"KERNEL_NOTEBOOK_DATA_DIR\" not in env_dict\n            or \"KERNEL_NOTEBOOK_COOKIE_JAR\" not in env_dict\n            or \"KERNEL_CURL_SECURITY_OPT\" not in env_dict\n        ):\n            self.log.warning(\n                \"Could not find KERNEL environment variables. Not updating notebook master rest url.\"\n            )\n            return\n        if (\n            \"CONDUCTOR_REST_URL\" not in env_dict\n            or \"KERNEL_SIG_ID\" not in env_dict\n            or \"KERNEL_NOTEBOOK_MASTER_REST\" not in env_dict\n        ):\n            self.log.warning(\n                \"Could not find CONDUCTOR_REST_URL or KERNEL_SIG_ID or KERNEL_NOTEBOOK_MASTER_REST. \"\n                \"Not updating notebook master rest url.\"\n            )\n            return\n\n        cookie_jar = pjoin(\n            env_dict[\"KERNEL_NOTEBOOK_DATA_DIR\"], env_dict[\"KERNEL_NOTEBOOK_COOKIE_JAR\"]\n        )\n        sslconf = env_dict[\"KERNEL_CURL_SECURITY_OPT\"].split()\n        ascd_rest_url = env_dict[\"CONDUCTOR_REST_URL\"]\n        ig_id = env_dict[\"KERNEL_SIG_ID\"]\n        url = f\"{ascd_rest_url}conductor/v1/instances?id={ig_id}&fields=outputs\"\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        # Perform REST call\n        try:\n            process = subprocess.Popen(\n                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n            )\n            output, stderr = process.communicate()\n            response = json.loads(output) if output else None\n            if (\n                response is None\n                or 
len(response) < 1\n                or not response[0]\n                or not response[0][\"outputs\"]\n            ):\n                response = None\n        except Exception as e:\n            self.log.warning(\n                f\"Getting instance group with cmd '{cmd}' failed with exception: '{e}'.  Continuing...\"\n            )\n            return\n\n        outputs = response[0][\"outputs\"]\n\n        if (\n            \"one_notebook_master_rest_url\" not in outputs\n            or not outputs[\"one_notebook_master_rest_url\"]\n            or \"value\" not in outputs[\"one_notebook_master_rest_url\"]\n            or not outputs[\"one_notebook_master_rest_url\"][\"value\"]\n        ):\n            self.log.warning(\n                \"Could not get one_notebook_master_rest_url from instance group. \"\n                \"Not updating notebook master rest url.\"\n            )\n            return\n        if (\n            \"one_notebook_master_web_submission_url\" not in outputs\n            or not outputs[\"one_notebook_master_web_submission_url\"]\n            or \"value\" not in outputs[\"one_notebook_master_web_submission_url\"]\n            or not outputs[\"one_notebook_master_web_submission_url\"][\"value\"]\n        ):\n            self.log.warning(\n                \"Could not get one_notebook_master_web_submission_url from instance group. 
\"\n                \"Not updating notebook master rest url.\"\n            )\n            return\n\n        updated_one_notebook_master_rest_url = outputs[\"one_notebook_master_rest_url\"][\"value\"]\n        updated_one_notebook_master_web_submission_url = outputs[\n            \"one_notebook_master_web_submission_url\"\n        ][\"value\"]\n\n        if updated_one_notebook_master_rest_url and updated_one_notebook_master_web_submission_url:\n            self.log.debug(\n                f\"Updating KERNEL_NOTEBOOK_MASTER_REST to '{updated_one_notebook_master_rest_url}'.\"\n            )\n            os.environ[\"KERNEL_NOTEBOOK_MASTER_REST\"] = updated_one_notebook_master_rest_url\n            env_dict[\"KERNEL_NOTEBOOK_MASTER_REST\"] = updated_one_notebook_master_rest_url\n            self.conductor_endpoint = updated_one_notebook_master_web_submission_url\n\n    def poll(self) -> bool | None:\n        \"\"\"\n        Submitting a new kernel/app will take a while to be SUBMITTED.\n        Thus application ID will probably not be available immediately for poll.\n        So will regard the application as RUNNING when application ID still in SUBMITTED/WAITING/RUNNING state.\n        :return: None if the application's ID is available and state is SUBMITTED/WAITING/RUNNING. 
Otherwise False.\n        \"\"\"\n        result = False\n\n        if self._get_application_id():\n            state = self._query_app_state_by_driver_id(self.driver_id)\n            if state in ConductorClusterProcessProxy.initial_states:\n                result = None\n        return result\n\n    def send_signal(self, signum: int) -> bool | None:\n        \"\"\"\n        Currently only support 0 as poll and other as kill.\n        :param signum\n        :return: None if signal was successfully sent to kernel, False if an exception was thrown\n        \"\"\"\n        self.log.debug(f\"ConductorClusterProcessProxy.send_signal {signum}\")\n        if signum == 0:\n            return self.poll()\n        elif signum == signal.SIGKILL:\n            return self.kill()\n        else:\n            return super().send_signal(signum)\n\n    def kill(self) -> bool | None:\n        \"\"\"\n        Kill a kernel.\n        :return: None if the application existed and is not in RUNNING state, False otherwise.\n        \"\"\"\n        state = None\n        result = False\n        if self.driver_id:\n            resp = self._kill_app_by_driver_id(self.driver_id)\n            self.log.debug(\n                \"ConductorClusterProcessProxy.kill: kill_app_by_driver_id({}) response: {}, confirming \"\n                \"app state is not RUNNING\".format(self.driver_id, resp)\n            )\n            i = 1\n            state = self._query_app_state_by_driver_id(self.driver_id)\n            while state not in ConductorClusterProcessProxy.final_states and i <= max_poll_attempts:\n                time.sleep(poll_interval)\n                state = self._query_app_state_by_driver_id(self.driver_id)\n                i = i + 1\n\n            if state in ConductorClusterProcessProxy.final_states:\n                result = None\n\n        super().kill()\n\n        self.log.debug(\n            \"ConductorClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}\".format(\n      
          self.application_id, self.kernel_id, state\n            )\n        )\n        return result\n\n    def cleanup(self) -> None:\n        \"\"\"Clean up the kernel.\"\"\"\n        # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have\n        # a local_proc.\n        if self.local_proc:\n            self.log.debug(\n                \"ConductorClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...\".format(\n                    self.local_proc.pid\n                )\n            )\n            if super().poll():\n                super().kill()\n            super().wait()\n            self.local_proc = None\n\n        # reset application id to force new query - handles kernel restarts/interrupts\n        self.application_id = None\n\n        # for cleanup, we should call the superclass last\n        super().cleanup()\n\n    def _parse_driver_submission_id(self, submission_response: str) -> None:\n        \"\"\"\n        Parse driver id from stderr gotten back from launch_kernel\n        :param submission_response\n        \"\"\"\n        if submission_response:\n            self.log.debug(f\"Submission Response: {submission_response}\\n\")\n            matched_lines = [\n                line for line in submission_response.split(\"\\n\") if \"submissionId\" in line\n            ]\n            if matched_lines and len(matched_lines) > 0:\n                driver_info = matched_lines[0]\n                self.log.debug(f\"Driver Info: {driver_info}\")\n                driver_id = driver_info.split(\":\")[1]\n                driver_id = re.findall(r'\"([^\"]*)\"', driver_id)\n                if driver_id and len(driver_id) > 0:\n                    self.driver_id = driver_id[0]\n                    self.log.debug(f\"Driver ID: {driver_id[0]}\")\n            # Handle Checking for submission error to report\n            err_lines = [\n                line\n                for line in 
submission_response.split(\"\\n\")\n                if \"Application submission failed\" in line\n            ]\n            if err_lines and len(err_lines) > 0:\n                self.log_and_raise(\n                    http_status_code=500,\n                    reason=err_lines[0][err_lines[0].find(\"Application submission failed\") :],\n                )\n\n    async def confirm_remote_startup(self) -> None:\n        \"\"\"\n        Confirms the application is in a started state before returning.  Should post-RUNNING states be\n        unexpectedly encountered ('FINISHED', 'KILLED', 'RECLAIMED') then we must throw, otherwise the rest\n        of the gateway will believe its talking to a valid kernel.\n        \"\"\"\n        self.start_time = RemoteProcessProxy.get_current_time()\n        i = 0\n        ready_to_connect = False  # we're ready to connect when we have a connection file to use\n        while not ready_to_connect:\n            if self.local_proc.stderr:\n                # Read stderr after the launch_kernel, and parse the driver id from the REST response\n                output = self.local_proc.stderr.read().decode(\"utf-8\")\n                self._parse_driver_submission_id(output)\n            i += 1\n            await self.handle_timeout()\n\n            if self._get_application_id(True):\n                # Once we have an application ID, start monitoring state, obtain assigned host and get connection info\n                app_state = self._get_application_state()\n\n                if app_state in ConductorClusterProcessProxy.final_states:\n                    error_message = (\n                        \"KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}' \"\n                        \"during kernel startup!\".format(\n                            self.kernel_id, self.application_id, app_state\n                        )\n                    )\n                    self.log_and_raise(http_status_code=500, reason=error_message)\n\n   
             self.log.debug(\n                    \"{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'\".format(\n                        i, app_state, self.assigned_host, self.kernel_id, self.application_id\n                    )\n                )\n\n                if self.assigned_host:\n                    ready_to_connect = await self.receive_connection_info()\n            else:\n                self.detect_launch_failure()\n\n    def _get_application_state(self) -> str:\n        \"\"\"\n        Gets the current application state using the application_id already obtained.  Once the assigned host\n        has been identified, it is no longer accessed.\n        \"\"\"\n        app_state = None\n        apps = self._query_app_by_driver_id(self.driver_id)\n\n        if apps:\n            for app in apps:\n                if \"state\" in app:\n                    app_state = app[\"state\"]\n                if not self.assigned_host and app[\"driver\"]:\n                    self.assigned_host = app[\"driver\"][\"host\"]\n                    # Set the driver host to the actual host where the application landed.\n                    self.assigned_ip = socket.gethostbyname(self.assigned_host)\n        return app_state\n\n    async def handle_timeout(self) -> None:\n        \"\"\"\n        Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.\n        \"\"\"\n        await asyncio.sleep(poll_interval)\n        time_interval = RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n        if time_interval > self.kernel_launch_timeout:\n            reason = f\"Application failed to start within {self.kernel_launch_timeout} seconds.\"\n            error_http_code = 500\n            if self._get_application_id(True):\n                if self._query_app_state_by_driver_id(self.driver_id) != \"WAITING\":\n                    reason = \"Kernel unavailable after {} 
seconds for driver_id {}, app_id {}, launch timeout: {}!\"\n                    reason = reason.format(\n                        time_interval,\n                        self.driver_id,\n                        self.application_id,\n                        self.kernel_launch_timeout,\n                    )\n                    error_http_code = 503\n                else:\n                    reason = \"App {} is WAITING, but waited too long ({} secs) to get connection file\".format(\n                        self.application_id, self.kernel_launch_timeout\n                    )\n            await asyncio.get_event_loop().run_in_executor(None, self.kill)\n            timeout_message = f\"KernelID: '{self.kernel_id}' launch timeout due to: {reason}\"\n            self.log_and_raise(http_status_code=error_http_code, reason=timeout_message)\n\n    def _get_application_id(self, ignore_final_states: bool = False) -> str:\n        \"\"\"\n        Return the kernel's application ID if available, otherwise None.  
If we're obtaining application_id\n        from scratch, do not consider kernels in final states.\n        \"\"\"\n        if not self.application_id:\n            apps = self._query_app_by_driver_id(self.driver_id)\n            state_condition = True\n            if apps:\n                for app in apps:\n                    if \"state\" in app and ignore_final_states:\n                        state_condition = (\n                            app[\"state\"] not in ConductorClusterProcessProxy.final_states\n                        )\n                    if \"applicationid\" in app and len(app[\"applicationid\"]) > 0 and state_condition:\n                        self.application_id = app[\"applicationid\"]\n                        time_interval = RemoteProcessProxy.get_time_diff(\n                            self.start_time, RemoteProcessProxy.get_current_time()\n                        )\n                        self.log.info(\n                            \"ApplicationID: '{}' assigned for KernelID: '{}', state: {}, \"\n                            \"{} seconds after starting.\".format(\n                                app[\"applicationid\"], self.kernel_id, app[\"state\"], time_interval\n                            )\n                        )\n                    else:\n                        self.log.debug(\n                            \"ApplicationID not yet assigned for KernelID: '{}' - retrying...\".format(\n                                self.kernel_id\n                            )\n                        )\n            else:\n                self.log.debug(\n                    f\"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying...\"\n                )\n        return self.application_id\n\n    def get_process_info(self) -> dict[str, Any]:\n        \"\"\"\n        Captures the base information necessary for kernel persistence relative to Conductor clusters.\n        \"\"\"\n        process_info = super().get_process_info()\n     
   process_info.update({\"application_id\": self.application_id})\n        process_info.update({\"rest_credential\": self.rest_credential})\n        return process_info\n\n    def load_process_info(self, process_info: dict[str, Any]) -> None:\n        \"\"\"\n        Captures the base information necessary for kernel persistence relative to Conductor clusters.\n        \"\"\"\n        super().load_process_info(process_info)\n        self.application_id = process_info[\"application_id\"]\n        self.rest_credential = process_info[\"rest_credential\"]\n\n    def _query_app_by_driver_id(self, driver_id: str) -> dict | None:\n        \"\"\"\n        Retrieve application by using driver ID.\n        :param driver_id: as the unique driver id for query\n        :return: The JSON object of an application. None if driver_id is not found.\n        \"\"\"\n        response = None\n        if not driver_id:\n            return response\n        # Assemble REST call\n        env = self.env\n        header = \"Accept: application/json\"\n        authorization = \"Authorization: %s\" % self.rest_credential\n        cookie_jar = pjoin(env[\"KERNEL_NOTEBOOK_DATA_DIR\"], env[\"KERNEL_NOTEBOOK_COOKIE_JAR\"])\n        sslconf = env[\"KERNEL_CURL_SECURITY_OPT\"].split()\n        url = f\"{self.conductor_endpoint}/v1/applications?driverid={driver_id}\"\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n\n        # Perform REST call\n        try:\n            process = subprocess.Popen(\n                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n            )\n            output, stderr = process.communicate()\n            response = json.loads(output) if output else None\n            response = None if not response or not response[\"applist\"] else response[\"applist\"]\n        except Exception as e:\n            self.log.warning(\n                f\"Getting 
application with cmd '{cmd}' failed with exception: '{e}'.  Continuing...\"\n            )\n        return response\n\n    def _query_app_by_id(self, app_id: str) -> dict | None:\n        \"\"\"\n        Retrieve an application by application ID.\n        :param app_id\n        :return: The JSON object of an application. None if app_id is not found.\n        \"\"\"\n        response = None\n        # Assemble REST call\n        env = self.env\n        header = \"Accept: application/json\"\n        authorization = \"Authorization: %s\" % self.rest_credential\n        cookie_jar = pjoin(env[\"KERNEL_NOTEBOOK_DATA_DIR\"], env[\"KERNEL_NOTEBOOK_COOKIE_JAR\"])\n        sslconf = env[\"KERNEL_CURL_SECURITY_OPT\"].split()\n        url = f\"{self.conductor_endpoint}/v1/applications?applicationid={app_id}\"\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        # Perform REST call\n        try:\n            process = subprocess.Popen(\n                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n            )\n            output, stderr = process.communicate()\n            response = json.loads(output) if output else None\n            response = None if response is None or not response[\"applist\"] else response[\"applist\"]\n        except Exception as e:\n            self.log.warning(\n                f\"Getting application with cmd '{cmd}' failed with exception: '{e}'.  
Continuing...\"\n            )\n        return response\n\n    def _query_app_state_by_driver_id(self, driver_id: str) -> dict | None:\n        \"\"\"\n        Return the state of an application.\n        :param driver_id:\n        :return:\n        \"\"\"\n        response = None\n        apps = self._query_app_by_driver_id(driver_id)\n        if apps:\n            for app in apps:\n                if \"state\" in app:\n                    response = app[\"state\"]\n        return response\n\n    def _get_driver_by_app_id(self, app_id: str) -> dict | None:\n        \"\"\"\n        Get driver info from application ID.\n        :param app_id\n        :return: The JSON response driver information of the corresponding application. None if app_id is not found.\n        \"\"\"\n        response = None\n        apps = self._query_app_by_id(app_id)\n        if apps:\n            for app in apps:\n                if app and app[\"driver\"]:\n                    self.log.debug(\"Obtain Driver ID: {}\".format(app[\"driver\"][\"id\"]))\n                    response = app[\"driver\"]\n        else:\n            self.log.warning(\"Application id does not exist\")\n        return response\n\n    def _kill_app_by_driver_id(self, driver_id: str):\n        \"\"\"\n        Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED.\n        :param driver_id\n        :return: The JSON response of killing the application. 
None if driver is not found.\n        \"\"\"\n        self.log.debug(f\"Kill driver: {driver_id}\")\n        if driver_id is None:\n            if self.application_id is None:\n                return None\n            self.log.debug(\n                \"Driver does not exist, retrieving DriverID with ApplicationID: {}\".format(\n                    self.application_id\n                )\n            )\n            driver_info = self._get_driver_by_app_id(self.application_id)\n            if driver_info:\n                self.driver_id = driver_info[\"id\"]\n            else:\n                return None\n\n        # Assemble REST call\n        response = None\n        env = self.env\n        header = \"Accept: application/json\"\n        authorization = \"Authorization: %s\" % self.rest_credential\n        cookie_jar = pjoin(env[\"KERNEL_NOTEBOOK_DATA_DIR\"], env[\"KERNEL_NOTEBOOK_COOKIE_JAR\"])\n        sslconf = env[\"KERNEL_CURL_SECURITY_OPT\"].split()\n        url = f\"{self.conductor_endpoint}/v1/submissions/kill/{self.driver_id}\"\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"POST\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n\n        # Perform REST call\n        try:\n            process = subprocess.Popen(\n                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n            )\n            output, stderr = process.communicate()\n            response = json.loads(output) if output else None\n        except Exception as e:\n            self.log.warning(\n                f\"Termination of application with cmd '{cmd}' failed with exception: '{e}'.  
Continuing...\"\n            )\n        self.log.debug(f\"Kill response: {response}\")\n        return response\n\n    def _performRestCall(self, cmd: list[str], url: str, HA_LIST: list[str]) -> tuple:  # noqa\n        for HA in HA_LIST:\n            portcolon = url.rfind(\":\")\n            slash = url.find(\"://\")\n            url = url[0 : slash + 3] + HA + url[portcolon:]\n            cmd[-1] = url\n            self.log.debug(cmd)\n            process = subprocess.Popen(\n                cmd,\n                stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE,\n                close_fds=True,\n                universal_newlines=True,\n            )\n            output, stderr = process.communicate()\n            if (\n                \"Could not resolve host\" not in stderr\n                and \"Failed connect to\" not in stderr\n                and \"Connection refused\" not in stderr\n            ):\n                return output, stderr\n        self.log_and_raise(\n            http_status_code=500, reason=\"Could not connect to ascd. Verify ascd is running.\"\n        )\n        return \"Error\", \"Error\"\n\n    # confirm return type\n    def _performConductorJWTLogonAndRetrieval(  # noqa\n        self, jwt_token: str, env_dict: dict[str, Any]\n    ):\n        \"\"\"\n        Authenticate to Conductor with a JWT Token and setup the kernel environment variables.\n        :param jwt_token: JWT Token to authenticate with to Conductor\n        :param env_dict: Environment Dictionary of this Kernel launch\n        :return: None\n        \"\"\"\n        response = None\n        if not jwt_token:\n            return response\n        # Assemble JWT Auth logon REST call\n        env = self.env\n\n        if env[\"KERNEL_IG_UUID\"] is None:\n            reasonErr = (\n                \"Instance group specified is None. 
Check environment \"\n                \"specified instance group is available.\"\n            )\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n\n        # Determine hostname of ascd_endpoint and setup the HA List\n        portcolon = self.ascd_endpoint.rfind(\":\")\n        slash = self.ascd_endpoint.find(\"://\")\n        host = self.ascd_endpoint[slash + 3 : portcolon]\n        HA_LIST = env[\"KERNEL_CONDUCTOR_HA_ENDPOINTS\"].split(\",\")\n        HA_LIST.insert(0, host)\n\n        header = \"Accept: application/json\"\n        authorization = \"Authorization: Bearer %s\" % jwt_token\n        cookie_jar = pjoin(env[\"KERNEL_NOTEBOOK_DATA_DIR\"], env[\"KERNEL_NOTEBOOK_COOKIE_JAR\"])\n        sslconf = env[\"KERNEL_CURL_SECURITY_OPT\"].split()\n        url = \"{}/auth/logon/jwt?topology={}\".format(self.ascd_endpoint, env[\"KERNEL_TOPOLOGY\"])\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        output, stderr = self._performRestCall(cmd, url, HA_LIST)\n        if \"Error\" in output:\n            reasonErr = \"Failed to perform JWT Auth Logon. \" + output.splitlines()[0]\n            self.log.warning(cmd)\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n        self.rest_credential = url_unescape(output)[1:-1]\n\n        # Assemble EGO Token Logon REST call\n        authorization = \"Authorization: PlatformToken token=\" + output.strip('\"')\n        url = \"%s/auth/logon\" % self.ascd_endpoint\n        cmd = [\"curl\", \"-v\", \"-c\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        output, stderr = self._performRestCall(cmd, url, HA_LIST)\n        if \"Error\" in output:\n            reasonErr = \"Failed to perform EGO Auth Logon. 
\" + output.splitlines()[0]\n            self.log.warning(cmd)\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n\n        # Get the Python path to use to make sure the right conda environment is used\n        url = \"{}/anaconda/instances/{}\".format(\n            self.ascd_endpoint, env[\"KERNEL_ANACONDA_INST_UUID\"]\n        )\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        output, stderr = self._performRestCall(cmd, url, HA_LIST)\n        response = json.loads(output) if output else None\n        if response is None or not response[\"parameters\"][\"deploy_home\"][\"value\"]:\n            reasonErr = \"Could not retrieve anaconda instance. Verify anaconda instance with id \"\n            reasonErr = reasonErr + env[\"KERNEL_ANACONDA_INST_UUID\"] + \" exists\"\n            self.log.warning(cmd)\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n        else:\n            env_dict[\"KERNEL_PYSPARK_PYTHON\"] = (\n                response[\"parameters\"][\"deploy_home\"][\"value\"]\n                + \"/anaconda/envs/\"\n                + env[\"KERNEL_ANACONDA_ENV\"]\n                + \"/bin/python\"\n            )\n\n        # Get instance group information we need\n        url = \"{}/instances?id={}&fields=sparkinstancegroup,outputs\".format(\n            self.ascd_endpoint,\n            env[\"KERNEL_IG_UUID\"],\n        )\n        cmd = [\"curl\", \"-v\", \"-b\", cookie_jar, \"-X\", \"GET\", \"-H\", header, \"-H\", authorization, url]\n        cmd[2:2] = sslconf\n        output, stderr = self._performRestCall(cmd, url, HA_LIST)\n        response = json.loads(output) if output else None\n\n        if response is None or len(response) == 0 or response[0] is None:\n            reasonErr = (\n                \"Could not retrieve instance group. 
Verify instance group with id \"\n                + env[\"KERNEL_IG_UUID\"]\n                + \" exists.\"\n            )\n            self.log.warning(cmd)\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n        elif (\n            response is None\n            or response[0] is None\n            or \"value\" not in response[0][\"outputs\"][\"batch_master_rest_urls\"]\n        ):\n            reasonErr = (\n                \"Could not retrieve outputs for instance group. Verify instance group with id \"\n                + env[\"KERNEL_IG_UUID\"]\n                + \" is started\"\n            )\n            self.log.warning(cmd)\n            self.log_and_raise(http_status_code=500, reason=reasonErr)\n        else:\n            env_dict[\"KERNEL_SPARK_HOME\"] = response[0][\"sparkinstancegroup\"][\"sparkhomedir\"]\n            env_dict[\"KERNEL_NOTEBOOK_MASTER_REST\"] = response[0][\"outputs\"][\n                \"batch_master_rest_urls\"\n            ][\"value\"]\n            self.conductor_endpoint = response[0][\"outputs\"][\"one_batch_master_web_submission_url\"][\n                \"value\"\n            ]\n        return response\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/container.py",
    "content": "\"\"\"Code related to managing kernels running in containers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport abc\nimport logging\nimport os\nimport signal\nfrom typing import Any\n\nimport urllib3  # docker ends up using this and it causes lots of noise, so turn off warnings\nfrom jupyter_client import localinterfaces\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .processproxy import RemoteProcessProxy\n\nlog = logging.getLogger(__name__)\n\nurllib3.disable_warnings()\n\nlocal_ip = localinterfaces.public_ips()[0]\n\ndefault_kernel_uid = \"1000\"  # jovyan user is the default\ndefault_kernel_gid = \"100\"  # users group is the default\n\n\ndef _parse_prohibited_ids(env_var: str, default: str) -> list[int]:\n    \"\"\"Parse a comma-separated list of IDs from an environment variable into integers.\n\n    Raises:\n        ValueError: If any entry in the configured value is not a valid integer.\n            This enforces a fail-closed posture — a misconfigured prohibited list\n            (e.g. usernames instead of numeric IDs) will prevent startup rather than\n            silently yielding an empty list.\n    \"\"\"\n    result: list[int] = []\n    raw_value = os.getenv(env_var, default)\n    for item in raw_value.split(\",\"):\n        stripped = item.strip()\n        if stripped:\n            try:\n                result.append(int(stripped))\n            except ValueError:\n                msg = (\n                    f\"Invalid entry '{stripped}' in {env_var}='{raw_value}'. \"\n                    f\"All entries must be numeric IDs, not usernames or group names. 
\"\n                    f\"Example: {env_var}=0,1000\"\n                )\n                log.critical(msg)\n                raise ValueError(msg) from None\n    return result\n\n\n# These could be enforced via a PodSecurityPolicy, but those affect\n# all pods so the cluster admin would need to configure those for\n# all applications.\nprohibited_uids = _parse_prohibited_ids(\"EG_PROHIBITED_UIDS\", \"0\")\nprohibited_gids = _parse_prohibited_ids(\"EG_PROHIBITED_GIDS\", \"0\")\n\nmirror_working_dirs = bool(os.getenv(\"EG_MIRROR_WORKING_DIRS\", \"false\").lower() == \"true\")\n\n# Get the globally-configured default images.  Defaulting to None if not set.\ndefault_kernel_image = os.getenv(\"EG_KERNEL_IMAGE\")\ndefault_kernel_executor_image = os.getenv(\"EG_KERNEL_EXECUTOR_IMAGE\")\n\n\nclass ContainerProcessProxy(RemoteProcessProxy):\n    \"\"\"\n    Kernel lifecycle management for container-based kernels.\n    \"\"\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.container_name = \"\"\n        self.assigned_node_ip = None\n\n    def _determine_kernel_images(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Determine which kernel images to use.\n\n        Initialize to any defined in the process proxy override that then let those provided\n        by client via env override.\n        \"\"\"\n        kernel_image = self.proxy_config.get(\"image_name\", default_kernel_image)\n        self.kernel_image = kwargs[\"env\"].get(\"KERNEL_IMAGE\", kernel_image)\n\n        if self.kernel_image is None:\n            self.log_and_raise(\n                http_status_code=500,\n                reason=\"No kernel image could be determined! 
Set the `image_name` in the \"\n                \"process_proxy.config stanza of the corresponding kernel.json file.\",\n            )\n\n        # If no default executor image is configured, default it to current image\n        kernel_executor_image = self.proxy_config.get(\n            \"executor_image_name\", default_kernel_executor_image or self.kernel_image\n        )\n        self.kernel_executor_image = kwargs[\"env\"].get(\n            \"KERNEL_EXECUTOR_IMAGE\", kernel_executor_image\n        )\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> ContainerProcessProxy:\n        \"\"\"\n        Launches the specified process within the container environment.\n        \"\"\"\n        # Set env before superclass call so we see these in the debug output\n\n        self._determine_kernel_images(**kwargs)\n        kwargs[\"env\"][\"KERNEL_IMAGE\"] = self.kernel_image\n        kwargs[\"env\"][\"KERNEL_EXECUTOR_IMAGE\"] = self.kernel_executor_image\n\n        # If mirroring is not enabled, remove working directory if present\n        if not mirror_working_dirs and \"KERNEL_WORKING_DIR\" in kwargs[\"env\"]:\n            del kwargs[\"env\"][\"KERNEL_WORKING_DIR\"]\n\n        self._enforce_prohibited_ids(**kwargs)\n\n        await super().launch_process(kernel_cmd, **kwargs)\n\n        self.local_proc = self.launch_kernel(kernel_cmd, **kwargs)\n        self.pid = self.local_proc.pid\n        self.ip = local_ip\n\n        self.log.info(\n            \"{}: kernel launched. 
Kernel image: {}, KernelID: {}, cmd: '{}'\".format(\n                self.__class__.__name__, self.kernel_image, self.kernel_id, kernel_cmd\n            )\n        )\n\n        await self.confirm_remote_startup()\n        return self\n\n    def _enforce_prohibited_ids(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"Determine UID and GID with which to launch container and ensure they are not prohibited.\"\"\"\n        kernel_uid = kwargs[\"env\"].get(\"KERNEL_UID\", default_kernel_uid)\n        kernel_gid = kwargs[\"env\"].get(\"KERNEL_GID\", default_kernel_gid)\n\n        try:\n            uid_int = int(kernel_uid)\n        except (ValueError, TypeError):\n            self.log_and_raise(\n                http_status_code=403,\n                reason=f\"Invalid KERNEL_UID value '{kernel_uid}': not a valid integer!\",\n            )\n\n        try:\n            gid_int = int(kernel_gid)\n        except (ValueError, TypeError):\n            self.log_and_raise(\n                http_status_code=403,\n                reason=f\"Invalid KERNEL_GID value '{kernel_gid}': not a valid integer!\",\n            )\n\n        max_id = 4294967295  # uint32 max — Linux uid_t/gid_t upper bound\n\n        if not (0 <= uid_int <= max_id):\n            self.log_and_raise(\n                http_status_code=403,\n                reason=f\"Invalid KERNEL_UID value '{kernel_uid}': must be in range 0-{max_id}!\",\n            )\n\n        if not (0 <= gid_int <= max_id):\n            self.log_and_raise(\n                http_status_code=403,\n                reason=f\"Invalid KERNEL_GID value '{kernel_gid}': must be in range 0-{max_id}!\",\n            )\n\n        if uid_int in prohibited_uids:\n            self.log_and_raise(\n                http_status_code=403,\n                reason=f\"Kernel's UID value of '{kernel_uid}' has been denied via EG_PROHIBITED_UIDS!\",\n            )\n\n        if gid_int in prohibited_gids:\n            self.log_and_raise(\n                
http_status_code=403,\n                reason=f\"Kernel's GID value of '{kernel_gid}' has been denied via EG_PROHIBITED_GIDS!\",\n            )\n\n        # Ensure the kernel's env has normalized values\n        kwargs[\"env\"][\"KERNEL_UID\"] = str(uid_int)\n        kwargs[\"env\"][\"KERNEL_GID\"] = str(gid_int)\n\n    def poll(self) -> bool | None:\n        \"\"\"Determines if container is still active.\n\n        Submitting a new kernel to the container manager will take a while to be Running.\n        Thus kernel ID will probably not be available immediately for poll.\n        So will regard the container as active when no status is available or one of the initial\n        phases.\n\n        Returns\n        -------\n        None if the container cannot be found or its in an initial state. Otherwise False.\n        \"\"\"\n        result = False\n\n        container_status = self.get_container_status(None)\n        # Do not check whether container_status is None\n        # EG couldn't restart kernels although connections exists.\n        # See https://github.com/jupyter-server/enterprise_gateway/issues/827\n        if container_status in self.get_initial_states():\n            result = None\n        return result\n\n    def send_signal(self, signum: int) -> bool | None:\n        \"\"\"Send signal `signum` to container.\n\n        Parameters\n        ----------\n        signum : int\n            The signal number to send.  
Zero is used to determine heartbeat.\n        \"\"\"\n        if signum == 0:\n            return self.poll()\n        elif signum == signal.SIGKILL:\n            return self.kill()\n        else:\n            # This is very likely an interrupt signal, so defer to the super class\n            # which should use the communication port.\n            return super().send_signal(signum)\n\n    def kill(self) -> bool | None:\n        \"\"\"Kills a containerized kernel.\n\n        Returns\n        -------\n        None if the container is gracefully terminated, False otherwise.\n        \"\"\"\n        result = None\n\n        if self.container_name:  # We only have something to terminate if we have a name\n            result = self.terminate_container_resources()\n\n        return result\n\n    def shutdown_listener(self):\n        \"\"\"Shut down the listener.\"\"\"\n        super().shutdown_listener()\n        if self.container_name:  # We only have something to terminate if we have a name\n            self.terminate_container_resources()\n\n    async def confirm_remote_startup(self) -> None:\n        \"\"\"Confirms the container has started and returned necessary connection information.\"\"\"\n        self.log.debug(\"Trying to confirm kernel container startup status\")\n        self.start_time = RemoteProcessProxy.get_current_time()\n        i = 0\n        ready_to_connect = False  # we're ready to connect when we have a connection file to use\n        while not ready_to_connect:\n            i += 1\n            await self.handle_timeout()\n\n            container_status = self.get_container_status(i)\n            if container_status:\n                if container_status in self.get_error_states():\n                    self.log_and_raise(\n                        http_status_code=500,\n                        reason=f\"Error starting kernel container; status: '{container_status}'.\",\n                    )\n                else:\n                    if 
self.assigned_host:\n                        ready_to_connect = await self.receive_connection_info()\n                        self.pid = (\n                            0  # We won't send process signals for kubernetes lifecycle management\n                        )\n                        self.pgid = 0\n            else:\n                self.detect_launch_failure()\n\n    def get_process_info(self) -> dict[str, Any]:\n        \"\"\"Captures the base information necessary for kernel persistence relative to containers.\"\"\"\n        process_info = super().get_process_info()\n        process_info.update(\n            {\n                \"assigned_node_ip\": self.assigned_node_ip,\n            }\n        )\n        return process_info\n\n    def load_process_info(self, process_info: dict[str, Any]) -> None:\n        \"\"\"Loads the base information necessary for kernel persistence relative to containers.\"\"\"\n        super().load_process_info(process_info)\n        self.assigned_node_ip = process_info[\"assigned_node_ip\"]\n\n    @abc.abstractmethod\n    def get_initial_states(self):\n        \"\"\"Return list of states in lowercase indicating container is starting (includes running).\"\"\"\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def get_error_states(self):\n        \"\"\"Returns the list of error states (in lowercase).\"\"\"\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def get_container_status(self, iteration: int | None) -> str:\n        \"\"\"Returns the current container state (in lowercase) or the empty string if not available.\"\"\"\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def terminate_container_resources(self):\n        \"\"\"Terminate any artifacts created on behalf of the container's lifetime.\"\"\"\n        raise NotImplementedError\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/crd.py",
    "content": "\"\"\"Code related to managing kernels running based on k8s custom resource.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport re\nfrom contextlib import suppress\nfrom typing import Any\n\nfrom kubernetes import client\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .k8s import KubernetesProcessProxy\n\n\nclass CustomResourceProcessProxy(KubernetesProcessProxy):\n    \"\"\"A custom resource process proxy.\"\"\"\n\n    # Identifies the kind of object being managed by this process proxy.\n    # For these values we will prefer the values found in the 'kind' field\n    # of the object's metadata.  This attribute is strictly used to provide\n    # context to log messages.\n    object_kind = \"CustomResourceDefinition\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.group = self.version = self.plural = None\n        self.kernel_resource_name = None\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> CustomResourceProcessProxy:\n        \"\"\"Launch the process for a kernel.\"\"\"\n        self.kernel_resource_name = self._determine_kernel_pod_name(**kwargs)\n        kwargs[\"env\"][\"KERNEL_RESOURCE_NAME\"] = self.kernel_resource_name\n        kwargs[\"env\"][\"KERNEL_CRD_GROUP\"] = self.group\n        kwargs[\"env\"][\"KERNEL_CRD_VERSION\"] = self.version\n        kwargs[\"env\"][\"KERNEL_CRD_PLURAL\"] = self.plural\n\n        await super().launch_process(kernel_cmd, **kwargs)\n        return self\n\n    def get_container_status(self, iteration: int | None) -> str:\n        \"\"\"Determines submitted CRD application status\n\n        Submitting a new kernel application CRD will take a while to\n        reach the Running state 
and the submission can also fail due\n        to malformation or other issues which will prevent the application\n        pod from reaching the desired Running state.\n\n        This function checks the CRD submission state and in case of\n        success it then delegates to parent to check if the application\n        pod is running.\n\n        Returns\n        -------\n        Empty string if the container cannot be found; otherwise,\n        the pod application status in case of success on the Spark Operator side,\n        or the retrieved Spark Operator submission status in other cases (e.g. Failed).\n        \"\"\"\n\n        application_state = \"\"\n\n        with suppress(Exception):\n            custom_resource = client.CustomObjectsApi().get_namespaced_custom_object(\n                self.group,\n                self.version,\n                self.kernel_namespace,\n                self.plural,\n                self.kernel_resource_name,\n            )\n\n            if custom_resource:\n                application_state = custom_resource['status']['applicationState']['state'].lower()\n\n                if application_state in self.get_error_states():\n                    exception_text = self._get_exception_text(\n                        custom_resource['status']['applicationState']['errorMessage']\n                    )\n                    error_message = (\n                        f\"CRD submission for kernel {self.kernel_id} failed: {exception_text}\"\n                    )\n                    self.log.debug(error_message)\n                elif application_state == \"running\" and not self.assigned_host:\n                    super().get_container_status(iteration)\n\n        # only log if iteration is not None (otherwise poll() is too noisy)\n        # check for running state to avoid double logging with superclass\n        if iteration and application_state != \"running\":\n            self.log.debug(\n                f\"{iteration}: Waiting for CRD status 
from resource manager {self.object_kind.lower()} in \"\n                f\"namespace '{self.kernel_namespace}'. Name: '{self.kernel_resource_name}', \"\n                f\"Status: '{application_state}', KernelID: '{self.kernel_id}'\"\n            )\n\n        return application_state\n\n    def delete_managed_object(self, termination_stati: list[str]) -> bool:\n        \"\"\"Deletes the object managed by this process-proxy\n\n        A return value of True indicates the object is considered deleted,\n        otherwise a False or None value is returned.\n\n        Note: the caller is responsible for handling exceptions.\n        \"\"\"\n        delete_status = client.CustomObjectsApi().delete_namespaced_custom_object(\n            self.group,\n            self.version,\n            self.kernel_namespace,\n            self.plural,\n            self.kernel_resource_name,\n            grace_period_seconds=0,\n            propagation_policy=\"Background\",\n        )\n\n        result = delete_status and delete_status.get(\"status\", None) in termination_stati\n\n        return result\n\n    def get_initial_states(self) -> set:\n        \"\"\"Return list of states in lowercase indicating container is starting (includes running).\"\"\"\n        return [\"submitted\", \"pending\", \"running\"]\n\n    def _get_exception_text(self, error_message):\n        match = re.search(r'Exception\\s*:\\s*(.*)', error_message, re.MULTILINE)\n\n        if match:\n            error_message = match.group(1)\n\n        return error_message\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/distributed.py",
    "content": "\"\"\"Code used for the generic distribution of kernels across a set of hosts.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport os\nimport signal\nfrom socket import gethostbyname\nfrom subprocess import STDOUT\nfrom typing import Any, ClassVar\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .processproxy import BaseProcessProxyABC, RemoteProcessProxy\n\npoll_interval = float(os.getenv(\"EG_POLL_INTERVAL\", \"0.5\"))\nkernel_log_dir = os.getenv(\n    \"EG_KERNEL_LOG_DIR\", \"/tmp\"  # noqa\n)  # would prefer /var/log, but its only writable by root\n\n\nclass TrackKernelOnHost:\n    \"\"\"A class for tracking a kernel on a host.\"\"\"\n\n    _host_kernels: ClassVar = {}\n    _kernel_host_mapping: ClassVar = {}\n\n    def add_kernel_id(self, host: str, kernel_id: str) -> None:\n        \"\"\"Add a kernel to a host.\"\"\"\n        self._kernel_host_mapping[kernel_id] = host\n        self.increment(host)\n\n    def delete_kernel_id(self, kernel_id: str) -> None:\n        \"\"\"Delete a kernel id from tracking.\"\"\"\n        host = self._kernel_host_mapping.get(kernel_id)\n        if host:\n            self.decrement(host)\n            del self._kernel_host_mapping[kernel_id]\n\n    def min_or_remote_host(self, remote_host: str | None = None) -> str:\n        \"\"\"Return the remote host if given, or the kernel with the min value.\"\"\"\n        if remote_host:\n            return remote_host\n        return min(self._host_kernels, key=lambda k: self._host_kernels[k])\n\n    def increment(self, host: str) -> None:\n        \"\"\"Increment the value for a host.\"\"\"\n        val = int(self._host_kernels.get(host, 0))\n        self._host_kernels[host] = val + 1\n\n    def decrement(self, host: str) -> None:\n        \"\"\"Decrement the value for a host.\"\"\"\n        val = 
int(self._host_kernels.get(host, 0))\n        self._host_kernels[host] = val - 1\n\n    def init_host_kernels(self, hosts) -> None:\n        \"\"\"Initialize the kernels for a set of hosts.\"\"\"\n        if len(self._host_kernels) == 0:\n            self._host_kernels.update({key: 0 for key in hosts})\n\n\nclass DistributedProcessProxy(RemoteProcessProxy):\n    \"\"\"\n    Manages the lifecycle of kernels distributed across a set of hosts.\n    \"\"\"\n\n    host_index = 0\n    kernel_on_host = TrackKernelOnHost()\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.kernel_log = None\n        self.local_stdout = None\n        self.least_connection = kernel_manager.load_balancing_algorithm == \"least-connection\"\n        if proxy_config.get(\"remote_hosts\"):\n            self.hosts = proxy_config.get(\"remote_hosts\").split(\",\")\n        else:\n            self.hosts = kernel_manager.remote_hosts  # from command line or env\n\n        if self.least_connection:\n            DistributedProcessProxy.kernel_on_host.init_host_kernels(self.hosts)\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> DistributedProcessProxy:\n        \"\"\"\n        Launches a kernel process on a selected host.\n        \"\"\"\n        env_dict = kwargs.get(\"env\")\n        await super().launch_process(kernel_cmd, **kwargs)\n\n        self.assigned_host = self._determine_next_host(env_dict)\n        self.ip = gethostbyname(self.assigned_host)  # convert to ip if host is provided\n        self.assigned_ip = self.ip\n\n        try:\n            result_pid = self._launch_remote_process(kernel_cmd, **kwargs)\n            self.pid = int(result_pid)\n        except Exception as e:\n            error_message = \"Failure occurred starting kernel on '{}'.  
Returned result: {}\".format(\n                self.ip, e\n            )\n            self.log_and_raise(http_status_code=500, reason=error_message)\n\n        self.log.info(\n            \"Kernel launched on '{}', pid: {}, ID: {}, Log file: {}:{}, Command: '{}'.  \".format(\n                self.assigned_host,\n                self.pid,\n                self.kernel_id,\n                self.assigned_host,\n                self.kernel_log,\n                kernel_cmd,\n            )\n        )\n        await self.confirm_remote_startup()\n        return self\n\n    def _launch_remote_process(self, kernel_cmd: str, **kwargs: dict[str, Any] | None) -> str:\n        \"\"\"\n        Launch the kernel as indicated by the argv stanza in the kernelspec.  Note that this method\n        will bypass use of ssh if the remote host is also the local machine.\n        \"\"\"\n\n        cmd = self._build_startup_command(kernel_cmd, **kwargs)\n        self.log.debug(f\"Invoking cmd: '{cmd}' on host: {self.assigned_host}\")\n        result_pid = \"bad_pid\"  # purposely initialize to bad int value\n\n        if BaseProcessProxyABC.ip_is_local(self.ip):\n            # launch the local command with redirection in place\n            self.local_stdout = open(self.kernel_log, mode=\"a\")  # noqa\n            self.local_proc = self.launch_kernel(\n                cmd, stdout=self.local_stdout, stderr=STDOUT, **kwargs\n            )\n            result_pid = str(self.local_proc.pid)\n        else:\n            # launch remote command via ssh\n            result = self.rsh(self.ip, cmd)\n            for line in result:\n                result_pid = line.strip()\n\n        return result_pid\n\n    def _build_startup_command(self, argv_cmd: str, **kwargs: dict[str, Any] | None) -> str:\n        \"\"\"\n        Builds the command to invoke by concatenating envs from kernelspec followed by the kernel argvs.\n\n        We also force nohup, redirection to a file and place in background, then 
follow with an echo\n        for the background pid.\n\n        Note: We optimize for the local case and just return the existing command.\n        \"\"\"\n\n        # Optimized case needs to also redirect the kernel output, so unconditionally compose kernel_log\n        env_dict = kwargs[\"env\"]\n        kid = env_dict.get(\"KERNEL_ID\")\n        self.kernel_log = os.path.join(kernel_log_dir, f\"kernel-{kid}.log\")\n\n        if BaseProcessProxyABC.ip_is_local(self.ip):  # We're local so just use what we're given\n            cmd = argv_cmd\n        else:  # Add additional envs, including those in kernelspec\n            cmd = \"\"\n\n            for key, value in env_dict.items():\n                cmd += \"export {}={};\".format(key, json.dumps(value).replace(\"'\", \"''\"))\n\n            for key, value in self.kernel_manager.kernel_spec.env.items():\n                cmd += \"export {}={};\".format(key, json.dumps(value).replace(\"'\", \"''\"))\n\n            cmd += \"nohup\"\n            for arg in argv_cmd:\n                cmd += f\" {arg}\"\n\n            cmd += f\" >> {self.kernel_log} 2>&1 & echo $!\"  # return the process id\n\n        return cmd\n\n    def _determine_next_host(self, env_dict: dict) -> str:\n        \"\"\"Simple round-robin index into list of hosts or use least-connection .\"\"\"\n        remote_host = env_dict.get(\"KERNEL_REMOTE_HOST\")\n        if self.least_connection:\n            next_host = DistributedProcessProxy.kernel_on_host.min_or_remote_host(remote_host)\n            DistributedProcessProxy.kernel_on_host.add_kernel_id(next_host, self.kernel_id)\n        else:\n            next_host = (\n                remote_host\n                if remote_host\n                else self.hosts[DistributedProcessProxy.host_index % self.hosts.__len__()]\n            )\n            DistributedProcessProxy.host_index += 1\n\n        return next_host\n\n    def _unregister_assigned_host(self) -> None:\n        if self.least_connection:\n        
    DistributedProcessProxy.kernel_on_host.delete_kernel_id(self.kernel_id)\n\n    async def confirm_remote_startup(self) -> None:\n        \"\"\"Confirms the remote kernel has started by obtaining connection information from the remote host.\"\"\"\n        self.start_time = RemoteProcessProxy.get_current_time()\n        i = 0\n        ready_to_connect = False  # we're ready to connect when we have a connection file to use\n        while not ready_to_connect:\n            i += 1\n            await self.handle_timeout()\n\n            self.log.debug(\n                \"{}: Waiting to connect.  Host: '{}', KernelID: '{}'\".format(\n                    i, self.assigned_host, self.kernel_id\n                )\n            )\n\n            if self.assigned_host:\n                ready_to_connect = await self.receive_connection_info()\n\n    async def handle_timeout(self) -> None:\n        \"\"\"Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.\"\"\"\n        await asyncio.sleep(poll_interval)\n        time_interval = RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n        if time_interval > self.kernel_launch_timeout:\n            reason = (\n                \"Waited too long ({}s) to get connection file.  
Check Enterprise Gateway log and kernel \"\n                \"log ({}:{}) for more information.\".format(\n                    self.kernel_launch_timeout, self.assigned_host, self.kernel_log\n                )\n            )\n            timeout_message = f\"KernelID: '{self.kernel_id}' launch timeout due to: {reason}\"\n            await asyncio.get_event_loop().run_in_executor(None, self.kill)\n            self.log_and_raise(http_status_code=500, reason=timeout_message)\n\n    def cleanup(self) -> None:\n        \"\"\"Clean up the proxy.\"\"\"\n        # DistributedProcessProxy can have a tendency to leave zombies, particularly when EG is\n        # abruptly terminated.  This extra call to shutdown_lister does the trick.\n        self.shutdown_listener()\n        self._unregister_assigned_host()\n        if self.local_stdout:\n            self.local_stdout.close()\n            self.local_stdout = None\n        super().cleanup()\n\n    def shutdown_listener(self) -> None:\n        \"\"\"Ensure that kernel process is terminated.\"\"\"\n        self.send_signal(signal.SIGTERM)\n        super().shutdown_listener()\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/docker_swarm.py",
    "content": "\"\"\"Code related to managing kernels running in docker-based containers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nfrom typing import Any\n\nfrom docker.client import DockerClient\nfrom docker.errors import NotFound\nfrom docker.models.containers import Container\nfrom docker.models.services import Service\n\n# Debug logging level of docker produces too much noise - raise to info by default.\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .container import ContainerProcessProxy\n\nlogging.getLogger(\"urllib3.connectionpool\").setLevel(\n    os.environ.get(\"EG_DOCKER_LOG_LEVEL\", logging.WARNING)\n)\n\ndocker_network = os.environ.get(\"EG_DOCKER_NETWORK\", \"bridge\")\n\nclient = DockerClient.from_env()\n\n\nclass DockerSwarmProcessProxy(ContainerProcessProxy):\n    \"\"\"\n    Kernel lifecycle management for kernels in Docker Swarm.\n    \"\"\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n\n    def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> DockerSwarmProcessProxy:\n        \"\"\"\n        Launches the specified process within a Docker Swarm environment.\n        \"\"\"\n        # Convey the network to the docker launch script\n        kwargs[\"env\"][\"EG_DOCKER_NETWORK\"] = docker_network\n        kwargs[\"env\"][\"EG_DOCKER_MODE\"] = \"swarm\"\n        return super().launch_process(kernel_cmd, **kwargs)\n\n    def get_initial_states(self) -> set:\n        \"\"\"Return list of states in lowercase indicating container is starting (includes running).\"\"\"\n        return {\"preparing\", \"starting\", \"running\"}\n\n    def get_error_states(self) -> set:\n        \"\"\"Returns the list of error states indicating 
container is shutting down or receiving error.\"\"\"\n        return {\"failed\", \"rejected\", \"complete\", \"shutdown\", \"orphaned\", \"remove\"}\n\n    def _get_service(self) -> Service:\n        # Fetches the service object corresponding to the kernel with a matching label.\n        service = None\n        services = client.services.list(filters={\"label\": \"kernel_id=\" + self.kernel_id})\n        num_services = len(services)\n        if num_services != 1:\n            if num_services > 1:\n                msg = \"{}: Found more than one service ({}) for kernel_id '{}'!\".format(\n                    self.__class__.__name__, num_services, self.kernel_id\n                )\n                raise RuntimeError(msg)\n        else:\n            service = services[0]\n            self.container_name = service.name\n        return service\n\n    def _get_task(self) -> dict:\n        # Fetches the task object corresponding to the service associated with the kernel.  We only ask for the\n        # current task with desired-state == running.  This eliminates failed states.\n\n        task = None\n        service = self._get_service()\n        if service:\n            tasks = service.tasks(filters={\"desired-state\": \"running\"})\n            num_tasks = len(tasks)\n            if num_tasks != 1:\n                if num_tasks > 1:\n                    msg = \"{}: Found more than one task ({}) for service '{}', kernel_id '{}'!\".format(\n                        self.__class__.__name__, num_tasks, service.name, self.kernel_id\n                    )\n                    raise RuntimeError(msg)\n            else:\n                task = tasks[0]\n        return task\n\n    def get_container_status(self, iteration: int | None) -> str:\n        \"\"\"Return current container state.\"\"\"\n        # Locates the kernel container using the kernel_id filter.  
If the status indicates an initial state we\n        # should be able to get at the NetworksAttachments and determine the associated container's IP address.\n        task_state = \"\"\n        task_id = None\n        task = self._get_task()\n        if task:\n            task_status = task[\"Status\"]\n            task_id = task[\"ID\"]\n            if task_status:\n                task_state = task_status[\"State\"].lower()\n                if (\n                    not self.assigned_host and task_state == \"running\"\n                ):  # in self.get_initial_states()\n                    # get the NetworkAttachments and pick out the first of the Network and first\n                    networks_attachments = task[\"NetworksAttachments\"]\n                    if len(networks_attachments) > 0:\n                        address = networks_attachments[0][\"Addresses\"][0]\n                        ip = address.split(\"/\")[0]\n                        self.assigned_ip = ip\n                        self.assigned_host = self.container_name\n\n        if iteration:  # only log if iteration is not None (otherwise poll() is too noisy)\n            self.log.debug(\n                \"{}: Waiting to connect to docker container. 
\"\n                \"Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}', TaskID: '{}'\".format(\n                    iteration,\n                    self.container_name,\n                    task_state,\n                    self.assigned_ip,\n                    self.kernel_id,\n                    task_id,\n                )\n            )\n        return task_state\n\n    def terminate_container_resources(self) -> bool | None:\n        \"\"\"Terminate any artifacts created on behalf of the container's lifetime.\"\"\"\n        # Remove the docker service.\n\n        result = True  # We'll be optimistic\n        service = self._get_service()\n        if service:\n            try:\n                service.remove()  # Service still exists, attempt removal\n            except Exception as err:\n                self.log.debug(\n                    \"{} Termination of service: {} raised exception: {}\".format(\n                        self.__class__.__name__, service.name, err\n                    )\n                )\n                if isinstance(err, NotFound):\n                    pass  # okay if its not found\n                else:\n                    result = False\n                    self.log.warning(f\"Error occurred removing service: {err}\")\n        if result:\n            self.log.debug(\n                \"{}.terminate_container_resources, service {}, kernel ID: {} has been terminated.\".format(\n                    self.__class__.__name__, self.container_name, self.kernel_id\n                )\n            )\n            self.container_name = None\n            result = None  # maintain jupyter contract\n        else:\n            self.log.warning(\n                \"{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.\".format(\n                    self.__class__.__name__, self.container_name, self.kernel_id\n                )\n            )\n        return result\n\n\nclass 
DockerProcessProxy(ContainerProcessProxy):\n    \"\"\"Kernel lifecycle management for Docker kernels (non-Swarm).\"\"\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n\n    def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> DockerProcessProxy:\n        \"\"\"Launches the specified process within a Docker environment.\"\"\"\n        # Convey the network to the docker launch script\n        kwargs[\"env\"][\"EG_DOCKER_NETWORK\"] = docker_network\n        kwargs[\"env\"][\"EG_DOCKER_MODE\"] = \"docker\"\n        return super().launch_process(kernel_cmd, **kwargs)\n\n    def get_initial_states(self) -> set:\n        \"\"\"Return list of states in lowercase indicating container is starting (includes running).\"\"\"\n        return {\"created\", \"running\"}\n\n    def get_error_states(self) -> set:\n        \"\"\"Returns the list of error states indicating container is shutting down or receiving error.\"\"\"\n        return {\"restarting\", \"removing\", \"paused\", \"exited\", \"dead\"}\n\n    def _get_container(self) -> Container:\n        # Fetches the container object corresponding the the kernel_id label.\n        # Only used when docker mode == regular (not swarm)\n\n        container = None\n        containers = client.containers.list(filters={\"label\": \"kernel_id=\" + self.kernel_id})\n        num_containers = len(containers)\n        if num_containers != 1:\n            if num_containers > 1:\n                msg = \"{}: Found more than one container ({}) for kernel_id '{}'!\".format(\n                    self.__class__.__name__, num_containers, self.kernel_id\n                )\n                raise RuntimeError(msg)\n        else:\n            container = containers[0]\n        return container\n\n    def get_container_status(self, iteration: int | None) -> str:\n        
\"\"\"Return current container state.\"\"\"\n        # Locates the kernel container using the kernel_id filter.  If the phase indicates Running, the pod's IP\n        # is used for the assigned_ip.  Only used when docker mode == regular (non swarm)\n        container_status = \"\"\n\n        container = self._get_container()\n        if container:\n            self.container_name = container.name\n            if container.status:\n                container_status = container.status.lower()\n                if container_status == \"running\" and not self.assigned_host:\n                    # Container is running, capture IP\n\n                    # we'll use this as a fallback in case we don't find our network\n                    self.assigned_ip = container.attrs.get(\"NetworkSettings\").get(\"IPAddress\")\n                    networks = container.attrs.get(\"NetworkSettings\").get(\"Networks\")\n                    if len(networks) > 0:\n                        self.assigned_ip = networks.get(docker_network).get(\"IPAddress\")\n                        self.log.debug(\n                            \"Using assigned_ip {} from docker network '{}'.\".format(\n                                self.assigned_ip, docker_network\n                            )\n                        )\n                    else:\n                        self.log.warning(\n                            \"Docker network '{}' could not be located in container attributes - \"\n                            \"using assigned_ip '{}'.\".format(docker_network, self.assigned_ip)\n                        )\n\n                    self.assigned_host = self.container_name\n\n        if iteration:  # only log if iteration is not None (otherwise poll() is too noisy)\n            self.log.debug(\n                \"{}: Waiting to connect to docker container. 
\"\n                \"Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}'\".format(\n                    iteration,\n                    self.container_name,\n                    container_status,\n                    self.assigned_ip,\n                    self.kernel_id,\n                )\n            )\n\n        return container_status\n\n    def terminate_container_resources(self) -> bool | None:\n        \"\"\"Terminate any artifacts created on behalf of the container's lifetime.\"\"\"\n        # Remove the container\n\n        result = True  # Since we run containers with remove=True, we'll be optimistic\n        container = self._get_container()\n        if container:\n            try:\n                container.remove(force=True)  # Container still exists, attempt forced removal\n            except Exception as err:\n                self.log.debug(\n                    f\"Container termination for container: {container.name} raised exception: {err}\"\n                )\n                if isinstance(err, NotFound):\n                    pass  # okay if its not found\n                else:\n                    result = False\n                    self.log.warning(f\"Error occurred removing container: {err}\")\n\n        if result:\n            self.log.debug(\n                \"{}.terminate_container_resources, container {}, kernel ID: {} has been terminated.\".format(\n                    self.__class__.__name__, self.container_name, self.kernel_id\n                )\n            )\n            self.container_name = None\n            result = None  # maintain jupyter contract\n        else:\n            self.log.warning(\n                \"{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.\".format(\n                    self.__class__.__name__, self.container_name, self.kernel_id\n                )\n            )\n        return result\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/k8s.py",
    "content": "\"\"\"Code related to managing kernels running in Kubernetes clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport re\nfrom typing import Any\n\nimport kubernetes\nimport urllib3\nfrom kubernetes import client, config\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom ..sessions.kernelsessionmanager import KernelSessionManager\nfrom .container import ContainerProcessProxy\n\nurllib3.disable_warnings()\n\n# Default logging level of kubernetes produces too much noise - raise to warning only.\nlogging.getLogger(\"kubernetes\").setLevel(os.environ.get(\"EG_KUBERNETES_LOG_LEVEL\", logging.WARNING))\n\nenterprise_gateway_namespace = os.environ.get(\"EG_NAMESPACE\", \"default\")\ndefault_kernel_service_account_name = os.environ.get(\n    \"EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME\", \"default\"\n)\nkernel_cluster_role = os.environ.get(\"EG_KERNEL_CLUSTER_ROLE\", \"cluster-admin\")\nshare_gateway_namespace = bool(os.environ.get(\"EG_SHARED_NAMESPACE\", \"False\").lower() == \"true\")\nkpt_dir = os.environ.get(\"EG_POD_TEMPLATE_DIR\", \"/tmp\")  # noqa\n\nconfig.load_incluster_config()\n\n\ndef get_subject_class():\n    \"\"\"\n    Returns the appropriate Subject class based on the kubernetes client version.\n\n    In kubernetes-client, V1Subject was renamed to RbacV1Subject.\n    This function returns the appropriate class based on the installed version.\n    \"\"\"\n    # Check if V1Subject exists in the client\n    if hasattr(client, 'V1Subject'):\n        logging.debug(\n            \"Using client.V1Subject for Kubernetes client version: %s\", kubernetes.__version__\n        )\n        return client.V1Subject\n    # Fall back to RbacV1Subject for older versions\n    logging.debug(\n        \"Using client.RbacV1Subject for Kubernetes client version: %s\", kubernetes.__version__\n    )\n    return 
client.RbacV1Subject\n\n\nclass KubernetesProcessProxy(ContainerProcessProxy):\n    \"\"\"\n    Kernel lifecycle management for Kubernetes kernels.\n    \"\"\"\n\n    # Identifies the kind of object being managed by this process proxy.\n    # For these values we will prefer the values found in the 'kind' field\n    # of the object's metadata.  This attribute is strictly used to provide\n    # context to log messages.\n    object_kind = \"Pod\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n\n        self.kernel_pod_name = None\n        self.kernel_namespace = None\n        self.delete_kernel_namespace = False\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> KubernetesProcessProxy:\n        \"\"\"Launches the specified process within a Kubernetes environment.\"\"\"\n        # Set env before superclass call, so we can see these in the debug output\n\n        # Kubernetes relies on internal env variables to determine its configuration.  
When\n        # running within a K8s cluster, these start with KUBERNETES_SERVICE, otherwise look\n        # for envs prefixed with KUBECONFIG.\n        for key in os.environ:\n            if key.startswith(\"KUBECONFIG\") or key.startswith(\"KUBERNETES_SERVICE\"):\n                kwargs[\"env\"][key] = os.environ[key]\n\n        # Determine pod name and namespace - creating the latter if necessary\n        self.kernel_pod_name = self._determine_kernel_pod_name(**kwargs)\n        self.kernel_namespace = self._determine_kernel_namespace(**kwargs)\n\n        await super().launch_process(kernel_cmd, **kwargs)\n        return self\n\n    def get_initial_states(self) -> set:\n        \"\"\"Return list of states in lowercase indicating container is starting (includes running).\"\"\"\n        return [\"pending\", \"running\"]\n\n    def get_error_states(self) -> set:\n        \"\"\"Return list of states in lowercase indicating container failed .\"\"\"\n        return [\"failed\"]\n\n    def get_container_status(self, iteration: int | None) -> str:\n        \"\"\"Return current container state.\"\"\"\n        # Locates the kernel pod using the kernel_id selector.  
If the phase indicates Running, the pod's IP\n        # is used for the assigned_ip.\n        pod_status = \"\"\n        kernel_label_selector = \"kernel_id=\" + self.kernel_id + \",component=kernel\"\n        ret = client.CoreV1Api().list_namespaced_pod(\n            namespace=self.kernel_namespace, label_selector=kernel_label_selector\n        )\n        if ret and ret.items:\n            pod_info = ret.items[0]\n            self.container_name = pod_info.metadata.name\n            if pod_info.status:\n                pod_status = pod_info.status.phase.lower()\n                if pod_status == \"running\" and not self.assigned_host:\n                    # Pod is running, capture IP\n                    self.assigned_ip = pod_info.status.pod_ip\n                    self.assigned_host = self.container_name\n                    self.assigned_node_ip = pod_info.status.host_ip\n\n        if iteration:  # only log if iteration is not None (otherwise poll() is too noisy)\n            self.log.debug(\n                f\"{iteration}: Waiting to connect to k8s {self.object_kind.lower()} in \"\n                f\"namespace '{self.kernel_namespace}'. 
Name: '{self.container_name}', \"\n                f\"Status: '{pod_status}', Pod IP: '{self.assigned_ip}', KernelID: '{self.kernel_id}'\"\n            )\n\n        return pod_status\n\n    def delete_managed_object(self, termination_stati: list[str]) -> bool:\n        \"\"\"Deletes the object managed by this process-proxy\n\n        A return value of True indicates the object is considered deleted,\n        otherwise a False or None value is returned.\n\n        Note: the caller is responsible for handling exceptions.\n        \"\"\"\n        body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy=\"Background\")\n\n        # Deleting a Pod will return a v1.Pod if found and its status will be a PodStatus containing\n        # a phase string property\n        # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#podstatus-v1-core\n        v1_pod = client.CoreV1Api().delete_namespaced_pod(\n            namespace=self.kernel_namespace, body=body, name=self.container_name\n        )\n        status = None\n        if v1_pod and v1_pod.status:\n            status = v1_pod.status.phase\n\n        result = status in termination_stati\n\n        return result\n\n    def terminate_container_resources(self) -> bool | None:\n        \"\"\"Terminate any artifacts created on behalf of the container's lifetime.\"\"\"\n        # Kubernetes objects don't go away on their own - so we need to tear down the namespace\n        # and/or pod associated with the kernel.  We'll always target the pod first so that shutdown\n        # is perceived as happening more rapidly.  
Then, if we created the namespace, and we're not\n        # in the process of restarting the kernel, we'll delete the namespace.\n        # After deleting the pod we check the container status, rather than the status returned\n        # from the pod deletion API, since it's not necessarily reflective of the actual status.\n\n        result = False\n        termination_stati = [\"Succeeded\", \"Failed\", \"Terminating\", \"Success\"]\n\n        # Delete the managed object then, if applicable, the namespace\n        object_type = self.object_kind\n        try:\n            result = self.delete_managed_object(termination_stati)\n            if not result:\n                # If the status indicates the object is not terminated, capture its current status.\n                # If None, update the result to True, else issue warning that it is not YET deleted\n                # since we still have the hard termination sequence to occur.\n                cur_status = self.get_container_status(None)\n                if cur_status is None:\n                    result = True\n                else:\n                    self.log.warning(\n                        f\"{object_type} '{self.kernel_namespace}.{self.container_name}'\"\n                        f\" is not yet deleted.  
Current status is '{cur_status}'.\"\n                    )\n\n            if self.delete_kernel_namespace and not self.kernel_manager.restarting:\n                object_type = \"Namespace\"\n                # Status is a return value for calls that don't return other objects.\n                # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#status-v1-meta\n                body = client.V1DeleteOptions(\n                    grace_period_seconds=0, propagation_policy=\"Background\"\n                )\n                v1_status = client.CoreV1Api().delete_namespace(\n                    name=self.kernel_namespace, body=body\n                )\n                status = None\n                if v1_status:\n                    status = v1_status.status\n\n                if status and any(s in status for s in termination_stati):\n                    result = True\n\n                if not result:\n                    self.log.warning(\n                        f\"Namespace {self.kernel_namespace} is not yet deleted.  
\"\n                        f\"Current status is '{status}'.\"\n                    )\n\n        except Exception as err:\n            if isinstance(err, client.rest.ApiException) and err.status == 404:\n                result = True  # okay if it's not found\n            else:\n                self.log.warning(f\"Error occurred deleting {object_type.lower()}: {err}\")\n\n        if result:\n            self.log.debug(\n                f\"KubernetesProcessProxy.terminate_container_resources, \"\n                f\"{self.object_kind}: {self.kernel_namespace}.{self.container_name}, \"\n                f\"kernel ID: {self.kernel_id} has been terminated.\"\n            )\n            self.container_name = None\n            result = None  # maintain jupyter contract\n        else:\n            self.log.warning(\n                \"KubernetesProcessProxy.terminate_container_resources, \"\n                f\"{self.object_kind}: {self.kernel_namespace}.{self.container_name}, \"\n                f\"kernel ID: {self.kernel_id} has not been terminated.\"\n            )\n\n        # Check if there's a kernel pod template file for this kernel and silently delete it.\n        kpt_file = kpt_dir + \"/kpt_\" + self.kernel_id\n        try:\n            os.remove(kpt_file)\n        except OSError:\n            pass\n\n        return result\n\n    def _safe_template_substitute(self, template_str: str, variables: dict) -> str | None:\n        \"\"\"\n        Safely substitute variables in Jinja2-style template syntax.\n        Only supports simple variable substitution: {{ variable_name }}\n        Logs missing variables and returns None if any are missing.\n        \"\"\"\n        # Pattern to match {{ variable_name }} with optional whitespace\n        # Explicitly exclude variables starting with underscore to prevent magic method attacks\n        pattern = r'\\{\\{\\s*([a-zA-Z][a-zA-Z0-9_]*)\\s*\\}\\}'\n        missing_vars = []\n\n        def replace_var(match):\n            
var_name = match.group(1)\n            if var_name in variables:\n                return str(variables[var_name])\n            else:\n                missing_vars.append(var_name)\n                return match.group(0)  # Keep original placeholder\n\n        result = re.sub(pattern, replace_var, template_str)\n\n        # Check if there are any remaining {{ }} patterns that didn't match our simple pattern\n        # This catches malicious templates like {{ foo.__class__ }} or {{ 1+1 }}\n        if '{{' in result and '}}' in result:\n            self.log.warning(\n                \"Invalid template syntax detected in KERNEL_POD_NAME: contains unsupported expressions\"\n            )\n            return None\n\n        # Log missing variables and return None if any are missing\n        if missing_vars:\n            self.log.warning(f\"Template variables not found in KERNEL_POD_NAME: {missing_vars}\")\n            return None  # Signal caller to use default\n\n        return result\n\n    def _determine_kernel_pod_name(self, **kwargs: dict[str, Any] | None) -> str:\n        pod_name = kwargs[\"env\"].get(\"KERNEL_POD_NAME\")\n\n        if pod_name is None:\n            pod_name = KernelSessionManager.get_kernel_username(**kwargs) + \"-\" + self.kernel_id\n        else:\n            self.log.debug(f\"Processing KERNEL_POD_NAME based on env var => {pod_name}\")\n            if \"{{\" in pod_name and \"}}\" in pod_name:\n                self.log.debug(\"Processing KERNEL_POD_NAME template variables\")\n                keywords = {}\n                for name, value in kwargs[\"env\"].items():\n                    if name.startswith(\"KERNEL_\"):\n                        keywords[name.lower()] = value\n                keywords[\"kernel_id\"] = self.kernel_id\n\n                # Safe template substitution with fallback\n                substituted = self._safe_template_substitute(pod_name, keywords)\n                if substituted is None:\n                    # Fall back 
to default if template variables are missing\n                    self.log.warning(\n                        \"Falling back to default pod name due to missing template variables\"\n                    )\n                    pod_name = (\n                        KernelSessionManager.get_kernel_username(**kwargs) + \"-\" + self.kernel_id\n                    )\n                else:\n                    pod_name = substituted\n\n        # Rewrite pod_name to be compatible with DNS name convention\n        # And put back into env since kernel needs this\n        pod_name = re.sub(\"[^0-9a-z]+\", \"-\", pod_name.lower())\n        while pod_name.startswith(\"-\"):\n            pod_name = pod_name[1:]\n        while pod_name.endswith(\"-\"):\n            pod_name = pod_name[:-1]\n        kwargs[\"env\"][\"KERNEL_POD_NAME\"] = pod_name\n\n        return pod_name\n\n    def _determine_kernel_namespace(self, **kwargs: dict[str, Any] | None) -> str:\n        # Since we need the service account name regardless of whether we're creating the namespace or not,\n        # get it now.\n        service_account_name = KubernetesProcessProxy._determine_kernel_service_account_name(\n            **kwargs\n        )\n\n        # If KERNEL_NAMESPACE was provided, then we assume it already exists.  If not provided, then we'll\n        # create the namespace and record that we'll want to delete it as well.\n        namespace = kwargs[\"env\"].get(\"KERNEL_NAMESPACE\")\n        if namespace is None:\n            # check if share gateway namespace is configured...\n            if share_gateway_namespace:  # if so, set to EG namespace\n                namespace = enterprise_gateway_namespace\n                self.log.warning(\n                    \"Shared namespace has been configured.  
All kernels will reside in EG namespace: {}\".format(\n                        namespace\n                    )\n                )\n            else:\n                namespace = self._create_kernel_namespace(service_account_name)\n            kwargs[\"env\"][\"KERNEL_NAMESPACE\"] = namespace  # record in env since kernel needs this\n        else:\n            self.log.info(f\"KERNEL_NAMESPACE provided by client: {namespace}\")\n\n        return namespace\n\n    @staticmethod\n    def _determine_kernel_service_account_name(**kwargs: dict[str, Any] | None) -> str:\n        # Check if an account name was provided.  If not, set to the default name (which can be set\n        # from the EG env as well).  Finally, ensure the env value is set.\n        service_account_name = kwargs[\"env\"].get(\n            \"KERNEL_SERVICE_ACCOUNT_NAME\", default_kernel_service_account_name\n        )\n        kwargs[\"env\"][\"KERNEL_SERVICE_ACCOUNT_NAME\"] = service_account_name\n        return service_account_name\n\n    def _create_kernel_namespace(self, service_account_name: str) -> str:\n        # Creates the namespace for the kernel based on the kernel username and kernel id.  Since we're creating\n        # the namespace, we'll also note that it should be deleted as well.  In addition, the kernel pod may need\n        # to list/create other pods (true for spark-on-k8s), so we'll also create a RoleBinding associated with\n        # the namespace's default ServiceAccount.  
Since this is always done when creating a namespace, we can\n        # delete the RoleBinding when deleting the namespace (no need to record that via another member variable).\n\n        namespace = self.kernel_pod_name\n\n        # create the namespace ...\n        labels = {\"app\": \"enterprise-gateway\", \"component\": \"kernel\", \"kernel_id\": self.kernel_id}\n        namespace_metadata = client.V1ObjectMeta(name=namespace, labels=labels)\n        body = client.V1Namespace(metadata=namespace_metadata)\n\n        # create the namespace\n        try:\n            client.CoreV1Api().create_namespace(body=body)\n            self.delete_kernel_namespace = True\n            self.log.info(f\"Created kernel namespace: {namespace}\")\n\n            # Now create a RoleBinding for this namespace for the default ServiceAccount.  We'll reference\n            # the ClusterRole, but that will only be applied for this namespace.  This prevents the need for\n            # creating a role each time.\n            self._create_role_binding(namespace, service_account_name)\n        except Exception as err:\n            if (\n                isinstance(err, client.rest.ApiException)\n                and err.status == 409\n                and self.kernel_manager.restarting\n            ):\n                self.delete_kernel_namespace = (\n                    True  # okay if ns already exists and restarting, still mark for delete\n                )\n                self.log.info(f\"Re-using kernel namespace: {namespace}\")\n            else:\n                if self.delete_kernel_namespace:\n                    reason = \"Error occurred creating role binding for namespace '{}': {}\".format(\n                        namespace, err\n                    )\n                    # delete the namespace since we'll be using the EG namespace...\n                    body = client.V1DeleteOptions(\n                        grace_period_seconds=0, propagation_policy=\"Background\"\n              
      )\n                    client.CoreV1Api().delete_namespace(name=namespace, body=body)\n                    self.log.warning(f\"Deleted kernel namespace: {namespace}\")\n                else:\n                    reason = f\"Error occurred creating namespace '{namespace}': {err}\"\n                self.log_and_raise(http_status_code=500, reason=reason)\n\n        return namespace\n\n    def _create_role_binding(self, namespace: str, service_account_name: str) -> None:\n        # Creates RoleBinding instance for the given namespace.  The role used will be the ClusterRole named by\n        # EG_KERNEL_CLUSTER_ROLE.\n        # Note that roles referenced in RoleBindings are scoped to the namespace so re-using the cluster role prevents\n        # the need for creating a new role with each kernel.\n        # The ClusterRole will be bound to the kernel service user identified by KERNEL_SERVICE_ACCOUNT_NAME then\n        # EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME, respectively.\n        # We will not use a try/except clause here since _create_kernel_namespace will handle exceptions.\n\n        role_binding_name = kernel_cluster_role  # use same name for binding as cluster role\n        labels = {\"app\": \"enterprise-gateway\", \"component\": \"kernel\", \"kernel_id\": self.kernel_id}\n        binding_metadata = client.V1ObjectMeta(name=role_binding_name, labels=labels)\n        binding_role_ref = client.V1RoleRef(\n            api_group=\"\", kind=\"ClusterRole\", name=kernel_cluster_role\n        )\n        # Use the appropriate Subject class based on kubernetes client version\n        SubjectClass = get_subject_class()\n        binding_subjects = SubjectClass(\n            api_group=\"\", kind=\"ServiceAccount\", name=service_account_name, namespace=namespace\n        )\n\n        body = client.V1RoleBinding(\n            kind=\"RoleBinding\",\n            metadata=binding_metadata,\n            role_ref=binding_role_ref,\n            subjects=[binding_subjects],\n   
     )\n\n        client.RbacAuthorizationV1Api().create_namespaced_role_binding(\n            namespace=namespace, body=body\n        )\n        self.log.info(\n            \"Created kernel role-binding '{}' in namespace: {} for service account: {}\".format(\n                role_binding_name, namespace, service_account_name\n            )\n        )\n\n    def get_process_info(self) -> dict[str, Any]:\n        \"\"\"Captures the base information necessary for kernel persistence relative to kubernetes.\"\"\"\n        process_info = super().get_process_info()\n        process_info.update(\n            {\"kernel_ns\": self.kernel_namespace, \"delete_ns\": self.delete_kernel_namespace}\n        )\n        return process_info\n\n    def load_process_info(self, process_info: dict[str, Any]) -> None:\n        \"\"\"Loads the base information necessary for kernel persistence relative to kubernetes.\"\"\"\n        super().load_process_info(process_info)\n        self.kernel_namespace = process_info[\"kernel_ns\"]\n        self.delete_kernel_namespace = process_info[\"delete_ns\"]\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/processproxy.py",
    "content": "\"\"\"Kernel managers that operate against a remote process.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nimport abc\nimport asyncio\nimport base64\nimport errno\nimport getpass\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport signal\nimport subprocess\nimport sys\nimport time\nimport warnings\nfrom calendar import timegm\nfrom enum import Enum\nfrom socket import (\n    AF_INET,\n    SHUT_RDWR,\n    SHUT_WR,\n    SO_REUSEADDR,\n    SOCK_STREAM,\n    SOL_SOCKET,\n    gethostbyname,\n    gethostname,\n    socket,\n    timeout,\n)\nfrom typing import Any\n\nimport paramiko\nimport pexpect\nfrom Cryptodome.Cipher import AES, PKCS1_v1_5\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Util.Padding import unpad\nfrom jupyter_client import launch_kernel, localinterfaces\nfrom jupyter_server import _tz\nfrom jupyter_server.serverapp import random_ports\nfrom paramiko.client import SSHClient\nfrom tornado import web\nfrom tornado.ioloop import PeriodicCallback\nfrom traitlets.config import SingletonConfigurable\nfrom zmq.ssh import tunnel\n\nfrom ..sessions.kernelsessionmanager import KernelSessionManager\n\n# Default logging level of paramiko produces too much noise - raise to warning only.\nlogging.getLogger(\"paramiko\").setLevel(os.getenv(\"EG_SSH_LOG_LEVEL\", logging.WARNING))\n\n# Pop certain env variables that don't need to be logged, e.g. 
remote_pwd\nenv_pop_list = [\"EG_REMOTE_PWD\", \"LS_COLORS\"]\n\n# Comma separated list of env variables that shouldn't be logged\nsensitive_env_keys = os.getenv(\"EG_SENSITIVE_ENV_KEYS\", \"\").lower().split(\",\")\nredaction_mask = os.getenv(\"EG_REDACTION_MASK\", \"********\")\n\ndefault_kernel_launch_timeout = float(os.getenv(\"EG_KERNEL_LAUNCH_TIMEOUT\", \"30\"))\nmax_poll_attempts = int(os.getenv(\"EG_MAX_POLL_ATTEMPTS\", \"10\"))\npoll_interval = float(os.getenv(\"EG_POLL_INTERVAL\", \"0.5\"))\nsocket_timeout = float(os.getenv(\"EG_SOCKET_TIMEOUT\", \"0.005\"))\ntunneling_enabled = bool(os.getenv(\"EG_ENABLE_TUNNELING\", \"False\").lower() == \"true\")\nssh_port = int(os.getenv(\"EG_SSH_PORT\", \"22\"))\neg_response_ip = os.getenv(\"EG_RESPONSE_IP\", None)\ndesired_response_port = int(os.getenv(\"EG_RESPONSE_PORT\", 8877))\nresponse_port_retries = int(os.getenv(\"EG_RESPONSE_PORT_RETRIES\", 10))\nresponse_addr_any = bool(os.getenv(\"EG_RESPONSE_ADDR_ANY\", \"False\").lower() == \"true\")\n\nconnection_interval = (\n    poll_interval / 100.0\n)  # already polling, so make connection timeout a fraction of outer poll\n\n# Minimum port range size and max retries\nmin_port_range_size = int(os.getenv(\"EG_MIN_PORT_RANGE_SIZE\", \"1000\"))\nmax_port_range_retries = int(os.getenv(\"EG_MAX_PORT_RANGE_RETRIES\", \"5\"))\n\n# Number of seconds in 100 years as the max keep-alive interval value.\nmax_keep_alive_interval = 100 * 365 * 24 * 60 * 60\n\n# Allow users to specify local ips (regular expressions can be used) that should not be included\n# when determining the response address.  For example, on systems with many network interfaces,\n# some may have their IPs appear the local interfaces list (e.g., docker's 172.17.0.* is an example)\n# that should not be used.  
This env can be used to indicate such IPs.\nprohibited_local_ips = os.getenv(\"EG_PROHIBITED_LOCAL_IPS\", \"\").split(\",\")\n\n\ndef _get_local_ip() -> str:\n    \"\"\"\n    Honor the prohibited IPs, locating the first not in the list.\n    \"\"\"\n    for ip in localinterfaces.public_ips():\n        is_prohibited = False\n        for prohibited_ip in prohibited_local_ips:  # exhaust prohibited list, applying regexs\n            if re.match(prohibited_ip, ip):\n                is_prohibited = True\n                break\n        if not is_prohibited:\n            return ip\n    return localinterfaces.public_ips()[0]  # all were prohibited, so go with the first\n\n\nlocal_ip = _get_local_ip()\n\nrandom.seed()\n\n\nclass KernelChannel(Enum):\n    \"\"\"\n    Enumeration used to better manage tunneling\n    \"\"\"\n\n    SHELL = \"SHELL\"\n    IOPUB = \"IOPUB\"\n    STDIN = \"STDIN\"\n    HEARTBEAT = \"HB\"\n    CONTROL = \"CONTROL\"\n    COMMUNICATION = (\n        \"EG_COMM\"  # Optional channel for remote launcher to issue interrupts - NOT a ZMQ channel\n    )\n\n\nclass Response(asyncio.Event):\n    \"\"\"Combines the event behavior with the kernel launch response.\"\"\"\n\n    _response = None\n\n    @property\n    def response(self):\n        return self._response\n\n    @response.setter\n    def response(self, value):\n        \"\"\"Set the response.  NOTE: this marks the event as set.\"\"\"\n        self._response = value\n        self.set()\n\n\nclass ResponseManager(SingletonConfigurable):\n    \"\"\"Singleton that manages the responses from each kernel launcher at startup.\n\n    This singleton does the following:\n    1. Acquires a public and private RSA key pair at first use to encrypt and decrypt the\n       received responses.  
The public key is sent to the launcher during startup\n       and is used by the launcher to encrypt the AES key the launcher uses to encrypt\n       the connection information, while the private key remains in the server and is\n       used to decrypt the AES key from the response - which it then uses to decrypt\n       the connection information.\n    2. Creates a single socket based on the configuration settings that is listened on\n       via a periodic callback.\n    3. On receipt, it decrypts the response (key then connection info) and posts the\n       response payload to a map identified by the kernel_id embedded in the response.\n    4. Provides a wait mechanism for callers to poll to get their connection info\n       based on their registration (of kernel_id).\n    \"\"\"\n\n    KEY_SIZE = 1024  # Can be small since it's only used to {en,de}crypt the AES key.\n    _instance = None\n\n    def __init__(self, **kwargs: dict[str, Any] | None):\n        \"\"\"Initialize the manager.\"\"\"\n        super().__init__(**kwargs)\n        self._response_ip = None\n        self._response_port = None\n        self._response_socket = None\n        self._connection_processor = None\n\n        # Create encryption keys...\n        self._private_key = RSA.generate(ResponseManager.KEY_SIZE)\n        self._public_key = self._private_key.publickey()\n        self._public_pem = self._public_key.export_key(\"PEM\")\n\n        # Event facility...\n        self._response_registry = {}\n\n        # Start the response manager (create socket, periodic callback, etc.) 
...\n        self._start_response_manager()\n\n    @property\n    def public_key(self) -> str:\n        \"\"\"Provides the string-form of public key PEM with header/footer/newlines stripped.\"\"\"\n        return (\n            self._public_pem.decode()\n            .replace(\"-----BEGIN PUBLIC KEY-----\", \"\")\n            .replace(\"-----END PUBLIC KEY-----\", \"\")\n            .replace(\"\\n\", \"\")\n        )\n\n    @property\n    def response_address(self) -> str:\n        return self._response_ip + \":\" + str(self._response_port)\n\n    def register_event(self, kernel_id: str) -> None:\n        \"\"\"Register kernel_id so its connection information can be processed.\"\"\"\n        self._response_registry[kernel_id] = Response()\n\n    async def get_connection_info(self, kernel_id: str) -> dict:\n        \"\"\"Performs a timeout wait on the event, returning the connection information on completion.\"\"\"\n        await asyncio.wait_for(self._response_registry[kernel_id].wait(), connection_interval)\n        return self._response_registry.pop(kernel_id).response\n\n    def _prepare_response_socket(self) -> None:\n        \"\"\"Prepares the response socket on which connection info arrives from remote kernel launcher.\"\"\"\n        s = socket(AF_INET, SOCK_STREAM)\n        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\n        # If response_addr_any is enabled (default disabled), we will permit the server to listen\n        # on all addresses, else we will honor a configured response IP (via env) over the local IP\n        # (which is the default).\n        # Multiple IP bindings should be configured for containerized configurations (k8s) that need to\n        # launch kernels into external YARN clusters.\n        bind_ip = local_ip if eg_response_ip is None else eg_response_ip\n        bind_ip = bind_ip if response_addr_any is False else \"\"\n\n        response_port = desired_response_port\n        for port in random_ports(response_port, 
response_port_retries + 1):\n            try:\n                s.bind((bind_ip, port))\n            except OSError as e:\n                if e.errno == errno.EADDRINUSE:\n                    self.log.info(f\"Response port {port} is already in use, trying another port...\")\n                    continue\n                elif e.errno in (errno.EACCES, getattr(errno, \"WSAEACCES\", errno.EACCES)):\n                    self.log.warning(\n                        f\"Permission to bind to response port {port} denied - continuing...\"\n                    )\n                    continue\n                else:\n                    msg = f\"Failed to bind to port '{port}' for response address due to: '{e}'\"\n                    raise RuntimeError(msg) from e\n            else:\n                response_port = port\n                break\n        else:\n            msg = f\"No available response port could be found after {response_port_retries + 1} attempts\"\n            self.log.critical(msg)\n            raise RuntimeError(msg)\n\n        self.log.info(\n            f\"Enterprise Gateway is bound to port {response_port} \"\n            f\"for remote kernel connection information.\"\n        )\n        s.listen(128)\n        s.settimeout(socket_timeout)\n        self._response_socket = s\n        self._response_port = response_port\n        self._response_ip = local_ip if eg_response_ip is None else eg_response_ip\n\n    def _start_response_manager(self) -> None:\n        \"\"\"If not already started, creates and starts the periodic callback to process connections.\"\"\"\n        if self._response_socket is None:\n            self._prepare_response_socket()\n\n        if self._connection_processor is None:\n            self._connection_processor = PeriodicCallback(self._process_connections, 0.1, 0.1)\n            self._connection_processor.start()\n\n    def stop_response_manager(self) -> None:\n        \"\"\"Stops the connection processor.\"\"\"\n        if 
self._connection_processor is not None:\n            self._connection_processor.stop()\n            self._connection_processor = None\n\n        if self._response_socket is not None:\n            self._response_socket = None\n\n    async def _process_connections(self) -> None:\n        \"\"\"Checks the socket for data, if found, decrypts the payload and posts to 'wait map'.\"\"\"\n        loop = asyncio.get_event_loop()\n        data = \"\"\n        try:\n            conn, addr = await loop.sock_accept(self._response_socket)\n            while True:\n                buffer = await loop.sock_recv(conn, 1024)\n                if not buffer:  # send is complete, process payload\n                    self.log.debug(f\"Received payload '{data}'\")\n                    payload = self._decode_payload(data)\n                    self.log.debug(f\"Decrypted payload '{payload}'\")\n                    self._post_connection(payload)\n                    break\n                data = data + buffer.decode(\n                    encoding=\"utf-8\"\n                )  # append what we received until we get no more...\n            conn.close()\n        except timeout:\n            pass\n        except Exception as ex:\n            self.log.error(f\"Failure occurred processing connection: {ex}\")\n\n    def _decode_payload(self, data: json) -> dict:\n        \"\"\"\n        Decodes the payload.\n\n        Decodes the payload, identifying the payload's version and returns a dictionary\n        representing the kernel's connection information.\n\n        Version \"0\" payloads do not specify a kernel-id within the payload, nor do they\n        include a 'key', 'version' or 'conn_info' fields.  They are purely an AES encrypted\n        form of the base64-encoded JSON connection information, and encrypted using the\n        kernel-id as a key.  
Since no kernel-id is in the payload, we will capture the keys\n        of registered kernel-ids and attempt to decrypt the payload until we find the\n        appropriate registrant.\n\n        Version \"1+\" payloads are a base64-encoded JSON string consisting of a 'version', 'key'\n        and 'conn_info' fields.  The 'key' field will be decrypted using the private key to\n        reveal the AES key, which is then used to decrypt the `conn_info` field.\n\n        Once decryption has taken place, the connection information string is loaded into a\n        dictionary and returned.\n        \"\"\"\n\n        payload_str = base64.b64decode(data)\n        try:\n            payload = json.loads(payload_str)\n            # Get the version\n            version = payload.get(\"version\")\n            if version is None:\n                msg = \"Payload received from kernel does not include a version indicator!\"\n                raise ValueError(msg)\n            self.log.debug(f\"Version {version} payload received.\")\n\n            if version == 1:\n                # Decrypt the AES key using the RSA private key\n                encrypted_aes_key = base64.b64decode(payload[\"key\"].encode())\n                cipher = PKCS1_v1_5.new(self._private_key)\n                aes_key = cipher.decrypt(encrypted_aes_key, b\"\\x42\")\n                # Per docs, don't convey that decryption returned sentinel.  
So just let\n                # things fail \"naturally\".\n                # Decrypt and unpad the connection information using the just-decrypted AES key\n                cipher = AES.new(aes_key, AES.MODE_ECB)\n                encrypted_connection_info = base64.b64decode(payload[\"conn_info\"].encode())\n                connection_info_str = unpad(cipher.decrypt(encrypted_connection_info), 16).decode()\n            else:\n                msg = f\"Unexpected version indicator received: {version}!\"\n                raise ValueError(msg)\n        except Exception as ex:\n            # Could be version \"0\", walk the registrant kernel-ids and attempt to decrypt using each as a key.\n            # If none are found, re-raise the triggering exception.\n            self.log.debug(f\"decode_payload exception - {ex.__class__.__name__}: {ex}\")\n            connection_info_str = None\n            for kernel_id in self._response_registry:\n                aes_key = kernel_id[0:16]\n                try:\n                    cipher = AES.new(aes_key.encode(\"utf-8\"), AES.MODE_ECB)\n                    decrypted_payload = cipher.decrypt(payload_str)\n                    # Version \"0\" responses use custom padding, so remove that here.\n                    connection_info_str = \"\".join(\n                        [decrypted_payload.decode(\"utf-8\").rsplit(\"}\", 1)[0], \"}\"]\n                    )\n                    # Try to load as JSON\n                    new_connection_info = json.loads(connection_info_str)\n                    # Add kernel_id into dict, then dump back to string so this can be processed as valid response\n                    new_connection_info[\"kernel_id\"] = kernel_id\n                    connection_info_str = json.dumps(new_connection_info)\n                    self.log.warning(\n                        f\"WARNING!!!! Legacy kernel response received for kernel_id '{kernel_id}'! 
\"\n                        \"Update kernel launchers to current version!\"\n                    )\n                    break  # If we're here, we made it!\n                except Exception as ex2:\n                    # Any exception fails this experiment and we continue\n                    self.log.debug(\n                        \"Received the following exception detecting legacy kernel response - {}: {}\".format(\n                            ex2.__class__.__name__, ex2\n                        )\n                    )\n                    connection_info_str = None\n\n            if connection_info_str is None:\n                raise ex\n\n        # and convert to usable dictionary\n        connection_info = json.loads(connection_info_str)\n        return connection_info\n\n    def _post_connection(self, connection_info: dict) -> None:\n        \"\"\"Posts connection information into \"wait map\" based on kernel_id value.\"\"\"\n        kernel_id = connection_info.get(\"kernel_id\")\n        if kernel_id is None:\n            self.log.error(\"No kernel id found in response!  Kernel launch will fail.\")\n            return\n        if kernel_id not in self._response_registry:\n            self.log.error(\n                f\"Kernel id '{kernel_id}' has not been registered and will not be processed!\"\n            )\n            return\n\n        self.log.debug(f\"Connection info received for kernel '{kernel_id}': {connection_info}\")\n        self._response_registry[kernel_id].response = connection_info\n\n\nclass BaseProcessProxyABC(metaclass=abc.ABCMeta):\n    \"\"\"\n    Process Proxy Abstract Base Class.\n\n    Defines the required methods for process proxy classes.  
Some implementation is also performed\n    by these methods - common to all subclasses.\n    \"\"\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):  # noqa: F821\n        \"\"\"\n        Initialize the process proxy instance.\n\n        Parameters\n        ----------\n        kernel_manager : RemoteKernelManager\n            The kernel manager instance tied to this process proxy.  This drives the process proxy method calls.\n\n        proxy_config : dict\n            The dictionary of per-kernel config settings.  If none are specified, this will be an empty dict.\n        \"\"\"\n        self.kernel_manager = kernel_manager\n        self.proxy_config = proxy_config\n        # Initialize to 0 IP primarily so restarts of remote kernels don't encounter local-only enforcement during\n        # relaunch (see jupyter_client.manager.start_kernel().\n        self.kernel_manager.ip = \"0.0.0.0\"  # noqa\n        self.log = kernel_manager.log\n\n        # extract the kernel_id string from the connection file and set the KERNEL_ID environment variable\n        if self.kernel_manager.kernel_id is None:\n            self.kernel_manager.kernel_id = (\n                os.path.basename(self.kernel_manager.connection_file)\n                .replace(\"kernel-\", \"\")\n                .replace(\".json\", \"\")\n            )\n\n        self.kernel_id = self.kernel_manager.kernel_id\n        self.kernel_launch_timeout = default_kernel_launch_timeout\n        self.lower_port = 0\n        self.upper_port = 0\n        self._validate_port_range()\n\n        # Handle authorization sets...\n        # Take union of unauthorized users...\n        self.unauthorized_users = self.kernel_manager.unauthorized_users\n        if proxy_config.get(\"unauthorized_users\"):\n            self.unauthorized_users = self.unauthorized_users.union(\n                proxy_config.get(\"unauthorized_users\").split(\",\")\n            )\n\n        # Let authorized users 
override global value - if set on kernelspec...\n        if proxy_config.get(\"authorized_users\"):\n            self.authorized_users = set(proxy_config.get(\"authorized_users\").split(\",\"))\n        else:\n            self.authorized_users = self.kernel_manager.authorized_users\n\n        # Represents the local process (from popen) if applicable.  Note that we could have local_proc = None even when\n        # the subclass is a LocalProcessProxy (or YarnProcessProxy).  This will happen if EG is restarted and the\n        # persisted kernel-sessions indicate that its now running on a different server.  In those cases, we use the ip\n        # member variable to determine if the persisted state is local or remote and use signals with the pid to\n        # implement the poll, kill and send_signal methods.  As a result, what was a local kernel with one EG instance\n        # could be a remote kernel in a restarted EG instance - and vice versa.\n        self.local_proc = None\n        self.ip = None\n        self.pid = 0\n        self.pgid = 0\n        _remote_user = os.getenv(\"EG_REMOTE_USER\")\n        self.remote_pwd = os.getenv(\"EG_REMOTE_PWD\")\n        self._use_gss_raw = os.getenv(\"EG_REMOTE_GSS_SSH\", \"False\")\n        if self._use_gss_raw.lower() not in (\"\", \"true\", \"false\"):\n            msg = (\n                \"Invalid Value for EG_REMOTE_GSS_SSH expected one of \"\n                f'\"\", \"True\", \"False\", got {self._use_gss_raw!r}'\n            )\n            raise ValueError(msg)\n        self.use_gss = self._use_gss_raw == \"true\"\n        if self.use_gss:\n            if self.remote_pwd or _remote_user:\n                warnings.warn(\n                    \"Both `EG_REMOTE_GSS_SSH` and one of `EG_REMOTE_PWD` or \"\n                    \"`EG_REMOTE_USER` is set. \"\n                    \"Those options are mutually exclusive, you configuration may be incorrect. 
\"\n                    \"EG_REMOTE_GSS_SSH will take priority.\",\n                    stacklevel=2,\n                )\n            self.remote_user = None\n        else:\n            self.remote_user = _remote_user if _remote_user else getpass.getuser()\n\n    @abc.abstractmethod\n    async def launch_process(self, kernel_cmd: str, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Provides basic implementation for launching the process corresponding to the process proxy.\n\n        All overrides should call this method via `super()` so that basic/common operations can be\n        performed.  Leaf class implementations are required to perform the actual process launch\n        depending on the type of process proxy.\n\n        Parameters\n        ----------\n        kernel_cmd : str\n            The properly formatted string composed from the argv stanza of the kernelspec with\n            all curly-braced substitutions performed.\n\n        kwargs : optional\n            Additional arguments used during the launch - primarily the env to use for the kernel.\n        \"\"\"\n        env_dict = kwargs.get(\"env\")\n        if env_dict is None:\n            env_dict = dict(os.environ.copy())\n            kwargs.update({\"env\": env_dict})\n\n        # see if KERNEL_LAUNCH_TIMEOUT was included from user.  
If so, override default\n        if env_dict.get(\"KERNEL_LAUNCH_TIMEOUT\"):\n            self.kernel_launch_timeout = float(env_dict.get(\"KERNEL_LAUNCH_TIMEOUT\"))\n\n        # add the applicable kernel_id and language to the env dict\n        env_dict[\"KERNEL_ID\"] = self.kernel_id\n\n        kernel_language = \"unknown-kernel-language\"\n        if len(self.kernel_manager.kernel_spec.language) > 0:\n            kernel_language = self.kernel_manager.kernel_spec.language.lower()\n        # if already set in env: stanza, let that override.\n        env_dict[\"KERNEL_LANGUAGE\"] = env_dict.get(\"KERNEL_LANGUAGE\", kernel_language)\n\n        # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS)\n        for k in env_pop_list:\n            env_dict.pop(k, None)\n\n        self._enforce_authorization(**kwargs)\n\n        # Filter sensitive values from being logged\n        env_copy = kwargs.get(\"env\").copy()\n\n        if sensitive_env_keys:\n            for key in list(env_copy):\n                if any(phrase in key.lower() for phrase in sensitive_env_keys):\n                    env_copy[key] = redaction_mask\n\n        self.log.debug(f\"BaseProcessProxy.launch_process() env: {env_copy}\")\n\n    def launch_kernel(\n        self, cmd: list[str], **kwargs: dict[str, Any] | None\n    ) -> subprocess.Popen[str | bytes]:\n        \"\"\"\n        Returns the result of launching the kernel via Popen.\n\n        This method exists to allow process proxies to perform any final preparations for\n        launch, including the removal of any arguments that are not recognized by Popen.\n        \"\"\"\n\n        # Remove kernel_headers\n        kwargs.pop(\"kernel_headers\", None)\n        return launch_kernel(cmd, **kwargs)\n\n    def cleanup(self) -> None:  # noqa\n        \"\"\"Performs optional cleanup after kernel is shutdown.  
Child classes are responsible for implementations.\"\"\"\n        pass\n\n    def poll(self) -> Any | None:\n        \"\"\"\n        Determines if process proxy is still alive.\n\n        If this corresponds to a local (popen) process, poll() is called on the subprocess.\n        Otherwise, the zero signal is used to determine if active.\n        \"\"\"\n        if self.local_proc:\n            return self.local_proc.poll()\n\n        return self.send_signal(0)\n\n    def wait(self) -> int | None:\n        \"\"\"\n        Wait for the process to become inactive.\n        \"\"\"\n        # If we have a local_proc, call its wait method.  This will clean up any defunct processes when the kernel\n        # is shutdown (when using waitAppCompletion = false).  Otherwise (if no local_proc) we'll use polling to\n        # determine if a (remote or revived) process is still active.\n        if self.local_proc:\n            return self.local_proc.wait()\n\n        for _ in range(max_poll_attempts):\n            if self.poll():\n                time.sleep(poll_interval)\n            else:\n                break\n        else:\n            self.log.warning(\n                \"Wait timeout of {} seconds exhausted. Continuing...\".format(\n                    max_poll_attempts * poll_interval\n                )\n            )\n        return None\n\n    def send_signal(self, signum: int) -> bool | None:\n        \"\"\"\n        Send signal `signum` to process proxy.\n\n        Parameters\n        ----------\n        signum : int\n            The signal number to send.  
Zero is used to determine heartbeat.\n        \"\"\"\n        # if we have a local process, use its method, else determine if the ip is local or remote and issue\n        # the appropriate version to signal the process.\n        result = None\n        if self.local_proc:\n            if self.pgid > 0 and hasattr(os, \"killpg\"):\n                try:\n                    os.killpg(self.pgid, signum)\n                    return result\n                except OSError:\n                    pass\n            result = self.local_proc.send_signal(signum)\n        else:\n            if self.ip and self.pid > 0:\n                if BaseProcessProxyABC.ip_is_local(self.ip):\n                    result = self.local_signal(signum)\n                else:\n                    result = self.remote_signal(signum)\n        return result\n\n    def kill(self) -> bool | None:\n        \"\"\"\n        Terminate the process proxy process.\n\n        First attempts graceful termination, then forced termination.\n        Note that this should only be necessary if the message-based kernel termination has\n        proven unsuccessful.\n        \"\"\"\n        # If we have a local process, use its method, else signal soft kill first before hard kill.\n        result = self.terminate()  # Send -15 signal first\n        i = 1\n        while self.poll() is None and i <= max_poll_attempts:\n            time.sleep(poll_interval)\n            i = i + 1\n        if i > max_poll_attempts:  # Send -9 signal if process is still alive\n            if self.local_proc:\n                result = self.local_proc.kill()\n                self.log.debug(f\"BaseProcessProxy.kill(): {result}\")\n            else:\n                if self.ip and self.pid > 0:\n                    if BaseProcessProxyABC.ip_is_local(self.ip):\n                        result = self.local_signal(signal.SIGKILL)\n                    else:\n                        result = self.remote_signal(signal.SIGKILL)\n                    
self.log.debug(f\"SIGKILL signal sent to pid: {self.pid}\")\n        return result\n\n    def terminate(self) -> bool | None:\n        \"\"\"\n        Gracefully terminate the process proxy process.\n\n        Note that this should only be necessary if the message-based kernel termination has\n        proven unsuccessful.\n        \"\"\"\n        # If we have a local process, use its method, else send signal SIGTERM to soft kill.\n        result = None\n        if self.local_proc:\n            result = self.local_proc.terminate()\n            self.log.debug(f\"BaseProcessProxy.terminate(): {result}\")\n        else:\n            if self.ip and self.pid > 0:\n                if BaseProcessProxyABC.ip_is_local(self.ip):\n                    result = self.local_signal(signal.SIGTERM)\n                else:\n                    result = self.remote_signal(signal.SIGTERM)\n                self.log.debug(f\"SIGTERM signal sent to pid: {self.pid}\")\n        return result\n\n    @staticmethod\n    def ip_is_local(ip: str) -> bool:\n        \"\"\"\n        Returns True if `ip` is considered local to this server, False otherwise.\n        \"\"\"\n        return localinterfaces.is_public_ip(ip) or localinterfaces.is_local_ip(ip)\n\n    def _get_ssh_client(self, host: str) -> SSHClient | None:\n        \"\"\"\n        Create a SSH Client based on host, username and password if provided.\n        If there is any AuthenticationException/SSHException, raise HTTP Error 403 as permission denied.\n\n        :param host:\n        :return: ssh client instance\n        \"\"\"\n        ssh = None\n\n        try:\n            ssh = paramiko.SSHClient()\n            ssh.load_system_host_keys()\n            host_ip = gethostbyname(host)\n            if self.use_gss:\n                self.log.debug(\"Connecting to remote host via GSS.\")\n                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n                ssh.connect(host_ip, port=ssh_port, gss_auth=True)\n           
 else:\n                ssh.set_missing_host_key_policy(paramiko.RejectPolicy())\n                if self.remote_pwd:\n                    self.log.debug(\"Connecting to remote host with username and password.\")\n                    ssh.connect(\n                        host_ip,\n                        port=ssh_port,\n                        username=self.remote_user,\n                        password=self.remote_pwd,\n                    )\n                else:\n                    self.log.debug(\"Connecting to remote host with ssh key.\")\n                    ssh.connect(host_ip, port=ssh_port, username=self.remote_user)\n        except Exception as e:\n            http_status_code = 500\n            current_host = gethostbyname(gethostname())\n            error_message = (\n                \"Exception '{}' occurred when creating a SSHClient at {} connecting \"\n                \"to '{}:{}' with user '{}', message='{}'.\".format(\n                    type(e).__name__, current_host, host, ssh_port, self.remote_user, e\n                )\n            )\n            if e is paramiko.SSHException or paramiko.AuthenticationException:\n                http_status_code = 403\n                error_message_prefix = \"Failed to authenticate SSHClient with password\"\n                error_message = error_message_prefix + (\n                    \" provided\" if self.remote_pwd else \"-less SSH\"\n                )\n                error_message = error_message + \"and EG_REMOTE_GSS_SSH={!r} ({})\".format(\n                    self._use_gss_raw, self.use_gss\n                )\n\n            self.log_and_raise(http_status_code=http_status_code, reason=error_message)\n        return ssh\n\n    def rsh(self, host: str, command: str) -> list[str]:\n        \"\"\"\n        Executes a command on a remote host using ssh.\n\n        Parameters\n        ----------\n        host : str\n            The host on which the command is executed.\n        command : str\n            The 
command to execute.\n\n        Returns\n        -------\n        lines : List\n            The command's output.  If stdout is zero length, the stderr output is returned.\n        \"\"\"\n        ssh = self._get_ssh_client(host)\n        try:\n            stdin, stdout, stderr = ssh.exec_command(command, timeout=30)\n            lines = stdout.readlines()\n            if len(lines) == 0:  # if nothing in stdout, return stderr\n                lines = stderr.readlines()\n        except Exception as e:\n            # Let caller decide if exception should be logged\n            raise e\n\n        finally:\n            if ssh:\n                ssh.close()\n\n        return lines\n\n    def remote_signal(self, signum: int) -> bool | None:\n        \"\"\"\n        Sends signal `signum` to process proxy on remote host.\n        \"\"\"\n        val = None\n        # if we have a process group, use that, else use the pid...\n        target = \"-\" + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid)\n        cmd = f\"kill -{signum} {target}; echo $?\"\n        if signum > 0:  # only log if meaningful signal (not for poll)\n            self.log.debug(f\"Sending signal: {signum} to target: {target} on host: {self.ip}\")\n\n        try:\n            result = self.rsh(self.ip, cmd)\n        except Exception as e:\n            self.log.warning(\n                \"Remote signal({}) to '{}' on host '{}' failed with exception '{}'.\".format(\n                    signum, target, self.ip, e\n                )\n            )\n            return False\n\n        for line in result:\n            val = line.strip()\n        if val == \"0\":\n            return None\n\n        return False\n\n    def local_signal(self, signum: int) -> bool | None:\n        \"\"\"\n        Sends signal `signum` to local process.\n        \"\"\"\n        # if we have a process group, use that, else use the pid...\n        target = \"-\" + str(self.pgid) if self.pgid > 0 and signum > 0 else 
str(self.pid)\n        if signum > 0:  # only log if meaningful signal (not for poll)\n            self.log.debug(f\"Sending signal: {signum} to target: {target}\")\n\n        cmd = [\"kill\", \"-\" + str(signum), target]\n\n        with open(os.devnull, \"w\") as devnull:\n            result = subprocess.call(cmd, stderr=devnull)\n\n        if result == 0:\n            return None\n        return False\n\n    def _enforce_authorization(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Applies any authorization configuration using the kernel user.\n\n        Regardless of impersonation enablement, this method first adds the appropriate value for\n        EG_IMPERSONATION_ENABLED into environment (for use by kernelspecs), then ensures that KERNEL_USERNAME\n        has a value and is present in the environment (again, for use by kernelspecs).  If unset, KERNEL_USERNAME\n        will be defaulted to the current user.\n\n        Authorization is performed by comparing the value of KERNEL_USERNAME with each value in the set of\n        unauthorized users.  If any (case-sensitive) matches are found, HTTP error 403 (Forbidden) will be raised\n        - preventing the launch of the kernel.  If the authorized_users set is non-empty, it is then checked to\n        ensure the value of KERNEL_USERNAME is present in that list.  
If not found, HTTP error 403 will be raised.\n\n        It is assumed that the kernelspec logic will take the appropriate steps to impersonate the user identified\n        by KERNEL_USERNAME when impersonation_enabled is True.\n        \"\"\"\n        # Get the env\n        env_dict = kwargs.get(\"env\")\n\n        # Although it may already be set in the env, just override in case it was only set via command line or config\n        # Convert to string since execve() (called by Popen in base classes) wants string values.\n        env_dict[\"EG_IMPERSONATION_ENABLED\"] = str(self.kernel_manager.impersonation_enabled)\n\n        # Ensure KERNEL_USERNAME is set\n        kernel_username = KernelSessionManager.get_kernel_username(**kwargs)\n\n        # Now perform authorization checks\n        if kernel_username in self.unauthorized_users:\n            self._raise_authorization_error(kernel_username, \"not authorized\")\n\n        # If authorized users are non-empty, ensure user is in that set.\n        if self.authorized_users.__len__() > 0 and kernel_username not in self.authorized_users:\n            self._raise_authorization_error(kernel_username, \"not in the set of users authorized\")\n\n    def _raise_authorization_error(self, kernel_username: str, differentiator_clause: str) -> None:\n        \"\"\"\n        Raises a 403 status code after building the appropriate message.\n        \"\"\"\n        kernel_name = self.kernel_manager.kernel_spec.display_name\n        kernel_clause = f\" '{kernel_name}'.\" if kernel_name is not None else \"s.\"\n        error_message = (\n            f\"User '{kernel_username}' is {differentiator_clause} to start kernel{kernel_clause} \"\n            \"Ensure KERNEL_USERNAME is set to an appropriate value and retry the request.\"\n        )\n        self.log_and_raise(http_status_code=403, reason=error_message)\n\n    def get_process_info(self) -> dict[str, Any]:\n        \"\"\"\n        Captures the base information necessary for 
    def get_process_info(self) -> dict[str, Any]:
        """
        Captures the base information necessary for kernel persistence relative to process proxies.

        The superclass method must always be called first to ensure proper ordering.  Since this is the
        most base class, no call to `super()` is necessary.
        """
        process_info = {"pid": self.pid, "pgid": self.pgid, "ip": self.ip}
        return process_info

    def load_process_info(self, process_info: dict[str, Any]) -> None:
        """
        Loads the base information necessary for kernel persistence relative to process proxies.

        The superclass method must always be called first to ensure proper ordering.  Since this is the
        most base class, no call to `super()` is necessary.
        """
        self.pid = process_info["pid"]
        self.pgid = process_info["pgid"]
        self.ip = process_info["ip"]
        # Keep the kernel manager's notion of the kernel's IP in sync with the restored state.
        self.kernel_manager.ip = process_info["ip"]

    def _validate_port_range(self) -> None:
        """
        Validates the port range configuration option to ensure appropriate values.
        """
        # Let port_range override global value - if set on kernelspec...
        port_range = self.kernel_manager.port_range
        if self.proxy_config.get("port_range"):
            port_range = self.proxy_config.get("port_range")

        try:
            # Expected format is "lower..upper"; int() / indexing errors are handled below.
            port_ranges = port_range.split("..")

            self.lower_port = int(port_ranges[0])
            self.upper_port = int(port_ranges[1])

            port_range_size = self.upper_port - self.lower_port
            # A zero-size range means "unconstrained" - skip all further validation.
            if port_range_size != 0:
                if port_range_size < min_port_range_size:
                    self.log_and_raise(
                        http_status_code=500,
                        reason="Port range validation failed for range: '{}'.  "
                        "Range size must be at least {} as specified by env EG_MIN_PORT_RANGE_SIZE".format(
                            port_range, min_port_range_size
                        ),
                    )

                # According to RFC 793, port is a 16-bit unsigned int. Which means the port
                # numbers must be in the range (0, 65535). However, within that range,
                # ports 0 - 1023 are called "well-known ports" and are typically reserved for
                # specific purposes. For example, 0 is reserved for random port assignment,
                # 80 is used for HTTP, 443 for TLS/SSL, 25 for SMTP, etc. But, there is
                # flexibility as one can choose any port with the aforementioned protocols.
                # Ports 1024 - 49151 are called "user or registered ports" that are bound to
                # services running on the server listening to client connections. And, ports
                # 49152 - 65535 are called "dynamic or ephemeral ports". A TCP connection
                # has two endpoints. Each endpoint consists of an IP address and a port number.
                # And, each connection is made up of a 4-tuple consisting of -- client-IP,
                # client-port, server-IP, and server-port. A service runs on a server with a
                # specific IP and is bound to a specific "user or registered port" that is
                # advertised for clients to connect. So, when a client connects to a service
                # running on a server, three out of 4-tuple - client-IP, client-port, server-IP -
                # are already known. To be able to serve multiple clients concurrently, the
                # server's IP stack assigns an ephemeral port for the connection to complete
                # the 4-tuple.
                #
                # In case of JEG, we will accept ports in the range 1024 - 65535 as these days
                # admins use dedicated hosts for individual services.
                if self.lower_port < 1024 or self.lower_port > 65535:
                    self.log_and_raise(
                        http_status_code=500,
                        reason=f"Invalid port range '{port_range}' specified. "
                        "Range for valid port numbers is (1024, 65535).",
                    )
                if self.upper_port < 1024 or self.upper_port > 65535:
                    self.log_and_raise(
                        http_status_code=500,
                        reason=f"Invalid port range '{port_range}' specified. "
                        "Range for valid port numbers is (1024, 65535).",
                    )
        except ValueError as ve:
            # Non-numeric bounds (e.g. "a..b").
            self.log_and_raise(
                http_status_code=500,
                reason=f"Port range validation failed for range: '{port_range}'.  "
                f"Error was: {ve}",
            )
        except IndexError as ie:
            # Missing ".." separator leaves fewer than two parts.
            self.log_and_raise(
                http_status_code=500,
                reason=f"Port range validation failed for range: '{port_range}'.  "
                f"Error was: {ie}",
            )

        self.kernel_manager.port_range = port_range

    def select_ports(self, count: int) -> list[int]:
        """
        Selects and returns n random ports that adhere to the configured port range, if applicable.

        Parameters
        ----------
        count : int
            The number of ports to return

        Returns
        -------
        List - ports available and adhering to the configured port range
        """
        # All sockets are held open until every port has been chosen so the same
        # port is not handed out twice, then released together.
        ports = []
        sockets = []
        for _ in range(count):
            sock = self.select_socket()
            ports.append(sock.getsockname()[1])
            sockets.append(sock)
        for sock in sockets:
            sock.close()
        return ports

    def select_socket(self, ip: str | None = "") -> socket:
        """
        Creates and returns a socket whose port adheres to the configured port range, if applicable.

        Parameters
        ----------
        ip : str
            Optional ip address to which the port is bound

        Returns
        -------
        socket - Bound socket that is available and adheres to configured port range
        """
        sock = socket(AF_INET, SOCK_STREAM)
        found_port = False
        retries = 0
        while not found_port:
            try:
                # When no range is configured, _get_candidate_port() returns 0 and
                # the OS picks a free ephemeral port - bind then rarely fails.
                sock.bind((ip, self._get_candidate_port()))
                found_port = True
            except Exception:
                retries = retries + 1
                if retries > max_port_range_retries:
                    self.log_and_raise(
                        http_status_code=500,
                        reason="Failed to locate port within range {} after {} "
                        "retries!".format(self.kernel_manager.port_range, max_port_range_retries),
                    )
        return sock
number within the configured range.\n\n        If no range is configured, the 0 port is used - allowing the server to choose from the full range.\n        \"\"\"\n        range_size = self.upper_port - self.lower_port\n        if range_size == 0:\n            return 0\n        return random.randint(self.lower_port, self.upper_port)\n\n    def log_and_raise(self, http_status_code: int | None = None, reason: str | None = None) -> None:\n        \"\"\"\n        Helper method that combines the logging and raising of exceptions.\n\n        If http_status_code is provided an HTTPError is created using the status code and\n        reason.  If http_status_code is not provided, a RuntimeError is raised with reason\n        as the message.  In either case, an error is logged using the reason.  If reason is\n        not provided a generic message will be used.\n        Parameters\n        ----------\n        http_status_code : int\n            The status code to raise\n        reason : str\n            The message to log and associate with the exception\n        \"\"\"\n        if reason is None:\n            reason = \"Internal server issue!\"\n\n        self.log.error(reason)\n        if http_status_code:\n            raise web.HTTPError(status_code=http_status_code, reason=reason)\n        else:\n            raise RuntimeError(reason)\n\n\nclass LocalProcessProxy(BaseProcessProxyABC):\n    \"\"\"\n    Manages the lifecycle of a locally launched kernel process.\n\n    This process proxy is used when no other process proxy is configured.\n    \"\"\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):  # noqa: F821\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        kernel_manager.ip = localinterfaces.LOCALHOST\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> type[LocalProcessProxy]:\n        \"\"\"Launch a process for a 
kernel.\"\"\"\n        await super().launch_process(kernel_cmd, **kwargs)\n\n        # launch the local run.sh\n        self.local_proc = self.launch_kernel(kernel_cmd, **kwargs)\n        self.pid = self.local_proc.pid\n        if hasattr(os, \"getpgid\"):\n            try:\n                self.pgid = os.getpgid(self.pid)\n            except OSError:\n                pass\n        self.ip = local_ip\n        self.log.info(\n            \"Local kernel launched on '{}', pid: {}, pgid: {}, KernelID: {}, cmd: '{}'\".format(\n                self.ip, self.pid, self.pgid, self.kernel_id, kernel_cmd\n            )\n        )\n        return self\n\n\nclass RemoteProcessProxy(BaseProcessProxyABC, metaclass=abc.ABCMeta):\n    \"\"\"\n    Abstract Base Class implementation associated with remote process proxies.\n    \"\"\"\n\n    def __init__(self, kernel_manager, proxy_config):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.response_socket = None\n        self.start_time = None\n        self.assigned_ip = None\n        self.assigned_host = \"\"\n        self.comm_ip = None\n        self.comm_port = 0\n        self.tunneled_connect_info = (\n            None  # Contains the destination connection info when tunneling in use\n        )\n        self.tunnel_processes = {}\n        self.response_manager = (\n            ResponseManager.instance()\n        )  # This will create the key pair and socket on first use\n        self.response_manager.register_event(self.kernel_id)\n        self.kernel_manager.response_address = self.response_manager.response_address\n        self.kernel_manager.public_key = self.response_manager.public_key\n\n    async def launch_process(self, kernel_cmd, **kwargs):\n        \"\"\"Launch a process for a kernel.\"\"\"\n        # Pass along port-range info to kernels...\n        kwargs[\"env\"][\"EG_MIN_PORT_RANGE_SIZE\"] = str(min_port_range_size)\n        
kwargs[\"env\"][\"EG_MAX_PORT_RANGE_RETRIES\"] = str(max_port_range_retries)\n\n        await super().launch_process(kernel_cmd, **kwargs)\n        # remove connection file because a) its not necessary any longer since launchers will return\n        # the connection information which will (sufficiently) remain in memory and b) launchers\n        # landing on this node may want to write to this file and be denied access.\n        self.kernel_manager.cleanup_connection_file()\n\n    @abc.abstractmethod\n    def confirm_remote_startup(self):\n        \"\"\"Confirms the remote process has started and returned necessary connection information.\"\"\"\n        pass\n\n    def detect_launch_failure(self) -> None:\n        \"\"\"\n        Helper method called from implementations of `confirm_remote_startup()` that checks if\n        self.local_proc (a popen instance) has terminated prior to the confirmation of startup.\n        This prevents users from having to wait for the kernel timeout duration to know if the\n        launch fails.  It also helps distinguish local invocation issues from remote post-launch\n        issues since the failure will be relatively immediate.\n\n        Note that this method only applies to those process proxy implementations that launch\n        from the local node.  Proxies like DistributedProcessProxy use rsh against a remote\n        node, so there's not `local_proc` in play to interrogate.\n        \"\"\"\n\n        # Check if the local proc has faulted (poll() will return non-None with a non-zero return\n        # code in such cases).  
If a fault was encountered, raise server error (500) with a message\n        # indicating to check the EG log for more information.\n        if self.local_proc:\n            poll_result = self.local_proc.poll()\n            if poll_result and poll_result > 0:\n                self.local_proc.wait()\n                error_message = (\n                    f\"Error occurred during launch of KernelID: {self.kernel_id}.  \"\n                    \"Check Enterprise Gateway log for more information.\"\n                )\n                self.local_proc = None\n                self.log_and_raise(http_status_code=500, reason=error_message)\n\n    def _tunnel_to_kernel(\n        self, connection_info: dict, server: str, port: int = ssh_port, key: str | None = None\n    ) -> tuple:\n        \"\"\"\n        Tunnel connections to a kernel over SSH\n\n        This will open five SSH tunnels from localhost on this machine to the\n        ports associated with the kernel.\n        See jupyter_client/connect.py for original implementation.\n        \"\"\"\n        cf = connection_info\n\n        lports = self.select_ports(5)\n\n        rports = (\n            cf[\"shell_port\"],\n            cf[\"iopub_port\"],\n            cf[\"stdin_port\"],\n            cf[\"hb_port\"],\n            cf[\"control_port\"],\n        )\n\n        channels = (\n            KernelChannel.SHELL,\n            KernelChannel.IOPUB,\n            KernelChannel.STDIN,\n            KernelChannel.HEARTBEAT,\n            KernelChannel.CONTROL,\n        )\n\n        remote_ip = cf[\"ip\"]\n\n        if not tunnel.try_passwordless_ssh(server + \":\" + str(port), key):\n            self.log_and_raise(\n                http_status_code=403,\n                reason=\"Must use password-less scheme by setting up the \"\n                \"SSH public key on the cluster nodes\",\n            )\n\n        for lp, rp, kc in zip(lports, rports, channels):\n            self._create_ssh_tunnel(kc, lp, rp, remote_ip, server, 
port, key)\n\n        return tuple(lports)\n\n    def _tunnel_to_port(\n        self,\n        kernel_channel: KernelChannel,\n        remote_ip: str,\n        remote_port: int,\n        server: str,\n        port: int = ssh_port,\n        key: str | None = None,\n    ) -> int:\n        \"\"\"\n        Analogous to _tunnel_to_kernel, but deals with a single port.  This will typically be called for\n        any one-off ports that require tunnelling. Note - this method assumes that passwordless ssh is\n        in use and has been previously validated.\n        \"\"\"\n        local_port = self.select_ports(1)[0]\n        self._create_ssh_tunnel(\n            kernel_channel, local_port, remote_port, remote_ip, server, port, key\n        )\n        return local_port\n\n    def _create_ssh_tunnel(\n        self,\n        kernel_channel: KernelChannel,\n        local_port: int,\n        remote_port: int,\n        remote_ip: str,\n        server: str,\n        port: int,\n        key: str,\n    ) -> None:\n        \"\"\"\n        Creates an SSH tunnel between the local and remote port/server for the given kernel channel.\n        \"\"\"\n        channel_name = kernel_channel.value\n        self.log.debug(\n            \"Creating SSH tunnel for '{}': 127.0.0.1:'{}' to '{}':'{}'\".format(\n                channel_name, local_port, remote_ip, remote_port\n            )\n        )\n        try:\n            process = self._spawn_ssh_tunnel(\n                kernel_channel, local_port, remote_port, remote_ip, server, port, key\n            )\n            self.tunnel_processes[channel_name] = process\n        except Exception as e:\n            self.log_and_raise(\n                http_status_code=500,\n                reason=f\"Could not open SSH tunnel for port {channel_name}. 
Exception: '{e}'\",\n            )\n\n    def _spawn_ssh_tunnel(\n        self,\n        kernel_channel: KernelChannel,\n        local_port: int,\n        remote_port: int,\n        remote_ip: str,\n        server: str,\n        port: int = ssh_port,\n        key: str | None = None,\n    ):\n        \"\"\"\n        This method spawns a child process to create an SSH tunnel and returns the spawned process.\n        ZMQ's implementation returns a pid on UNIX based platforms and a process handle/reference on\n        Win32. By consistently returning a process handle/reference on both UNIX and Win32 platforms,\n        this method enables the caller to deal with the same currency regardless of the platform. For\n        example, on both UNIX and Win32 platforms, the developer will have the option to stash the\n        child process reference and manage it's lifecycle consistently.\n\n        On UNIX based platforms, ZMQ's implementation is more generic to be able to handle various\n        use-cases. ZMQ's implementation also requests the spawned process to go to background using\n        '-f' command-line option. As a result, the spawned process becomes an orphan and any references\n        to the process obtained using it's pid become stale. On the other hand, this implementation is\n        specifically for password-less SSH login WITHOUT the '-f' command-line option thereby allowing\n        the spawned process to be owned by the parent process. 
This allows the parent process to control\n        the lifecycle of it's child processes and do appropriate cleanup during termination.\n        \"\"\"\n        if sys.platform == \"win32\":\n            ssh_server = server + \":\" + str(port)\n            return tunnel.paramiko_tunnel(local_port, remote_port, ssh_server, remote_ip, key)\n        else:\n            ssh = \"ssh -p %s -o ServerAliveInterval=%i\" % (\n                port,\n                self._get_keep_alive_interval(kernel_channel),\n            )\n            cmd = \"%s -S none -L 127.0.0.1:%i:%s:%i %s\" % (\n                ssh,\n                local_port,\n                remote_ip,\n                remote_port,\n                server,\n            )\n            return pexpect.spawn(cmd, env=os.environ.copy().pop(\"SSH_ASKPASS\", None))\n\n    def _get_keep_alive_interval(self, kernel_channel: KernelChannel) -> int:\n        cull_idle_timeout = self.kernel_manager.cull_idle_timeout\n\n        if (\n            kernel_channel == KernelChannel.COMMUNICATION\n            or kernel_channel == KernelChannel.CONTROL\n            or cull_idle_timeout <= 0\n            or cull_idle_timeout > max_keep_alive_interval\n        ):\n            # For COMMUNICATION and CONTROL channels, keep-alive interval will be set to\n            # max_keep_alive_interval to make sure that the SSH session does not timeout\n            # or expire for a very long time. Also, if cull_idle_timeout is unspecified,\n            # negative, or a very large value, then max_keep_alive_interval will be\n            # used as keep-alive value.\n            return max_keep_alive_interval\n\n        # Ideally, keep-alive interval should be greater than cull_idle_timeout. 
So, we\n        # will add 60 seconds to cull_idle_timeout to come up with the value for keep-alive\n        # interval for the rest of the kernel channels.\n        return cull_idle_timeout + 60\n\n    async def receive_connection_info(self) -> bool:\n        \"\"\"\n        Monitors the response address for connection info sent by the remote kernel launcher.\n        \"\"\"\n        # Polls the socket using accept.  When data is found, returns ready indicator and encrypted data.\n        ready_to_connect = False\n\n        try:\n            connect_info = await self.response_manager.get_connection_info(self.kernel_id)\n            self._setup_connection_info(connect_info)\n            ready_to_connect = True\n        except Exception as e:\n            if type(e) is timeout or type(e) is asyncio.TimeoutError:\n                self.log.debug(\n                    \"Waiting for KernelID '{}' to send connection info from host '{}' - retrying...\".format(\n                        self.kernel_id, self.assigned_host\n                    )\n                )\n            else:\n                error_message = (\n                    \"Exception occurred waiting for connection file response for KernelId '{}' \"\n                    \"on host '{}': {}\".format(self.kernel_id, self.assigned_host, e)\n                )\n                self.kill()\n                self.log_and_raise(http_status_code=500, reason=error_message)\n\n        return ready_to_connect\n\n    def _setup_connection_info(self, connect_info: dict) -> None:\n        \"\"\"\n        Take connection info (returned from launcher or loaded from session persistence) and properly\n        configure port variables for the 5 kernel and (possibly) the launcher communication port.  
If\n        tunneling is enabled, these ports will be tunneled with the original port information recorded.\n        \"\"\"\n\n        self.log.debug(\n            f\"Host assigned to the kernel is: '{self.assigned_host}' '{self.assigned_ip}'\"\n        )\n\n        connect_info[\"ip\"] = (\n            self.assigned_ip\n        )  # Set connection to IP address of system where the kernel was launched\n\n        if tunneling_enabled is True:\n            # Capture the current(tunneled) connect_info relative to the IP and ports (including the\n            # communication port - if present).\n            self.tunneled_connect_info = dict(connect_info)\n\n            # Open tunnels to the 5 ZMQ kernel ports\n            tunnel_ports = self._tunnel_to_kernel(connect_info, self.assigned_ip)\n            self.log.debug(f\"Local ports used to create SSH tunnels: '{tunnel_ports}'\")\n\n            # Replace the remote connection ports with the local ports used to create SSH tunnels.\n            connect_info[\"ip\"] = \"127.0.0.1\"\n            connect_info[\"shell_port\"] = tunnel_ports[0]\n            connect_info[\"iopub_port\"] = tunnel_ports[1]\n            connect_info[\"stdin_port\"] = tunnel_ports[2]\n            connect_info[\"hb_port\"] = tunnel_ports[3]\n            connect_info[\"control_port\"] = tunnel_ports[4]\n\n            # If a communication port was provided, tunnel it\n            if \"comm_port\" in connect_info:\n                self.comm_ip = connect_info[\"ip\"]\n                tunneled_comm_port = int(connect_info[\"comm_port\"])\n                self.comm_port = self._tunnel_to_port(\n                    KernelChannel.COMMUNICATION,\n                    self.assigned_ip,\n                    tunneled_comm_port,\n                    self.assigned_ip,\n                )\n                connect_info[\"comm_port\"] = self.comm_port\n                self.log.debug(\n                    \"Established gateway communication to: {}:{} for KernelID '{}' 
via tunneled port \"\n                    \"127.0.0.1:{}\".format(\n                        self.assigned_ip, tunneled_comm_port, self.kernel_id, self.comm_port\n                    )\n                )\n\n        else:  # tunneling not enabled, still check for and record communication port\n            if \"comm_port\" in connect_info:\n                self.comm_ip = connect_info[\"ip\"]\n                self.comm_port = int(connect_info[\"comm_port\"])\n                self.log.debug(\n                    \"Established gateway communication to: {}:{} for KernelID '{}'\".format(\n                        self.assigned_ip, self.comm_port, self.kernel_id\n                    )\n                )\n\n        # If no communication port was provided, record that fact as well since this is useful to know\n        if \"comm_port\" not in connect_info:\n            self.log.debug(\n                \"Gateway communication port has NOT been established for KernelID '{}' (optional).\".format(\n                    self.kernel_id\n                )\n            )\n\n        self._update_connection(connect_info)\n\n    def _update_connection(self, connect_info: dict) -> None:\n        \"\"\"\n        Updates the connection info member variables of the kernel manager.  Also pulls the PID and PGID\n        info, if present, in case we need to use it for lifecycle management.\n        Note: Do NOT update connect_info with IP and other such artifacts in this method/function.\n        \"\"\"\n        # Reset the ports to 0 so load can take place (which resets the members to value from file or json)...\n        self.kernel_manager.stdin_port = self.kernel_manager.iopub_port = (\n            self.kernel_manager.shell_port\n        ) = self.kernel_manager.hb_port = self.kernel_manager.control_port = 0\n\n        if connect_info:\n            # Load new connection information into memory. 
No need to write back out to a file or track loopback, etc.\n            # The launcher may also be sending back process info, so check and extract\n            self._extract_pid_info(connect_info)\n            self.kernel_manager.load_connection_info(info=connect_info)\n            self.log.debug(\n                \"Received connection info for KernelID '{}' from host '{}': {}...\".format(\n                    self.kernel_id, self.assigned_host, connect_info\n                )\n            )\n        else:\n            error_message = (\n                f\"Unexpected runtime encountered for Kernel ID '{self.kernel_id}' - \"\n                \"connection information is null!\"\n            )\n            self.log_and_raise(http_status_code=500, reason=error_message)\n\n        self._close_response_socket()\n        self.kernel_manager._connection_file_written = (\n            True  # allows for cleanup of local files (as necessary)\n        )\n\n    def _close_response_socket(self) -> None:\n        # If there's a response-socket, close it since it's no longer needed.\n        if self.response_socket:\n            try:\n                self.log.debug(\"response socket still open, close it\")\n                self.response_socket.shutdown(SHUT_RDWR)\n                self.response_socket.close()\n            except OSError:\n                pass  # tolerate exceptions here since we don't need this socket and would like to continue\n            self.response_socket = None\n\n    def _extract_pid_info(self, connect_info: dict) -> None:\n        \"\"\"\n        Extracts any PID, PGID info from the payload received on the response socket.\n        \"\"\"\n        pid = connect_info.pop(\"pid\", None)\n        if pid:\n            try:\n                self.pid = int(pid)\n            except ValueError:\n                self.log.warning(\n                    f\"pid returned from kernel launcher is not an integer: {pid} - ignoring.\"\n                )\n                pid 
= None\n        pgid = connect_info.pop(\"pgid\", None)\n        if pgid:\n            try:\n                self.pgid = int(pgid)\n            except ValueError:\n                self.log.warning(\n                    f\"pgid returned from kernel launcher is not an integer: {pgid} - ignoring.\"\n                )\n                pgid = None\n        if (\n            pid or pgid\n        ):  # if either process ids were updated, update the ip as well and don't use local_proc\n            self.ip = self.assigned_ip\n            if not BaseProcessProxyABC.ip_is_local(\n                self.ip\n            ):  # only unset local_proc if we're remote\n                self.local_proc = None\n\n    async def handle_timeout(self):\n        \"\"\"\n        Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.\n        \"\"\"\n        await asyncio.sleep(poll_interval)\n        time_interval = RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n        if time_interval > self.kernel_launch_timeout:\n            error_http_code = 500\n            reason = f\"Waited too long ({self.kernel_launch_timeout}s) to get connection file\"\n            timeout_message = f\"KernelID: '{self.kernel_id}' launch timeout due to: {reason}\"\n            await asyncio.get_event_loop().run_in_executor(None, self.kill)\n            self.log_and_raise(http_status_code=error_http_code, reason=timeout_message)\n\n    def cleanup(self):\n        \"\"\"\n        Terminates tunnel processes, if applicable.\n        \"\"\"\n        self.assigned_ip = None\n\n        for kernel_channel, process in self.tunnel_processes.items():\n            self.log.debug(f\"cleanup: terminating {kernel_channel} tunnel process.\")\n            process.terminate()\n\n        self.tunnel_processes.clear()\n        super().cleanup()\n\n    def _send_listener_request(self, request: dict, shutdown_socket: bool = False) 
-> None:\n        \"\"\"\n        Sends the request dictionary to the kernel listener via the comm port.  Caller is responsible for\n        handling any exceptions.\n        \"\"\"\n\n        if self.comm_port > 0:\n            sock = socket(AF_INET, SOCK_STREAM)\n            try:\n                sock.settimeout(socket_timeout)\n                sock.connect((self.comm_ip, self.comm_port))\n                sock.send(json.dumps(request).encode(encoding=\"utf-8\"))\n            finally:\n                if shutdown_socket:\n                    try:\n                        sock.shutdown(SHUT_WR)\n                    except Exception as e2:\n                        if isinstance(e2, OSError) and e2.errno == errno.ENOTCONN:\n                            # Listener is not connected.  This is probably a follow-on to ECONNREFUSED on connect\n                            self.log.debug(\n                                f\"OSError(ENOTCONN) raised on socket shutdown, listener \"\n                                f\"has likely already exited. 
Cannot send '{request}'\"\n                            )\n                        else:\n                            self.log.warning(\n                                f\"Exception occurred attempting to shutdown communication \"\n                                f\"socket to {self.comm_ip}:{self.comm_port} \"\n                                f\"for KernelID '{self.kernel_id}' (ignored): {e2!s}\"\n                            )\n                sock.close()\n        else:\n            self.log.debug(\n                f\"Invalid comm port, not sending request '{request}' to comm_port '{self.comm_port}'.\"\n            )\n\n    def send_signal(self, signum):\n        \"\"\"\n        Sends `signum` via the communication port.\n        The kernel launcher listening on its communication port will receive the signum and perform\n        the necessary signal operation local to the process.\n        \"\"\"\n        # If the launcher returned a comm_port value, then use that to send the signal,\n        # else, defer to the superclass - which will use a remote shell to issue kill.\n        # Note that if the target process is running as a different user than the REMOTE_USER,\n        # using anything other than the socket-based signal (via signal_addr) will not work.\n\n        if self.comm_port > 0:\n            try:\n                self._send_listener_request({\"signum\": signum})\n\n                if signum > 0:  # Polling (signum == 0) is too frequent\n                    self.log.debug(f\"Signal ({signum}) sent via gateway communication port.\")\n                return None\n            except Exception as e:\n                if (\n                    isinstance(e, OSError) and e.errno == errno.ECONNREFUSED\n                ):  # Return False since there's no process.\n                    self.log.debug(\"ERROR: ECONNREFUSED, no process listening, cannot send signal.\")\n                    return False\n\n                self.log.warning(\n                    \"An 
unexpected exception occurred sending signal ({}) for KernelID '{}': {}\".format(\n                        signum, self.kernel_id, str(e)\n                    )\n                )\n\n        return super().send_signal(signum)\n\n    def shutdown_listener(self):\n        \"\"\"\n        Sends a shutdown request to the kernel launcher listener.\n        \"\"\"\n        # If a comm port has been established, instruct the listener to shutdown so that proper\n        # kernel termination can occur.  If not done, the listener keeps the launcher process\n        # active, even after the kernel has terminated, leading to less than graceful terminations.\n\n        if self.comm_port > 0:\n            shutdown_request = {}\n            shutdown_request[\"shutdown\"] = 1\n\n            try:\n                self._send_listener_request(shutdown_request, shutdown_socket=True)\n                self.log.debug(\"Shutdown request sent to listener via gateway communication port.\")\n            except Exception as e:\n                if not isinstance(e, OSError) or e.errno != errno.ECONNREFUSED:\n                    self.log.warning(\n                        \"An unexpected exception occurred sending listener shutdown to {}:{} for \"\n                        \"KernelID '{}': {}\".format(\n                            self.comm_ip, self.comm_port, self.kernel_id, str(e)\n                        )\n                    )\n\n            # Also terminate the tunnel process for the communication port - if in play.  
Failure to terminate\n            # this process results in the kernel (launcher) appearing to remain alive following the shutdown\n            # request, which triggers the \"forced kill\" termination logic.\n\n            comm_port_name = KernelChannel.COMMUNICATION.value\n            comm_port_tunnel = self.tunnel_processes.get(comm_port_name, None)\n            if comm_port_tunnel:\n                self.log.debug(f\"shutdown_listener: terminating {comm_port_name} tunnel process.\")\n                comm_port_tunnel.terminate()\n                del self.tunnel_processes[comm_port_name]\n\n    def get_process_info(self):\n        \"\"\"\n        Captures the base information necessary for kernel persistence relative to remote processes.\n        \"\"\"\n        process_info = super().get_process_info()\n        process_info.update(\n            {\n                \"assigned_ip\": self.assigned_ip,\n                \"assigned_host\": self.assigned_host,\n                \"comm_ip\": self.comm_ip,\n                \"comm_port\": self.comm_port,\n                \"tunneled_connect_info\": self.tunneled_connect_info,\n            }\n        )\n        return process_info\n\n    def load_process_info(self, process_info):\n        \"\"\"\n        Captures the base information necessary for kernel persistence relative to remote processes.\n        \"\"\"\n        super().load_process_info(process_info)\n        self.assigned_ip = process_info[\"assigned_ip\"]\n        self.assigned_host = process_info[\"assigned_host\"]\n        self.comm_ip = process_info[\"comm_ip\"]\n        self.comm_port = process_info[\"comm_port\"]\n        if (\n            \"tunneled_connect_info\" in process_info\n            and process_info[\"tunneled_connect_info\"] is not None\n        ):\n            # If this was a tunneled connection, re-establish tunnels.  
Note, this will reset the\n            # communication socket (comm_ip, comm_port) members as well.\n            self._setup_connection_info(process_info[\"tunneled_connect_info\"])\n\n    def log_and_raise(self, http_status_code: int | None = None, reason: str | None = None):\n        \"\"\"\n        Override log_and_raise method in order to verify that the response socket is properly closed\n        before raise exception\n        \"\"\"\n        self._close_response_socket()\n        super().log_and_raise(http_status_code, reason)\n\n    @staticmethod\n    def get_current_time():\n        \"\"\"Return the current time stamp in UTC time epoch format in milliseconds.\"\"\"\n        return timegm(_tz.utcnow().utctimetuple()) * 1000\n\n    @staticmethod\n    def get_time_diff(time1, time2):\n        \"\"\"Return the difference between two timestamps in seconds, assuming the timestamp is in milliseconds.\"\"\"\n        # e.g. the difference between 1504028203000 and 1504028208300 is 5300 milliseconds or 5.3 seconds\n        diff = abs(time2 - time1)\n        return float(\"%d.%d\" % (diff / 1000, diff % 1000))\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/spark_operator.py",
    "content": "\"\"\"A spark operator process proxy.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom __future__ import annotations\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom .crd import CustomResourceProcessProxy\n\n\nclass SparkOperatorProcessProxy(CustomResourceProcessProxy):\n    \"\"\"Spark operator process proxy.\"\"\"\n\n    # Identifies the kind of object being managed by this process proxy.\n    # For these values we will prefer the values found in the 'kind' field\n    # of the object's metadata.  This attribute is strictly used to provide\n    # context to log messages.\n    object_kind = \"SparkApplication\"\n\n    def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.group = \"sparkoperator.k8s.io\"\n        self.version = \"v1beta2\"\n        self.plural = \"sparkapplications\"\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/yarn.py",
    "content": "\"\"\"Code related to managing kernels running in YARN clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport asyncio\nimport errno\nimport logging\nimport os\nimport signal\nimport socket\nimport time\nfrom typing import Any, ClassVar\n\nfrom jupyter_client import localinterfaces\nfrom yarn_api_client.base import Response\nfrom yarn_api_client.resource_manager import ResourceManager\n\nfrom ..kernels.remotemanager import RemoteKernelManager\nfrom ..sessions.kernelsessionmanager import KernelSessionManager\nfrom .processproxy import RemoteProcessProxy\n\n# Default logging level of yarn-api and underlying connectionpool produce too much noise - raise to warning only.\nlogging.getLogger(\"yarn_api_client\").setLevel(os.getenv(\"EG_YARN_LOG_LEVEL\", logging.WARNING))\nlogging.getLogger(\"urllib3.connectionpool\").setLevel(\n    os.environ.get(\"EG_YARN_LOG_LEVEL\", logging.WARNING)\n)\n\nlocal_ip = localinterfaces.public_ips()[0]\npoll_interval = float(os.getenv(\"EG_POLL_INTERVAL\", \"0.5\"))\nmax_poll_attempts = int(os.getenv(\"EG_MAX_POLL_ATTEMPTS\", \"10\"))\nyarn_shutdown_wait_time = float(os.getenv(\"EG_YARN_SHUTDOWN_WAIT_TIME\", \"15.0\"))\n# cert_path: Boolean, defaults to `True`, that controls\n#            whether we verify the server's TLS certificate in yarn-api-client.\n#            Or a string, in which case it must be a path to a CA bundle(.pem file) to use.\ncert_path = os.getenv(\"EG_YARN_CERT_BUNDLE\", True)\nmutual_authentication = os.getenv(\"EG_YARN_MUTUAL_AUTHENTICATION\", \"REQUIRED\")\n\n\nclass YarnClusterProcessProxy(RemoteProcessProxy):\n    \"\"\"\n    Kernel lifecycle management for YARN clusters.\n    \"\"\"\n\n    initial_states: ClassVar = {\"NEW\", \"SUBMITTED\", \"ACCEPTED\", \"RUNNING\"}\n    final_states: ClassVar = {\"FINISHED\", \"KILLED\", \"FAILED\"}\n\n    def __init__(self, kernel_manager: 
RemoteKernelManager, proxy_config: dict):\n        \"\"\"Initialize the proxy.\"\"\"\n        super().__init__(kernel_manager, proxy_config)\n        self.application_id = None\n        self.last_known_state = None\n        self.candidate_queue = None\n        self.candidate_partition = None\n\n        self.yarn_endpoint = proxy_config.get(\"yarn_endpoint\", kernel_manager.yarn_endpoint)\n        self.alt_yarn_endpoint = proxy_config.get(\n            \"alt_yarn_endpoint\", kernel_manager.alt_yarn_endpoint\n        )\n\n        self.yarn_endpoint_security_enabled = proxy_config.get(\n            \"yarn_endpoint_security_enabled\", kernel_manager.yarn_endpoint_security_enabled\n        )\n\n        # YARN applications tend to take longer than the default 5 second wait time.  Rather than\n        # require a command-line option for those using YARN, we'll adjust based on a local env that\n        # defaults to 15 seconds.  Note: we'll only adjust if the current wait time is shorter than\n        # the desired value.\n        if kernel_manager.shutdown_wait_time < yarn_shutdown_wait_time:\n            kernel_manager.shutdown_wait_time = yarn_shutdown_wait_time\n            self.log.debug(\n                \"{class_name} shutdown wait time adjusted to {wait_time} seconds.\".format(\n                    class_name=type(self).__name__, wait_time=kernel_manager.shutdown_wait_time\n                )\n            )\n\n        # If yarn resource check is enabled and it isn't available immediately,\n        # 20% of kernel_launch_timeout is used to wait\n        # and retry at fixed interval before pronouncing as not feasible to launch.\n        self.yarn_resource_check_wait_time = 0.20 * self.kernel_launch_timeout\n\n    def _initialize_resource_manager(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"Initialize the Hadoop YARN Resource Manager instance used for this kernel's lifecycle.\"\"\"\n\n        endpoints = None\n        if self.yarn_endpoint:\n         
   endpoints = [self.yarn_endpoint]\n\n            # Only check alternate if \"primary\" is set.\n            if self.alt_yarn_endpoint:\n                endpoints.append(self.alt_yarn_endpoint)\n\n        if self.yarn_endpoint_security_enabled:\n            from requests_kerberos import DISABLED, OPTIONAL, REQUIRED, HTTPKerberosAuth\n\n            auth = HTTPKerberosAuth(\n                mutual_authentication={\n                    \"REQUIRED\": REQUIRED,\n                    \"OPTIONAL\": OPTIONAL,\n                    \"DISABLED\": DISABLED,\n                }.get(mutual_authentication.upper())\n            )\n        else:\n            # If we have the appropriate version of yarn-api-client, use its SimpleAuth class.\n            # This allows EG to continue to issue requests against the YARN api when anonymous\n            # access is not allowed. (Default is to allow anonymous access.)\n            try:\n                from yarn_api_client.auth import SimpleAuth\n\n                kernel_username = KernelSessionManager.get_kernel_username(**kwargs)\n                auth = SimpleAuth(kernel_username)\n                self.log.debug(\n                    f\"Using SimpleAuth with '{kernel_username}' against endpoints: {endpoints}\"\n                )\n            except ImportError:\n                auth = None\n\n        self.resource_mgr = ResourceManager(\n            service_endpoints=endpoints, auth=auth, verify=cert_path\n        )\n\n        self.rm_addr = self.resource_mgr.get_active_endpoint()\n\n    async def launch_process(\n        self, kernel_cmd: str, **kwargs: dict[str, Any] | None\n    ) -> YarnClusterProcessProxy:\n        \"\"\"\n        Launches the specified process within a YARN cluster environment.\n        \"\"\"\n\n        self._initialize_resource_manager(**kwargs)\n\n        # checks to see if the queue resource is available\n        # if not available, kernel startup is not attempted\n        
self.confirm_yarn_queue_availability(**kwargs)\n\n        await super().launch_process(kernel_cmd, **kwargs)\n\n        # launch the local run.sh - which is configured for yarn-cluster...\n        self.local_proc = self.launch_kernel(kernel_cmd, **kwargs)\n        self.pid = self.local_proc.pid\n        self.ip = local_ip\n\n        self.log.debug(\n            \"Yarn cluster kernel launched using YARN RM address: {}, pid: {}, Kernel ID: {}, cmd: '{}'\".format(\n                self.rm_addr, self.local_proc.pid, self.kernel_id, kernel_cmd\n            )\n        )\n        await self.confirm_remote_startup()\n        return self\n\n    def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> None:\n        \"\"\"\n        Submitting jobs to yarn queue and then checking till the jobs are in running state\n        will lead to orphan jobs being created in some scenarios.\n\n        We take kernel_launch_timeout time and divide this into two parts.\n        If the queue is unavailable we take max 20% of the time to poll the queue periodically\n        and if the queue becomes available the rest of timeout is met in 80% of the remaining\n        time.\n\n        This algorithm is subject to change. Please read the below cases to understand\n        when and how checks are applied.\n\n        Confirms if the yarn queue has capacity to handle the resource requests that\n        will be sent to it.\n\n        First check ensures the driver and executor memory request falls within\n        the container size of yarn configuration. This check requires executor and\n        driver memory to be available in the env.\n\n        Second,Current version of check, takes into consideration node label partitioning\n        on given queues. Provided the queue name and node label this checks if\n        the given partition has capacity available for kernel startup.\n\n        All Checks are optional. 
If we have KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY\n        specified, first check is performed.\n\n        If we have KERNEL_QUEUE and KERNEL_NODE_LABEL specified, second check is performed.\n\n        Proper error messages are sent back for user experience\n        :param kwargs:\n        :return:\n        \"\"\"\n        env_dict = kwargs.get(\"env\", {})\n\n        executor_memory = int(env_dict.get(\"KERNEL_EXECUTOR_MEMORY\", 0))\n        driver_memory = int(env_dict.get(\"KERNEL_DRIVER_MEMORY\", 0))\n\n        if executor_memory * driver_memory > 0:\n            container_memory = self.resource_mgr.cluster_node_container_memory()\n            if max(executor_memory, driver_memory) > container_memory:\n                self.log_and_raise(\n                    http_status_code=500,\n                    reason=\"Container Memory not sufficient for a executor/driver allocation\",\n                )\n\n        candidate_queue_name = env_dict.get(\"KERNEL_QUEUE\", None)\n        node_label = env_dict.get(\"KERNEL_NODE_LABEL\", None)\n        partition_availability_threshold = float(env_dict.get(\"YARN_PARTITION_THRESHOLD\", 95.0))\n\n        if candidate_queue_name is None or node_label is None:\n            return\n\n        # else the resources may or may not be available now. it may be possible that if we wait then the resources\n        # become available. 
start a timeout process\n\n        self.start_time = RemoteProcessProxy.get_current_time()\n        self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name)\n\n        if self.candidate_queue is None:\n            self.log.warning(\n                f\"Queue: {candidate_queue_name} not found in cluster.\"\n                \"Availability check will not be performed\"\n            )\n            return\n\n        self.candidate_partition = self.resource_mgr.cluster_queue_partition(\n            self.candidate_queue, node_label\n        )\n\n        if self.candidate_partition is None:\n            self.log.debug(\n                f\"Partition: {node_label} not found in {candidate_queue_name} queue.\"\n                \"Availability check will not be performed\"\n            )\n            return\n\n        self.log.debug(\n            f\"Checking endpoint: {self.yarn_endpoint} if partition: {self.candidate_partition} \"\n            f\"has used capacity <= {partition_availability_threshold}%\"\n        )\n\n        yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(\n            self.candidate_partition, partition_availability_threshold\n        )\n        if not yarn_available:\n            self.log.debug(\n                \"Retrying for {} seconds since resources are not available\".format(\n                    self.yarn_resource_check_wait_time\n                )\n            )\n            while not yarn_available:\n                self.handle_yarn_queue_timeout()\n                yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(\n                    self.candidate_partition, partition_availability_threshold\n                )\n\n        # subtracting the total amount of time spent for polling for queue availability\n        self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n    def 
handle_yarn_queue_timeout(self) -> None:\n        \"\"\"Handle a yarn queue timeout.\"\"\"\n        time.sleep(poll_interval)\n        time_interval = RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n        if time_interval > self.yarn_resource_check_wait_time:\n            error_http_code = 500\n            reason = \"Yarn Compute Resource is unavailable after {} seconds\".format(\n                self.yarn_resource_check_wait_time\n            )\n            self.log_and_raise(http_status_code=error_http_code, reason=reason)\n\n    def poll(self) -> bool | None:\n        \"\"\"Submitting a new kernel/app to YARN will take a while to be ACCEPTED.\n        Thus application ID will probably not be available immediately for poll.\n        So will regard the application as RUNNING when application ID still in ACCEPTED or SUBMITTED state.\n\n        :return: None if the application's ID is available and state is ACCEPTED/SUBMITTED/RUNNING. 
Otherwise False.\n        \"\"\"\n        result = False\n\n        if self._get_application_id():\n            state = self._query_app_state_by_id(self.application_id)\n            if state in YarnClusterProcessProxy.initial_states:\n                result = None\n\n        # The following produces too much output (every 3 seconds by default), so commented-out at this time.\n        # self.log.debug(\"YarnProcessProxy.poll, application ID: {}, kernel ID: {}, state: {}\".\n        #               format(self.application_id, self.kernel_id, state))\n        return result\n\n    def send_signal(self, signum: int) -> bool | None:\n        \"\"\"Currently only support 0 as poll and other as kill.\n\n        :param signum\n        :return:\n        \"\"\"\n        if signum == 0:\n            return self.poll()\n        elif signum == signal.SIGKILL:\n            return self.kill()\n        else:\n            # Yarn api doesn't support the equivalent to interrupts, so take our chances\n            # via a remote signal.  
Note that this condition cannot check against the\n            # signum value because alternate interrupt signals might be in play.\n            return super().send_signal(signum)\n\n    def kill(self) -> bool | None:\n        \"\"\"Kill a kernel.\n        :return: None if the application existed and is not in RUNNING state, False otherwise.\n        \"\"\"\n        state = None\n        result = False\n        if self._get_application_id():\n            self._kill_app_by_id(self.application_id)\n            # Check that state has moved to a final state (most likely KILLED)\n            i = 1\n            state = self._query_app_state_by_id(self.application_id)\n            while state not in YarnClusterProcessProxy.final_states and i <= max_poll_attempts:\n                time.sleep(poll_interval)\n                state = self._query_app_state_by_id(self.application_id)\n                i = i + 1\n\n            if state in YarnClusterProcessProxy.final_states:\n                result = None\n\n        if result is False:  # We couldn't terminate via Yarn, try remote signal\n            result = super().kill()\n\n        self.log.debug(\n            \"YarnClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}, result: {}\".format(\n                self.application_id, self.kernel_id, state, result\n            )\n        )\n        return result\n\n    def cleanup(self) -> None:\n        \"\"\"Clean up the proxy\"\"\"\n        # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have\n        # a local_proc.\n        if self.local_proc:\n            self.log.debug(\n                \"YarnClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...\".format(\n                    self.local_proc.pid\n                )\n            )\n            if super().poll():\n                super().kill()\n            super().wait()\n            self.local_proc = None\n\n        # reset application 
id to force new query - handles kernel restarts/interrupts\n        self.application_id = None\n\n        # for cleanup, we should call the superclass last\n        super().cleanup()\n\n    async def confirm_remote_startup(self) -> None:\n        \"\"\"Confirms the yarn application is in a started state before returning.  Should post-RUNNING states be\n        unexpectedly encountered (FINISHED, KILLED, FAILED) then we must throw,\n        otherwise the rest of the gateway will believe its talking to a valid kernel.\n        \"\"\"\n        self.start_time = RemoteProcessProxy.get_current_time()\n        i = 0\n        ready_to_connect = False  # we're ready to connect when we have a connection file to use\n        while not ready_to_connect:\n            i += 1\n            await self.handle_timeout()\n\n            if self._get_application_id(True):\n                # Once we have an application ID, start monitoring state, obtain assigned host and get connection info\n                app_state = self._get_application_state()\n\n                if app_state in YarnClusterProcessProxy.final_states:\n                    error_message = (\n                        \"KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}'\"\n                        \" during kernel startup!\".format(\n                            self.kernel_id, self.application_id, app_state\n                        )\n                    )\n                    self.log_and_raise(http_status_code=500, reason=error_message)\n\n                self.log.debug(\n                    \"{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'\".format(\n                        i, app_state, self.assigned_host, self.kernel_id, self.application_id\n                    )\n                )\n\n                if self.assigned_host:\n                    ready_to_connect = await self.receive_connection_info()\n            else:\n                self.detect_launch_failure()\n\n    async def 
handle_timeout(self) -> None:\n        \"\"\"Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.\"\"\"\n        await asyncio.sleep(poll_interval)\n        time_interval = RemoteProcessProxy.get_time_diff(\n            self.start_time, RemoteProcessProxy.get_current_time()\n        )\n\n        if time_interval > self.kernel_launch_timeout:\n            reason = (\n                \"Application ID is None. Failed to submit a new application to YARN within {} seconds.  \"\n                \"Check Enterprise Gateway log for more information.\".format(\n                    self.kernel_launch_timeout\n                )\n            )\n            error_http_code = 500\n            if self._get_application_id(True):\n                if self._query_app_state_by_id(self.application_id) != \"RUNNING\":\n                    reason = (\n                        \"YARN resources unavailable after {} seconds for app {}, launch timeout: {}!  \"\n                        \"Check YARN configuration.\".format(\n                            time_interval, self.application_id, self.kernel_launch_timeout\n                        )\n                    )\n                    error_http_code = 503\n                else:\n                    reason = (\n                        \"App {} is RUNNING, but waited too long ({} secs) to get connection file.  
\"\n                        \"Check YARN logs for more information.\".format(\n                            self.application_id, self.kernel_launch_timeout\n                        )\n                    )\n            await asyncio.get_event_loop().run_in_executor(None, self.kill)\n            timeout_message = f\"KernelID: '{self.kernel_id}' launch timeout due to: {reason}\"\n            self.log_and_raise(http_status_code=error_http_code, reason=timeout_message)\n\n    def get_process_info(self) -> dict[str, Any]:\n        \"\"\"Captures the base information necessary for kernel persistence relative to YARN clusters.\"\"\"\n        process_info = super().get_process_info()\n        process_info.update({\"application_id\": self.application_id})\n        return process_info\n\n    def load_process_info(self, process_info: dict[str, Any]) -> None:\n        \"\"\"Loads the base information necessary for kernel persistence relative to YARN clusters.\"\"\"\n        super().load_process_info(process_info)\n        self.application_id = process_info[\"application_id\"]\n\n    def _get_application_state(self) -> str:\n        # Gets the current application state using the application_id already obtained.  
Once the assigned host\n        # has been identified, 'amHostHttpAddress' is no longer accessed.\n        app_state = self.last_known_state\n        app = self._query_app_by_id(self.application_id)\n        if app:\n            if app.get(\"state\"):\n                app_state = app.get(\"state\")\n                self.last_known_state = app_state\n\n            if not self.assigned_host and app.get(\"amHostHttpAddress\"):\n                self.assigned_host = app.get(\"amHostHttpAddress\").split(\":\")[0]\n                # Set the kernel manager ip to the actual host where the application landed.\n                self.assigned_ip = socket.gethostbyname(self.assigned_host)\n\n        return app_state\n\n    def _get_application_id(self, ignore_final_states: bool = False) -> str:\n        # Return the kernel's YARN application ID if available, otherwise None.  If we're obtaining application_id\n        # from scratch, do not consider kernels in final states.\n        if not self.application_id:\n            app = self._query_app_by_name(self.kernel_id)\n            state_condition = True\n            if isinstance(app, dict):\n                state = app.get(\"state\")\n                self.last_known_state = state\n\n                if ignore_final_states:\n                    state_condition = state not in YarnClusterProcessProxy.final_states\n\n                if len(app.get(\"id\", \"\")) > 0 and state_condition:\n                    self.application_id = app[\"id\"]\n                    time_interval = RemoteProcessProxy.get_time_diff(\n                        self.start_time, RemoteProcessProxy.get_current_time()\n                    )\n                    self.log.info(\n                        \"ApplicationID: '{}' assigned for KernelID: '{}', \"\n                        \"state: {}, {} seconds after starting.\".format(\n                            app[\"id\"], self.kernel_id, state, time_interval\n                        )\n                    )\n          
  if not self.application_id:\n                self.log.debug(\n                    f\"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying...\"\n                )\n        return self.application_id\n\n    def _query_app_by_name(self, kernel_id: str) -> dict:\n        \"\"\"Retrieve application by using kernel_id as the unique app name.\n        With the started_time_begin as a parameter to filter applications started earlier than the target one from YARN.\n        When submitting a new app, it may take a while for YARN to accept and run and generate the application ID.\n        Note: if a kernel restarts with the same kernel id as app name, multiple applications will be returned.\n        For now, the app/kernel with the topmost application ID will be returned as the target app, assuming the app\n        ID will be incremented automatically on the YARN side.\n\n        :param kernel_id: as the unique app name for query\n        :return: The JSON object of an application.\n        \"\"\"\n        top_most_app_id = \"\"\n        target_app = None\n        try:\n            response = self.resource_mgr.cluster_applications(\n                started_time_begin=str(self.start_time)\n            )\n        except OSError as sock_err:\n            if sock_err.errno == errno.ECONNREFUSED:\n                self.log.warning(\n                    \"YARN RM address: '{}' refused the connection.  Is the resource manager running?\".format(\n                        self.rm_addr\n                    )\n                )\n            else:\n                self.log.warning(\n                    \"Query for kernel ID '{}' failed with exception: {} - '{}'.  Continuing...\".format(\n                        kernel_id, type(sock_err), sock_err\n                    )\n                )\n        except Exception as e:\n            self.log.warning(\n                \"Query for kernel ID '{}' failed with exception: {} - '{}'.  
Continuing...\".format(\n                    kernel_id, type(e), e\n                )\n            )\n        else:\n            data = response.data\n            if (\n                isinstance(data, dict)\n                and isinstance(data.get(\"apps\"), dict)\n                and \"app\" in data.get(\"apps\")\n            ):\n                for app in data[\"apps\"][\"app\"]:\n                    if app.get(\"name\", \"\").find(kernel_id) >= 0 and app.get(\"id\") > top_most_app_id:\n                        target_app = app\n                        top_most_app_id = app.get(\"id\")\n        return target_app\n\n    def _query_app_by_id(self, app_id: str) -> dict:\n        \"\"\"Retrieve an application by application ID.\n\n        :param app_id\n        :return: The JSON object of an application.\n        \"\"\"\n        app = None\n        try:\n            response = self.resource_mgr.cluster_application(application_id=app_id)\n        except Exception as e:\n            self.log.warning(\n                f\"Query for application ID '{app_id}' failed with exception: '{e}'.  Continuing...\"\n            )\n        else:\n            data = response.data\n            if isinstance(data, dict) and \"app\" in data:\n                app = data[\"app\"]\n\n        return app\n\n    def _query_app_state_by_id(self, app_id: str) -> str:\n        \"\"\"Return the state of an application. If a failure occurs, the last known state is returned.\n\n        :param app_id:\n        :return: application state (str)\n        \"\"\"\n        state = self.last_known_state\n        try:\n            response = self.resource_mgr.cluster_application_state(application_id=app_id)\n        except Exception as e:\n            self.log.warning(\n                f\"Query for application '{app_id}' state failed with exception: '{e}'.  
\"\n                f\"Continuing with last known state = '{state}'...\"\n            )\n        else:\n            state = response.data[\"state\"]\n            self.last_known_state = state\n\n        return state\n\n    def _kill_app_by_id(self, app_id: str) -> Response:\n        \"\"\"Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED.\n\n        :param app_id\n        :return: The JSON response of killing the application.\n        \"\"\"\n\n        response = None\n        try:\n            response = self.resource_mgr.cluster_application_kill(application_id=app_id)\n        except Exception as e:\n            self.log.warning(\n                f\"Termination of application '{app_id}' failed with exception: '{e}'.  Continuing...\"\n            )\n\n        return response\n"
  },
  {
    "path": "enterprise_gateway/services/sessions/__init__.py",
    "content": ""
  },
  {
    "path": "enterprise_gateway/services/sessions/handlers.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tornado handlers for session CRUD.\"\"\"\nfrom typing import List\n\nimport jupyter_server.services.sessions.handlers as jupyter_server_handlers\nimport tornado\nfrom jupyter_server.utils import ensure_async\n\nfrom ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin\n\n\nclass SessionRootHandler(\n    TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.SessionRootHandler\n):\n    \"\"\"Extends the jupyter_server root session handler with token auth, CORS, and\n    JSON errors.\n    \"\"\"\n\n    async def get(self) -> None:\n        \"\"\"Overrides the super class method to honor the kernel listing\n        configuration setting.\n\n        Raises\n        ------\n        tornado.web.HTTPError\n            If eg_list_kernels is False, respond with 403 Forbidden\n        \"\"\"\n        if \"eg_list_kernels\" not in self.settings or not self.settings[\"eg_list_kernels\"]:\n            raise tornado.web.HTTPError(403, \"Forbidden\")\n        else:\n            await ensure_async(super().get())\n\n\ndefault_handlers: List[tuple] = []\nfor path, cls in jupyter_server_handlers.default_handlers:\n    if cls.__name__ in globals():\n        # Use the same named class from here if it exists\n        default_handlers.append((path, globals()[cls.__name__]))\n    else:\n        # Everything should have CORS and token auth\n        bases = (TokenAuthorizationMixin, CORSMixin, cls)\n        default_handlers.append((path, type(cls.__name__, bases, {})))\n"
  },
  {
    "path": "enterprise_gateway/services/sessions/kernelsessionmanager.py",
    "content": "\"\"\"Session manager that keeps all its metadata in memory.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import annotations\n\nimport getpass\nimport json\nimport os\nimport threading\n\nimport requests\nfrom jupyter_core.paths import jupyter_data_dir\nfrom requests.auth import HTTPBasicAuth, HTTPDigestAuth\nfrom traitlets import Bool, CaselessStrEnum, Unicode, default\nfrom traitlets.config.configurable import LoggingConfigurable\n\nkernels_lock = threading.Lock()\n\n# These will be located under the `persistence_root` and exist\n# to make integration with ContentsManager implementations easier.\nKERNEL_SESSIONS_DIR_NAME = \"kernel_sessions\"\n\n\nclass KernelSessionManager(LoggingConfigurable):\n    \"\"\"\n    KernelSessionManager is used to save and load kernel sessions from persistent storage.\n\n    KernelSessionManager provides the basis for an HA solution.  It loads the complete set of persisted kernel\n    sessions during construction.  Following construction the parent object calls start_sessions to allow\n    Enterprise Gateway to validate that all loaded sessions are still valid.  Those that it cannot 'revive'\n    are marked for deletion and the in-memory dictionary is updated - and the entire collection is written\n    to store (file or database).\n\n    As kernels are created and destroyed, the KernelSessionManager is called upon to keep kernel session\n    state consistent.\n\n    NOTE: This class is essentially an abstract base class that requires its `load_sessions` and `save_sessions`\n    have implementations in subclasses.  
abc.ABCMeta is not used due to conflicts with derivation of\n    LoggingConfigurable - which seemed more important.\n    \"\"\"\n\n    # Session Persistence\n    session_persistence_env = \"EG_KERNEL_SESSION_PERSISTENCE\"\n    session_persistence_default_value = False\n    enable_persistence = Bool(\n        session_persistence_default_value,\n        config=True,\n        help=\"\"\"Enable kernel session persistence (True or False). Default = False\n(EG_KERNEL_SESSION_PERSISTENCE env var)\"\"\",\n    )\n\n    @default(\"enable_persistence\")\n    def _session_persistence_default(self) -> bool:\n        return bool(\n            os.getenv(\n                self.session_persistence_env, str(self.session_persistence_default_value)\n            ).lower()\n            == \"true\"\n        )\n\n    # Persistence root\n    persistence_root_env = \"EG_PERSISTENCE_ROOT\"\n    persistence_root = Unicode(\n        config=True,\n        help=\"\"\"Identifies the root 'directory' under which the 'kernel_sessions' node will\nreside.  This directory should exist.  
(EG_PERSISTENCE_ROOT env var)\"\"\",\n    )\n\n    @default(\"persistence_root\")\n    def _persistence_root_default(self) -> str:\n        return os.getenv(self.persistence_root_env, \"/\")\n\n    def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs):  # noqa: F821\n        \"\"\"Initialize the manager.\"\"\"\n        super().__init__(**kwargs)\n        self.kernel_manager = kernel_manager\n        self._sessions = {}\n        self._sessionsByUser = {}\n\n    def create_session(self, kernel_id: str, **kwargs) -> None:\n        \"\"\"\n        Creates a session associated with this kernel.\n\n        All items associated with the active kernel's state are saved.\n\n        Parameters\n        ----------\n        kernel_id : str\n            The uuid string associated with the active kernel\n\n        **kwargs : optional\n            Information used for the launch of the kernel\n\n        \"\"\"\n        km = self.kernel_manager.get_kernel(kernel_id)\n\n        # Compose the kernel_session entry\n        kernel_session = {}\n        kernel_session[\"kernel_id\"] = kernel_id\n        kernel_session[\"username\"] = KernelSessionManager.get_kernel_username(**kwargs)\n        kernel_session[\"kernel_name\"] = km.kernel_name\n\n        # Build the inner dictionaries: connection_info, process_proxy and add to kernel_session\n        kernel_session[\"connection_info\"] = km.get_connection_info()\n        kernel_session[\"launch_args\"] = kwargs.copy()\n        kernel_session[\"process_info\"] = (\n            km.process_proxy.get_process_info() if km.process_proxy else {}\n        )\n        self._save_session(kernel_id, kernel_session)\n\n    def refresh_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Refreshes the session from its persisted state. 
Called on kernel restarts.\n        \"\"\"\n        self.log.debug(f\"Refreshing kernel session for id: {kernel_id}\")\n        km = self.kernel_manager.get_kernel(kernel_id)\n\n        # Compose the kernel_session entry\n        kernel_session = self._sessions[kernel_id]\n\n        # Build the inner dictionaries: connection_info, process_proxy and add to kernel_session\n        kernel_session[\"connection_info\"] = km.get_connection_info()\n        kernel_session[\"process_info\"] = (\n            km.process_proxy.get_process_info() if km.process_proxy else {}\n        )\n        self._save_session(kernel_id, kernel_session)\n\n    def _save_session(self, kernel_id: str, kernel_session: dict) -> None:\n        # Write/commit the addition, update dictionary\n        kernels_lock.acquire()\n        try:\n            self._sessions[kernel_id] = kernel_session\n            username = kernel_session[\"username\"]\n            if username not in self._sessionsByUser:\n                self._sessionsByUser[username] = []\n                self._sessionsByUser[username].append(kernel_id)\n            else:\n                # Only append if not there yet (e.g. restarts will be there already)\n                if kernel_id not in self._sessionsByUser[username]:\n                    self._sessionsByUser[username].append(kernel_id)\n            self.save_session(kernel_id)  # persist changes in file/DB etc.\n        finally:\n            kernels_lock.release()\n\n    def start_session(self, kernel_id: str) -> bool | None:\n        \"\"\"Start a session for a given kernel.\"\"\"\n        kernel_session = self._sessions.get(kernel_id, None)\n        if kernel_session is not None:\n            return self._start_session(kernel_session)\n        return None\n\n    def start_sessions(self) -> None:\n        \"\"\"\n        Attempt to start persisted sessions.\n\n        Determines if session startup was successful.  
If unsuccessful, the session is removed\n        from persistent storage.\n        \"\"\"\n        if self.enable_persistence:\n            self.load_sessions()\n            sessions_to_remove = []\n            for kernel_id, kernel_session in self._sessions.items():\n                self.log.info(\n                    \"Attempting startup of persisted kernel session for id: %s...\" % kernel_id\n                )\n                if self._start_session(kernel_session):\n                    self.log.info(\n                        \"Startup of persisted kernel session for id '{}' was successful.  Client should \"\n                        \"reconnect kernel.\".format(kernel_id)\n                    )\n                else:\n                    sessions_to_remove.append(kernel_id)\n                    self.log.warning(\n                        \"Startup of persisted kernel session for id '{}' was not successful.  Check if \"\n                        \"client is still active and restart kernel.\".format(kernel_id)\n                    )\n\n            self._delete_sessions(sessions_to_remove)\n\n    def _start_session(self, kernel_session: dict) -> bool:\n        # Attempt to start kernel from persisted state.  
if started, record kernel_session in dictionary\n        # else delete session\n        kernel_id = kernel_session[\"kernel_id\"]\n        kernel_started = self.kernel_manager.start_kernel_from_session(\n            kernel_id=kernel_id,\n            kernel_name=kernel_session[\"kernel_name\"],\n            connection_info=kernel_session[\"connection_info\"],\n            process_info=kernel_session[\"process_info\"],\n            launch_args=kernel_session[\"launch_args\"],\n        )\n        if not kernel_started:\n            return False\n\n        return True\n\n    def delete_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Removes saved session associated with kernel_id from dictionary and persisted storage.\n        \"\"\"\n        self._delete_sessions([kernel_id])\n\n        if self.enable_persistence:\n            self.log.info(\"Deleted persisted kernel session for id: %s\" % kernel_id)\n\n    def _delete_sessions(self, kernel_ids: list[str]) -> None:\n        # Remove unstarted sessions and rewrite\n        kernels_lock.acquire()\n        try:\n            for kernel_id in kernel_ids:\n                # Prior to removing session, update the per User list\n                kernel_session = self._sessions.get(kernel_id, None)\n                if kernel_session is not None:\n                    username = kernel_session[\"username\"]\n                    if (\n                        username in self._sessionsByUser\n                        and kernel_id in self._sessionsByUser[username]\n                    ):\n                        self._sessionsByUser[username].remove(kernel_id)\n                    self._sessions.pop(kernel_id, None)\n\n            self.delete_sessions(kernel_ids)\n        finally:\n            kernels_lock.release()\n\n    @staticmethod\n    def pre_save_transformation(session: dict) -> dict:\n        \"\"\"Handle a pre_save for a session.\"\"\"\n        kernel_id = next(iter(session.keys()))\n        session_info = 
session[kernel_id]\n        if session_info.get(\"connection_info\"):\n            info = session_info[\"connection_info\"]\n            key = info.get(\"key\")\n            if key:\n                info[\"key\"] = key.decode(\"utf8\")\n\n        return session\n\n    @staticmethod\n    def post_load_transformation(session: dict) -> dict:\n        \"\"\"Handle a post_load for a session.\"\"\"\n        kernel_id = next(iter(session.keys()))\n        session_info = session[kernel_id]\n        if session_info.get(\"connection_info\"):\n            info = session_info[\"connection_info\"]\n            key = info.get(\"key\")\n            if key:\n                info[\"key\"] = key.encode(\"utf8\")\n\n        return session\n\n    # abstractmethod\n    def load_sessions(self) -> None:\n        \"\"\"\n        Load and initialize _sessions member from persistent storage.  This method is called from start_sessions().\n        \"\"\"\n        msg = \"KernelSessionManager.load_sessions() requires an implementation!\"\n        raise NotImplementedError(msg)\n\n    # abstractmethod\n    def load_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Load and initialize _sessions member from persistent storage for a single kernel.  This method is called from\n        refresh_sessions().\n        \"\"\"\n        msg = \"KernelSessionManager.load_session() requires an implementation!\"\n        raise NotImplementedError(msg)\n\n    # abstractmethod\n    def delete_sessions(self, kernel_ids: list[str]) -> None:\n        \"\"\"\n        Delete the sessions in persistent storage.  Caller is responsible for synchronizing call.\n        \"\"\"\n        msg = \"KernelSessionManager.delete_sessions(kernel_ids) requires an implementation!\"\n        raise NotImplementedError(msg)\n\n    def save_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Saves the sessions dictionary to persistent store.  
Caller is responsible for synchronizing call.\n        \"\"\"\n        msg = \"KernelSessionManager.save_session(kernel_id) requires an implementation!\"\n        raise NotImplementedError(msg)\n\n    def active_sessions(self, username: str) -> int:\n        \"\"\"\n        Returns the number of active sessions for the given username.\n\n        Parameters\n        ----------\n        username : str\n            The username associated with the active session\n\n        Returns\n        -------\n        int corresponding to the number of active sessions associated with given user\n        \"\"\"\n        if username in self._sessionsByUser:\n            return len(self._sessionsByUser[username])\n        return 0\n\n    @staticmethod\n    def get_kernel_username(**kwargs) -> str:\n        \"\"\"\n        Returns the kernel's logical username from env dict.\n\n        Checks the process env for KERNEL_USERNAME.  If set, that value is returned, else KERNEL_USERNAME is\n        initialized to the current user and that value is returned.\n\n        Parameters\n        ----------\n        kwargs : dict from which request env is accessed.\n\n        Returns\n        -------\n        str indicating kernel username\n        \"\"\"\n        # Get the env\n        env_dict = kwargs.get(\"env\", {})\n\n        # Ensure KERNEL_USERNAME is set\n        kernel_username = env_dict.get(\"KERNEL_USERNAME\")\n        if kernel_username is None:\n            kernel_username = getpass.getuser()\n            env_dict[\"KERNEL_USERNAME\"] = kernel_username\n\n        return kernel_username\n\n\nclass FileKernelSessionManager(KernelSessionManager):\n    \"\"\"\n    Performs kernel session persistence operations against the file `sessions.json` located in the kernel_sessions\n    directory in the directory pointed to by the persistence_root parameter (default JUPYTER_DATA_DIR).\n    \"\"\"\n\n    # Change the default to Jupyter Data Dir.\n    @default(\"persistence_root\")\n    def 
_persistence_root_default(self) -> str:\n        return os.getenv(self.persistence_root_env, jupyter_data_dir())\n\n    def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs):  # noqa: F821\n        \"\"\"Initialize the manager.\"\"\"\n        super().__init__(kernel_manager, **kwargs)\n        if self.enable_persistence:\n            self.log.info(f\"Kernel session persistence location: {self._get_sessions_loc()}\")\n\n    def delete_sessions(self, kernel_ids: list[str]) -> None:\n        \"\"\"Delete the sessions for a list of kernels.\"\"\"\n        if self.enable_persistence:\n            for kernel_id in kernel_ids:\n                kernel_file_name = \"\".join([kernel_id, \".json\"])\n                kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name)\n                if os.path.exists(kernel_session_file_path):\n                    os.remove(kernel_session_file_path)\n\n    def save_session(self, kernel_id: str) -> None:\n        \"\"\"Save the session for a kernel.\"\"\"\n        if self.enable_persistence and kernel_id is not None:\n            kernel_file_name = \"\".join([kernel_id, \".json\"])\n            kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name)\n            temp_session = {}\n            temp_session[kernel_id] = self._sessions[kernel_id]\n            with open(kernel_session_file_path, \"w\") as fp:\n                json.dump(KernelSessionManager.pre_save_transformation(temp_session), fp)\n\n    def load_sessions(self) -> None:\n        \"\"\"Load the sessions.\"\"\"\n        if self.enable_persistence:\n            kernel_session_files = [\n                json_files\n                for json_files in os.listdir(self._get_sessions_loc())\n                if json_files.endswith(\".json\")\n            ]\n            for kernel_session_file in kernel_session_files:\n                self._load_session_from_file(kernel_session_file)\n\n    def load_session(self, 
kernel_id: str) -> None:\n        \"\"\"Load the session for a kernel.\"\"\"\n        if self.enable_persistence and kernel_id is not None:\n            kernel_session_file = \"\".join([kernel_id, \".json\"])\n            self._load_session_from_file(kernel_session_file)\n\n    def _load_session_from_file(self, file_name: str) -> None:\n        kernel_session_file_path = os.path.join(self._get_sessions_loc(), file_name)\n        if os.path.exists(kernel_session_file_path):\n            self.log.debug(f\"Loading saved session(s) from {kernel_session_file_path}\")\n            try:\n                with open(kernel_session_file_path) as fp:\n                    self._sessions.update(\n                        KernelSessionManager.post_load_transformation(json.load(fp))\n                    )\n            except json.JSONDecodeError as e:\n                self.log.error(\n                    f\"Failed to load session from {kernel_session_file_path}: Invalid JSON - {e}\"\n                )\n            except Exception as e:\n                self.log.error(\n                    f\"Failed to load session from {kernel_session_file_path}: {type(e).__name__} - {e}\"\n                )\n\n    def _get_sessions_loc(self) -> str:\n        path = os.path.join(self.persistence_root, KERNEL_SESSIONS_DIR_NAME)\n        if not os.path.exists(path):\n            os.makedirs(path, 0o755)\n        return path\n\n\nclass WebhookKernelSessionManager(KernelSessionManager):\n    \"\"\"\n    Performs kernel session persistence operations against URL provided (EG_WEBHOOK_URL). The URL must have 4 endpoints\n    associated with it. 
1 delete endpoint that takes a list of kernel ids in\n    the body, 1 post endpoint that takes kernels id as a\n    url param and the kernel session as the body, 1 get endpoint that returns\n    all kernel sessions, and 1 get endpoint that returns\n    a specific kernel session based on kernel id as url param.\n    \"\"\"\n\n    # Webhook URL\n    webhook_url_env = \"EG_WEBHOOK_URL\"\n    webhook_url = Unicode(\n        config=True,\n        allow_none=True,\n        help=\"\"\"URL endpoint for webhook kernel session manager\"\"\",\n    )\n\n    @default(\"webhook_url\")\n    def _webhook_url_default(self) -> str | None:\n        return os.getenv(self.webhook_url_env, None)\n\n    # Webhook Username\n    webhook_username_env = \"EG_WEBHOOK_USERNAME\"\n    webhook_username = Unicode(\n        config=True,\n        allow_none=True,\n        help=\"\"\"Username for webhook kernel session manager API auth\"\"\",\n    )\n\n    @default(\"webhook_username\")\n    def _webhook_username_default(self) -> str | None:\n        return os.getenv(self.webhook_username_env, None)\n\n    # Webhook Password\n    webhook_password_env = \"EG_WEBHOOK_PASSWORD\"  # noqa\n    webhook_password = Unicode(\n        config=True,\n        allow_none=True,\n        help=\"\"\"Password for webhook kernel session manager API auth\"\"\",\n    )\n\n    @default(\"webhook_password\")\n    def _webhook_password_default(self) -> str | None:\n        return os.getenv(self.webhook_password_env, None)\n\n    # Auth Type\n    auth_type_env = \"EG_AUTH_TYPE\"\n    auth_type = CaselessStrEnum(\n        config=True,\n        allow_none=True,\n        values=[\"basic\", \"digest\"],\n        help=\"\"\"Authentication type for webhook kernel session manager API. 
Either basic, digest or None\"\"\",\n    )\n\n    @default(\"auth_type\")\n    def _auth_type_default(self) -> str | None:\n        return os.getenv(self.auth_type_env, None)\n\n    def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs):  # noqa: F821\n        \"\"\"Initialize the manager.\"\"\"\n        super().__init__(kernel_manager, **kwargs)\n        if self.enable_persistence:\n            self.log.info(\"Webhook kernel session persistence activated\")\n            self.auth = \"\"\n            if self.auth_type:\n                if self.webhook_username and self.webhook_password:\n                    if self.auth_type == \"basic\":\n                        self.auth = HTTPBasicAuth(self.webhook_username, self.webhook_password)\n                    elif self.auth_type == \"digest\":\n                        self.auth = HTTPDigestAuth(self.webhook_username, self.webhook_password)\n                    elif self.auth_type is None:\n                        self.auth = \"\"\n                    else:\n                        self.log.error(\"No such option for auth_type/EG_AUTH_TYPE\")\n                else:\n                    self.log.error(\"Username and/or password aren't set\")\n\n    def delete_sessions(self, kernel_ids: list[str]) -> None:\n        \"\"\"\n        Deletes kernel sessions from database\n\n        :param list of strings kernel_ids: A list of kernel ids\n        \"\"\"\n        if self.enable_persistence:\n            response = requests.delete(\n                self.webhook_url, auth=self.auth, json=kernel_ids, timeout=60\n            )\n            self.log.debug(f\"Webhook kernel session deleting: {kernel_ids}\")\n            if response.status_code != 204:\n                self.log.error(response.raise_for_status())\n\n    def save_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Saves kernel session to database\n\n        :param string kernel_id: A kernel id\n        \"\"\"\n        if 
self.enable_persistence and kernel_id is not None:\n            temp_session = {}\n            temp_session[kernel_id] = self._sessions[kernel_id]\n            body = KernelSessionManager.pre_save_transformation(temp_session)\n            response = requests.post(\n                f\"{self.webhook_url}/{kernel_id}\", auth=self.auth, json=body, timeout=60\n            )\n            self.log.debug(f\"Webhook kernel session saving: {kernel_id}\")\n            if response.status_code != 204:\n                self.log.error(response.raise_for_status())\n\n    def load_sessions(self) -> None:\n        \"\"\"\n        Loads kernel sessions from database\n        \"\"\"\n        if self.enable_persistence:\n            response = requests.get(self.webhook_url, auth=self.auth, timeout=60)\n            if response.status_code == 200:\n                kernel_sessions = response.json()\n                for kernel_session in kernel_sessions:\n                    self._load_session_from_response(kernel_session)\n            else:\n                self.log.error(response.raise_for_status())\n\n    def load_session(self, kernel_id: str) -> None:\n        \"\"\"\n        Loads a kernel session from database\n\n        :param string kernel_id: A kernel id\n        \"\"\"\n        if self.enable_persistence and kernel_id is not None:\n            response = requests.get(f\"{self.webhook_url}/{kernel_id}\", auth=self.auth, timeout=60)\n            if response.status_code == 200:\n                kernel_session = response.json()\n                self._load_session_from_response(kernel_session)\n            else:\n                self.log.error(response.raise_for_status())\n\n    def _load_session_from_response(self, kernel_session: dict) -> None:\n        \"\"\"\n        Loads kernel session to current session\n\n        :param dictionary kernel_session: Kernel session information\n        \"\"\"\n        self.log.debug(\"Loading saved session(s)\")\n        self._sessions.update(\n   
         KernelSessionManager.post_load_transformation(kernel_session[\"kernel_session\"])\n        )\n"
  },
  {
    "path": "enterprise_gateway/services/sessions/sessionmanager.py",
    "content": "\"\"\"Session manager that keeps all its metadata in memory.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport uuid\nfrom typing import Any, Hashable, List, Optional\n\nfrom tornado import web\nfrom traitlets.config.configurable import LoggingConfigurable\n\nfrom enterprise_gateway.services.kernels.remotemanager import RemoteMappingKernelManager\n\n\nclass SessionManager(LoggingConfigurable):\n    \"\"\"Simple implementation of the SessionManager interface that allows clients\n    to associate basic metadata with a kernel.\n\n    Parameters\n    ----------\n    kernel_manager : RemoteMappingKernelManager\n        Used to start a kernel when creating a session\n\n    Attributes\n    ----------\n    kernel_manager : RemoteMappingKernelManager\n        Used to start a kernel when creating a session\n    _sessions : list\n        Sessions\n    _columns : list\n        Session metadata key names\n    \"\"\"\n\n    def __init__(self, kernel_manager: RemoteMappingKernelManager, *args, **kwargs):\n        \"\"\"Initialize the session manager.\"\"\"\n        super().__init__(*args, **kwargs)\n        self.kernel_manager = kernel_manager\n        self._sessions = []\n        self._columns = [\"session_id\", \"path\", \"kernel_id\"]\n\n    def session_exists(self, path: str, *args, **kwargs) -> bool:\n        \"\"\"Checks to see if the session with the given path value exists.\n\n        Parameters\n        ----------\n        path : str\n            Session path value to search on\n\n        Returns\n        -------\n        bool\n        \"\"\"\n        return bool([item for item in self._sessions if item[\"path\"] == path])\n\n    def new_session_id(self) -> str:\n        \"\"\"Creates a uuid for a new session.\"\"\"\n        return str(uuid.uuid4())\n\n    async def create_session(\n        self,\n        path: Optional[str] = None,\n        kernel_name: Optional[str] = None,\n        
kernel_id: Optional[str] = None,\n        *args,\n        **kwargs,\n    ) -> dict:\n        \"\"\"Creates a session and returns its model.\n\n        Launches a kernel and stores the session metadata for later lookup.\n\n        Parameters\n        ----------\n        path : str\n            Path value to store in the session metadata\n        kernel_name : str\n            Kernel spec name\n        kernel_id : str\n            Existing kernel ID to bind to the session (unsupported)\n\n        Returns\n        -------\n        dict\n            Session model\n        \"\"\"\n        session_id = self.new_session_id()\n        # allow nbm to specify kernels cwd\n        kernel_id = await self.kernel_manager.start_kernel(path=path, kernel_name=kernel_name)\n        return self.save_session(session_id, path=path, kernel_id=kernel_id)\n\n    def save_session(\n        self,\n        session_id: str,\n        path: Optional[str] = None,\n        kernel_id: Optional[str] = None,\n        *args,\n        **kwargs,\n    ) -> dict:\n        \"\"\"Saves the metadata for the session with the given `session_id`.\n\n        Given a `session_id` (and any other of the arguments), this method\n        appends a dictionary to the in-memory list of sessions.\n\n        Parameters\n        ----------\n        session_id : str\n            UUID for the session; this method must be given a session_id\n        path : str\n            Path for the given notebook\n        kernel_id : str\n            ID for the kernel associated with this session\n\n        Returns\n        -------\n        dict\n            Session model with `session_id`, `path`, and `kernel_id` keys\n        \"\"\"\n        self._sessions.append({\"session_id\": session_id, \"path\": path, \"kernel_id\": kernel_id})\n\n        return self.get_session(session_id=session_id)\n\n    def get_session_by_key(self, key: Hashable, val: Any, *args, **kwargs) -> Optional[dict]:\n        \"\"\"Gets the first session with the 
given key/value pair.\n\n        Parameters\n        ----------\n        key : hashable\n            Session metadata key to match\n        value : any\n            Session metadata value to match\n\n        Returns\n        -------\n        dict\n            Matching session model or None if not found\n        \"\"\"\n        s = [item for item in self._sessions if item[key] == val]\n        return None if not s else s[0]\n\n    def get_session(self, **kwargs) -> dict:\n        \"\"\"Returns the model for a particular session.\n\n        Takes a keyword argument and searches for the value in the in-memory\n        session store. Returns the entire session model.\n\n        Parameters\n        ----------\n        **kwargs : keyword argument\n            One of the key/value pairs from `_columns`\n\n        Raises\n        ------\n        TypeError\n            If there are no kwargs or none of them match a key/column used in\n            the metadata\n        tornado.web.HTTPError\n            404 Not Found if no session matches the provided metadata\n\n        Returns\n        -------\n        model : dict\n            All the information from the session described by the kwarg\n        \"\"\"\n        if not kwargs:\n            msg = \"Must specify a column to query\"\n            raise TypeError(msg)\n\n        for param in kwargs:\n            if param not in self._columns:\n                msg = f\"No such column: {param}\"\n                raise TypeError(msg)\n\n        # multiple columns are never passed into kwargs so just using the\n        # first and only one.\n        column = next(iter(kwargs.keys()))\n        row = self.get_session_by_key(column, kwargs[column])\n\n        if not row:\n            raise web.HTTPError(404, \"Session not found: %s\" % kwargs[column])\n\n        return self.row_to_model(row)\n\n    def update_session(self, session_id: str, *args, **kwargs) -> None:\n        \"\"\"Updates the values in the session store.\n\n        
Update the values of the session model with the given `session_id`\n        with the values from the keyword arguments.\n\n        Parameters\n        ----------\n        session_id : str\n            UUID that identifies a session in the sqlite3 database\n        **kwargs : str\n            Key/value pairs to store\n\n        Raises\n        ------\n        KeyError\n            If no session matches the given `session_id`\n        \"\"\"\n        if not kwargs:\n            # no changes\n            return\n\n        row = self.get_session_by_key(\"session_id\", session_id)\n\n        if not row:\n            raise KeyError\n\n        self._sessions.remove(row)\n\n        if \"path\" in kwargs:\n            row[\"path\"] = kwargs[\"path\"]\n\n        if \"kernel_id\" in kwargs:\n            row[\"kernel_id\"] = kwargs[\"kernel_id\"]\n\n        self._sessions.append(row)\n\n    def row_to_model(self, row: dict, *args, **kwargs) -> dict:\n        \"\"\"Turns a \"row\" in the in-memory session store into a model dictionary.\n\n        Parameters\n        ----------\n        row : dict\n            Maps `id` to `session_id`, `notebook` to a dict containing the\n            `path`, and `kernel` to the kernel model looked up using the\n            `kernel_id`\n        \"\"\"\n        if row[\"kernel_id\"] not in self.kernel_manager:\n            # The kernel was killed or died without deleting the session.\n            # We can't use delete_session here because that tries to find\n            # and shut down the kernel.\n            self._sessions.remove(row)\n            raise KeyError\n\n        model = {\n            \"id\": row[\"session_id\"],\n            \"notebook\": {\"path\": row[\"path\"]},\n            \"kernel\": self.kernel_manager.kernel_model(row[\"kernel_id\"]),\n        }\n        return model\n\n    def list_sessions(self, *args, **kwargs) -> List[dict]:\n        \"\"\"Returns a list of dictionaries containing all the information from\n        the 
session store.\n\n        Returns\n        -------\n        list\n            Dictionaries from `row_to_model`\n        \"\"\"\n        return [self.row_to_model(r) for r in self._sessions]\n\n    async def delete_session(self, session_id: str, *args, **kwargs) -> None:\n        \"\"\"Deletes the session in the session store with given `session_id`.\n\n        Raises\n        ------\n        KeyError\n            If the `session_id` is not in the store\n        \"\"\"\n        # Check that session exists before deleting\n        s = self.get_session_by_key(\"session_id\", session_id)\n        if not s:\n            raise KeyError\n\n        await self.kernel_manager.shutdown_kernel(s[\"kernel_id\"])\n        self._sessions.remove(s)\n"
  },
  {
    "path": "enterprise_gateway/tests/__init__.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom tornado import ioloop\n\n\ndef teardown():\n    \"\"\"The test fixture appears to leak something on certain platforms that\n    endlessly tries an async socket connect and fails after the tests end.\n    As a stopgap, force a cleanup here.\n    \"\"\"\n    ioloop.IOLoop.current().stop()\n    # Close is not necessary since process termination closes the loop.  This was causing intermittent\n    # `Event loop is closed` exceptions.  These didn't affect the test results, but produced output that\n    # was otherwise misleading noise.\n    # ioloop.IOLoop.current().close(True)\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/failing_code2.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import not-a-real-module\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"2.7.10\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/failing_code3.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import not-a-real-module\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.4.3\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/kernel_api2.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# API Creation \\n\",\n    \"This notebook is a sample of how to author a REST API in the notebook environment.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import os, json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"hello_message = 'hello {}'\\n\",\n    \"people = ['Corey', 'Nitin', 'Pete']\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Hello\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello\\n\",\n    \"print(hello_message.format('world'))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'args': { \\n\",\n    \"        'person' : people\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/person\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_person = req['args']['person'][0]\\n\",\n    \"print(hello_message.format(hello_person))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'args': { \\n\",\n    \"        'person' : people\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   
\"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/persons\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_persons = req['args']['person']\\n\",\n    \"print(hello_message.format(', '.join(hello_persons)))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/people\\n\",\n    \"hello_people = people\\n\",\n    \"print(hello_message.format(', '.join(hello_people)))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'person' : 'test_person'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/:person\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"print(hello_message.format(req['path']['person']))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Messages\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /message\\n\",\n    \"print(hello_message)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : 'test value'\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   
\"source\": [\n    \"# PUT /message\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_message = req['body']\\n\",\n    \"print(hello_message)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## People\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /people\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : ['Rick', 'Maggie', 'Glenn', 'Carol', 'Daryl']\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# POST /people\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people = req['body']\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : 'Michonne'\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# PUT /people\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people.append(req['body'])\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'index' : 1\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   
\"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# DELETE /people/:index\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people.remove(people[int(req['path']['index'])])\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Error\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /error\\n\",\n    \"this cell should print an error in the reponse\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import sys\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /stderr\\n\",\n    \"print 'I am text on stdout'\\n\",\n    \"print >> sys.stderr, 'I am text on stderr'\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Misc\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'time': 1\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /sleep/:time\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"from time import sleep\\n\",\n    \"sleep_time = int(req['path']['time'])\\n\",\n    \"sleep(sleep_time)\\n\",\n    \"print(\\\"Slept for {} seconds\\\".format(sleep_time))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n  
 \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /execute_result\\n\",\n    \"1+1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'headers' : {\\n\",\n    \"        'Content-Type': 'application/json'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /content-type\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"print(req['headers']['Content-Type'])\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /multi\\n\",\n    \"x = 1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /multi\\n\",\n    \"print('x is {}'.format(x))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /env_kernel_gateway\\n\",\n    \"print('KERNEL_GATEWAY is {}'.format(os.getenv('KERNEL_GATEWAY')))\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"2.7.10\"\n  }\n },\n \"nbformat\": 4,\n 
\"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/kernel_api3.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# API Creation \\n\",\n    \"This notebook is a sample of how to author a REST API in the notebook environment.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import os, json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"hello_message = 'hello {}'\\n\",\n    \"people = ['Corey', 'Nitin', 'Pete']\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Hello\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello\\n\",\n    \"print(hello_message.format('world'))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'args': { \\n\",\n    \"        'person' : people\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/person\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_person = req['args']['person'][0]\\n\",\n    \"print(hello_message.format(hello_person))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'args': { \\n\",\n    \"        'person' : people\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   
\"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/persons\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_persons = req['args']['person']\\n\",\n    \"print(hello_message.format(', '.join(hello_persons)))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/people\\n\",\n    \"hello_people = people\\n\",\n    \"print(hello_message.format(', '.join(hello_people)))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'person' : 'test_person'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /hello/:person\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"print(hello_message.format(req['path']['person']))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Messages\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /message\\n\",\n    \"print(hello_message)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : 'test value'\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   
\"source\": [\n    \"# PUT /message\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"hello_message = req['body']\\n\",\n    \"print(hello_message)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## People\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /people\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : ['Rick', 'Maggie', 'Glenn', 'Carol', 'Daryl']\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# POST /people\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people = req['body']\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'body' : 'Michonne'\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# PUT /people\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people.append(req['body'])\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'index' : 1\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   
\"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# DELETE /people/:index\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"people.remove(people[int(req['path']['index'])])\\n\",\n    \"print(json.dumps(people))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Error\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /error\\n\",\n    \"this cell should print an error in the reponse\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import sys\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /stderr\\n\",\n    \"print('I am text on stdout')\\n\",\n    \"print('I am text on stderr', file=sys.stderr)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Misc\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'path' : {\\n\",\n    \"        'time': 1\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /sleep/:time\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"from time import sleep\\n\",\n    \"sleep_time = int(req['path']['time'])\\n\",\n    \"sleep(sleep_time)\\n\",\n    \"print(\\\"Slept for {} seconds\\\".format(sleep_time))\"\n   ]\n  },\n  {\n   \"cell_type\": 
\"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /execute_result\\n\",\n    \"1+1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"REQUEST = json.dumps({\\n\",\n    \"    'headers' : {\\n\",\n    \"        'Content-Type': 'application/json'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /content-type\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"print(req['headers']['Content-Type'])\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /multi\\n\",\n    \"x = 1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /multi\\n\",\n    \"print('x is {}'.format(x))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /env_kernel_gateway\\n\",\n    \"print('KERNEL_GATEWAY is {}'.format(os.getenv('KERNEL_GATEWAY')))\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.5.1\"\n  }\n },\n \"nbformat\": 
4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json",
    "content": "{\n  \"display_name\": \"Kernel Defaults Testing\",\n  \"language\": \"python\",\n  \"env\": {\n    \"KERNEL_VAR1\": \"kernel_var1_default\",\n    \"KERNEL_VAR2\": \"kernel_var2_default\",\n    \"OTHER_VAR1\": \"other_var1_default\",\n    \"OTHER_VAR2\": \"other_var2_default\",\n    \"PROCESS_VAR1\": \"process_var1_default\",\n    \"PROCESS_VAR2\": \"process_var2_default\"\n  },\n  \"argv\": [\"python\", \"-m\", \"ipykernel_launcher\", \"-f\", \"{connection_file}\"]\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/public/index.html",
    "content": "<!doctype html>\n<html>\n  <head>\n    <title>Hello world!</title>\n  </head>\n  <body>\n    <h1>Hello world!</h1>\n  </body>\n</html>\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/responses_2.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /json\\n\",\n    \"print '''{ \\\"hello\\\" : \\\"world\\\"}'''\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /json\\n\",\n    \"print json.dumps({\\n\",\n    \"    'headers' : {\\n\",\n    \"        'Content-Type' : 'application/json'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /nocontent\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /nocontent\\n\",\n    \"print json.dumps({\\n\",\n    \"    'status' : 204\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /etag\\n\",\n    \"print '''{ \\\"hello\\\" : \\\"world\\\"}'''\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /etag\\n\",\n    \"print json.dumps({\\n\",\n    \"    'headers' : {\\n\",\n    \"        'Content-Type' : 'application/json',\\n\",\n    \"        'Etag' : '1234567890'\\n\",\n    \"    }\\n\",\n    \"})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n 
  \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.10\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/responses_3.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /json\\n\",\n    \"print('''{ \\\"hello\\\" : \\\"world\\\"}''')\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /json\\n\",\n    \"print(json.dumps({\\n\",\n    \"        'headers' : {\\n\",\n    \"            'Content-Type' : 'application/json'\\n\",\n    \"        }\\n\",\n    \"    })\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /nocontent\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /nocontent\\n\",\n    \"print(json.dumps({\\n\",\n    \"        'status' : 204\\n\",\n    \"    })\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /etag\\n\",\n    \"print('''{ \\\"hello\\\" : \\\"world\\\"}''')\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# ResponseInfo GET /etag\\n\",\n    \"print(json.dumps({\\n\",\n    \"        'headers' : {\\n\",\n    \"            'Content-Type' : 'application/json',\\n\",\n    \"            'Etag' : '1234567890'\\n\",\n    \"    
    }\\n\",\n    \"    })\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.4.3\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/simple_api2.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"name = 'Test Name'\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /name\\n\",\n    \"print name \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# POST /name\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"name = req['body']\\n\",\n    \"print(name)\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3.0\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"2.7.10\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/simple_api3.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import json\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"name = 'Test Name'\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /name\\n\",\n    \"print(name)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# POST /name\\n\",\n    \"req = json.loads(REQUEST)\\n\",\n    \"name = req['body']\\n\",\n    \"print(name)\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.4.3\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/unknown_kernel.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# GET /fake\",\n    \"print 'I am not a real lang!'\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Fake Language 2000\",\n   \"language\": \"fakelang\",\n   \"name\": \"fakelang2000\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"fakelang\",\n    \"version\": 2000\n   },\n   \"file_extension\": \".fl\",\n   \"mimetype\": \"text/x-fake-lang\",\n   \"name\": \"fakelang\",\n   \"nbconvert_exporter\": \"fakelang\",\n   \"pygments_lexer\": \"fakelang\",\n   \"version\": \"2000\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/zen2.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import this\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"2.7.10\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/resources/zen3.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import this\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.4.3\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "enterprise_gateway/tests/test_enterprise_gateway.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for jupyter-enterprise-gateway.\"\"\"\n\nimport os\nimport time\nimport uuid\nfrom tempfile import TemporaryDirectory\n\nfrom tornado.escape import json_decode, url_escape\nfrom tornado.testing import gen_test\n\nfrom .test_handlers import TestHandlers\n\npjoin = os.path.join\n\n\nclass TestEnterpriseGateway(TestHandlers):\n    def setUp(self):\n        super().setUp()\n        # Enable debug logging if necessary\n        # app = self.get_app()\n        # app.settings['kernel_manager'].log.level = logging.DEBUG\n\n    @gen_test\n    def test_max_kernels_per_user(self):\n        \"\"\"\n        Number of kernels should be limited per user.\n        \"\"\"\n\n        self.get_app()\n        self.app.max_kernels_per_user = 1\n\n        # Request a kernel for bob\n        bob_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body='{\"env\": {\"KERNEL_USERNAME\": \"bob\"} }'\n        )\n        self.assertEqual(bob_response.code, 201)\n\n        # Request a kernel for alice\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n        )\n        self.assertEqual(alice_response.code, 201)\n\n        # Request another for alice - 403 expected\n        failed_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n            raise_error=False,\n        )\n        self.assertEqual(failed_response.code, 403)\n\n        # Shut down the kernel for alice\n        kernel = json_decode(alice_response.body)\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels/\" + url_escape(kernel[\"id\"])), 
method=\"DELETE\"\n        )\n        self.assertEqual(response.code, 204)\n\n        # Try again for alice - expect success\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n        )\n        self.assertEqual(alice_response.code, 201)\n\n    @gen_test\n    def test_authorization(self):\n        \"\"\"\n        Verify authorized users can start a kernel, unauthorized users cannot\n        \"\"\"\n\n        self.get_app()\n        self.app.authorized_users = {\"bob\", \"alice\", \"bad_guy\"}\n        self.app.unauthorized_users = {\"bad_guy\"}\n\n        # Request a kernel for alice\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n        )\n        self.assertEqual(alice_response.code, 201)\n\n        # Request a kernel for bad_guy - 403 expected\n        failed_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"bad_guy\"} }',\n            raise_error=False,\n        )\n        self.assertEqual(failed_response.code, 403)\n\n    @gen_test\n    def test_port_range(self):\n        \"\"\"\n        Verify port-range behaviors are correct\n        \"\"\"\n\n        app = self.get_app()\n        self.app.port_range = \"10000..10999\"  # range too small\n        # Request a kernel for alice - 500 expected\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n            raise_error=False,\n        )\n        self.assertEqual(alice_response.code, 500)\n\n        self.app.port_range = \"100..11099\"  # invalid lower port\n    
    # Request a kernel for alice - 500 expected\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n            raise_error=False,\n        )\n        self.assertEqual(alice_response.code, 500)\n\n        self.app.port_range = \"10000..65537\"  # invalid upper port\n        # Request a kernel for alice - 500 expected\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n            raise_error=False,\n        )\n        self.assertEqual(alice_response.code, 500)\n\n        self.app.port_range = \"30000..31000\"  # valid range\n        # Request a kernel for alice - 201 expected\n        alice_response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_USERNAME\": \"alice\"} }',\n        )\n        self.assertEqual(alice_response.code, 201)\n\n        # validate ports are in range\n        body = json_decode(alice_response.body)\n        kernel_id = body[\"id\"]\n        port_list = app.settings[\"kernel_manager\"]._kernels.get(kernel_id).ports\n\n        for port in port_list:\n            self.assertTrue(30000 <= port <= 31000)\n\n    @gen_test\n    def test_dynamic_updates(self):\n        app = self.app  # Get the actual EnterpriseGatewayApp instance\n        s1 = time.time()\n        name = app.config_file_name + \".py\"\n        with TemporaryDirectory(\"_1\") as td1:\n            os.environ[\"JUPYTER_CONFIG_DIR\"] = td1\n            config_file = pjoin(td1, name)\n            with open(config_file, \"w\") as f:\n                f.writelines(\n                    [\n                        \"c.EnterpriseGatewayApp.impersonation_enabled = False\\n\",\n                        
\"c.AsyncMappingKernelManager.cull_connected = False\\n\",\n                    ]\n                )\n            #  app.jupyter_path.append(td1)\n            app.load_config_file()\n            app.add_dynamic_configurable(\"EnterpriseGatewayApp\", app)\n            app.add_dynamic_configurable(\"RemoteMappingKernelManager\", app.kernel_manager)\n            with self.assertRaises(RuntimeError):\n                app.add_dynamic_configurable(\"Bogus\", app.log)\n\n            self.assertEqual(app.impersonation_enabled, False)\n            self.assertEqual(app.kernel_manager.cull_connected, False)\n\n            # Ensure file update doesn't happen during same second as initial value.\n            # This is necessary on test systems that don't have finer-grained\n            # timestamps (of less than a second).\n            s2 = time.time()\n            if s2 - s1 < 1.0:\n                time.sleep(1.0 - (s2 - s1))\n            # update config file\n            with open(config_file, \"w\") as f:\n                f.writelines(\n                    [\n                        \"c.EnterpriseGatewayApp.impersonation_enabled = True\\n\",\n                        \"c.AsyncMappingKernelManager.cull_connected = True\\n\",\n                    ]\n                )\n\n            # trigger reload and verify updates\n            app.update_dynamic_configurables()\n            self.assertEqual(app.impersonation_enabled, True)\n            self.assertEqual(app.kernel_manager.cull_connected, True)\n\n            # repeat to ensure no unexpected changes occurred\n            app.update_dynamic_configurables()\n            self.assertEqual(app.impersonation_enabled, True)\n            self.assertEqual(app.kernel_manager.cull_connected, True)\n\n    @gen_test\n    def test_kernel_id_env_var(self):\n        \"\"\"\n        Verify kernel is created with the given kernel id\n        \"\"\"\n        expected_kernel_id = str(uuid.uuid4())\n        kernel_response = yield 
self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body='{\"env\": {\"KERNEL_ID\": \"%s\"}}' % expected_kernel_id,\n            raise_error=False,\n        )\n        self.assertEqual(kernel_response.code, 201)\n        kernel = json_decode(kernel_response.body)\n        self.assertEqual(expected_kernel_id, kernel[\"id\"])\n"
  },
  {
    "path": "enterprise_gateway/tests/test_gatewayapp.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for basic gateway app behavior.\"\"\"\n\nimport logging\nimport os\nimport unittest\n\nfrom tornado.testing import AsyncHTTPTestCase, ExpectLog\n\nfrom enterprise_gateway.enterprisegatewayapp import EnterpriseGatewayApp\nfrom enterprise_gateway.mixins import EnterpriseGatewayConfigMixin\n\nRESOURCES = os.path.join(os.path.dirname(__file__), \"resources\")\n\n\nclass TestGatewayAppConfig(unittest.TestCase):\n    \"\"\"Tests configuration of the gateway app.\"\"\"\n\n    def setUp(self):\n        \"\"\"Saves a copy of the environment.\"\"\"\n        self.environ = dict(os.environ)\n\n    def tearDown(self):\n        \"\"\"Resets the environment.\"\"\"\n        os.environ.clear()\n        os.environ.update(self.environ)\n\n    def _assert_envs_to_traitlets(self, env_prefix: str):\n        app = EnterpriseGatewayApp()\n        app.init_configurables()\n\n        self.assertEqual(app.port, 1234)\n        self.assertEqual(app.port_retries, 4321)\n        self.assertEqual(app.ip, \"1.1.1.1\")\n        self.assertEqual(app.auth_token, \"fake-token\")\n        self.assertEqual(app.allow_credentials, \"true\")\n        self.assertEqual(app.allow_headers, \"Authorization\")\n        self.assertEqual(app.allow_methods, \"GET\")\n        self.assertEqual(app.allow_origin, \"*\")\n        self.assertEqual(app.expose_headers, \"X-Fake-Header\")\n        self.assertEqual(app.max_age, \"5\")\n        self.assertEqual(app.base_url, \"/fake/path\")\n        self.assertEqual(app.max_kernels, 1)\n        self.assertEqual(app.default_kernel_name, \"fake_kernel\")\n        self.assertEqual(app.keyfile, \"/test/fake.key\")\n        self.assertEqual(app.certfile, \"/test/fake.crt\")\n        self.assertEqual(app.client_ca, \"/test/fake_ca.crt\")\n        self.assertEqual(app.ssl_version, 3)\n        if env_prefix == \"EG_\":  # These options did not exist in 
JKG\n            self.assertEqual(app.kernel_session_manager.enable_persistence, True)\n            self.assertEqual(\n                app.availability_mode, EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION\n            )\n\n    def test_config_env_vars_bc(self):\n        \"\"\"B/C env vars should be honored for traitlets.\"\"\"\n        # Environment vars are always strings\n        os.environ[\"KG_PORT\"] = \"1234\"\n        os.environ[\"KG_PORT_RETRIES\"] = \"4321\"\n        os.environ[\"KG_IP\"] = \"1.1.1.1\"\n        os.environ[\"KG_AUTH_TOKEN\"] = \"fake-token\"\n        os.environ[\"KG_ALLOW_CREDENTIALS\"] = \"true\"\n        os.environ[\"KG_ALLOW_HEADERS\"] = \"Authorization\"\n        os.environ[\"KG_ALLOW_METHODS\"] = \"GET\"\n        os.environ[\"KG_ALLOW_ORIGIN\"] = \"*\"\n        os.environ[\"KG_EXPOSE_HEADERS\"] = \"X-Fake-Header\"\n        os.environ[\"KG_MAX_AGE\"] = \"5\"\n        os.environ[\"KG_BASE_URL\"] = \"/fake/path\"\n        os.environ[\"KG_MAX_KERNELS\"] = \"1\"\n        os.environ[\"KG_DEFAULT_KERNEL_NAME\"] = \"fake_kernel\"\n        os.environ[\"KG_KEYFILE\"] = \"/test/fake.key\"\n        os.environ[\"KG_CERTFILE\"] = \"/test/fake.crt\"\n        os.environ[\"KG_CLIENT_CA\"] = \"/test/fake_ca.crt\"\n        os.environ[\"KG_SSL_VERSION\"] = \"3\"\n\n        self._assert_envs_to_traitlets(\"KG_\")\n\n    def test_config_env_vars(self):\n        \"\"\"Env vars should be honored for traitlets.\"\"\"\n        # Environment vars are always strings\n        os.environ[\"EG_PORT\"] = \"1234\"\n        os.environ[\"EG_PORT_RETRIES\"] = \"4321\"\n        os.environ[\"EG_IP\"] = \"1.1.1.1\"\n        os.environ[\"EG_AUTH_TOKEN\"] = \"fake-token\"\n        os.environ[\"EG_ALLOW_CREDENTIALS\"] = \"true\"\n        os.environ[\"EG_ALLOW_HEADERS\"] = \"Authorization\"\n        os.environ[\"EG_ALLOW_METHODS\"] = \"GET\"\n        os.environ[\"EG_ALLOW_ORIGIN\"] = \"*\"\n        os.environ[\"EG_EXPOSE_HEADERS\"] = \"X-Fake-Header\"\n        
os.environ[\"EG_MAX_AGE\"] = \"5\"\n        os.environ[\"EG_BASE_URL\"] = \"/fake/path\"\n        os.environ[\"EG_MAX_KERNELS\"] = \"1\"\n        os.environ[\"EG_DEFAULT_KERNEL_NAME\"] = \"fake_kernel\"\n        os.environ[\"EG_KEYFILE\"] = \"/test/fake.key\"\n        os.environ[\"EG_CERTFILE\"] = \"/test/fake.crt\"\n        os.environ[\"EG_CLIENT_CA\"] = \"/test/fake_ca.crt\"\n        os.environ[\"EG_SSL_VERSION\"] = \"3\"\n        os.environ[\"EG_KERNEL_SESSION_PERSISTENCE\"] = (\n            \"True\"  # availability mode will be defaulted to replication\n        )\n\n        self._assert_envs_to_traitlets(\"EG_\")\n\n    def test_ssl_options_no_config(self):\n        app = EnterpriseGatewayApp()\n        ssl_options = app._build_ssl_options()\n        self.assertIsNone(ssl_options)\n\n    def test_authorizer_class_default(self):\n        \"\"\"Test that authorizer_class defaults to None when not configured.\"\"\"\n        app = EnterpriseGatewayApp()\n        app.init_configurables()\n        app.init_webapp()\n\n        # By default, should use AllowAllAuthorizer\n        from jupyter_server.auth.authorizer import AllowAllAuthorizer\n\n        authorizer = app.web_app.settings.get(\"authorizer\")\n        self.assertIsNotNone(authorizer)\n        self.assertIsInstance(authorizer, AllowAllAuthorizer)\n\n    def test_authorizer_class_env_var(self):\n        \"\"\"Test that authorizer_class can be configured via environment variable.\"\"\"\n        # Create a custom authorizer for testing\n        from jupyter_server.auth.authorizer import Authorizer\n\n        class CustomTestAuthorizer(Authorizer):\n            \"\"\"Test authorizer for validation\"\"\"\n\n            def is_authorized(self, handler, user, action, resource):\n                return True\n\n        # Set the environment variable to point to our custom authorizer\n        # We need to make it importable first\n        import sys\n        from types import ModuleType\n\n        # Create a test 
module\n        test_module = ModuleType(\"test_auth_module\")\n        test_module.CustomTestAuthorizer = CustomTestAuthorizer\n        sys.modules[\"test_auth_module\"] = test_module\n\n        try:\n            os.environ[\"EG_AUTHORIZER_CLASS\"] = \"test_auth_module.CustomTestAuthorizer\"\n\n            app = EnterpriseGatewayApp()\n            app.init_configurables()\n            app.init_webapp()\n\n            # Should use our custom authorizer\n            authorizer = app.web_app.settings.get(\"authorizer\")\n            self.assertIsNotNone(authorizer)\n            self.assertIsInstance(authorizer, CustomTestAuthorizer)\n        finally:\n            # Clean up\n            if \"test_auth_module\" in sys.modules:\n                del sys.modules[\"test_auth_module\"]\n\n\nclass TestGatewayAppBase(AsyncHTTPTestCase, ExpectLog):\n    \"\"\"Base class for integration style tests using HTTP/Websockets against an\n    instance of the gateway app.\n\n    Attributes\n    ----------\n    app : KernelGatewayApp\n        Instance of the app\n    \"\"\"\n\n    def tearDown(self):\n        \"\"\"Shuts down the app after test run.\"\"\"\n        if self.app:\n            self.app.shutdown()\n\n        super().tearDown()\n\n    def get_app(self):\n        \"\"\"Returns a tornado.web.Application for the Tornado test runner.\"\"\"\n        if hasattr(self, \"_app\"):\n            return self._app\n        self.app = EnterpriseGatewayApp(log_level=logging.CRITICAL)\n        self.setup_app()\n        self.app.init_configurables()\n        self.setup_configurables()\n        self.app.init_webapp()\n        return self.app.web_app\n\n    def setup_app(self):\n        \"\"\"Override to configure KernelGatewayApp instance before initializing\n        configurables and the web app.\n        \"\"\"\n        pass\n\n    def setup_configurables(self):\n        \"\"\"Override to configure further settings, such as the personality.\"\"\"\n        pass\n"
  },
  {
    "path": "enterprise_gateway/tests/test_handlers.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for jupyter-websocket mode.\"\"\"\n\nimport json\nimport os\n\nfrom tornado.escape import json_decode, json_encode, url_escape\nfrom tornado.gen import Return, coroutine\nfrom tornado.httpclient import HTTPRequest\nfrom tornado.testing import gen_test\nfrom tornado.websocket import websocket_connect\n\nfrom .test_gatewayapp import RESOURCES, TestGatewayAppBase\n\n\nclass TestHandlers(TestGatewayAppBase):\n    \"\"\"\n    Base class for jupyter-websocket mode tests that spawn kernels.\n    \"\"\"\n\n    def setup_app(self):\n        \"\"\"Configure JUPYTER_PATH so that we can use local kernelspec files for testing.\"\"\"\n        os.environ[\"JUPYTER_PATH\"] = RESOURCES\n\n        # These are required for setup of test_kernel_defaults\n        # Note: We still reference the DEPRECATED config parameter and environment variable so that\n        # we can test client_envs and inherited_envs, respectively.\n        self.app.env_whitelist = [\"TEST_VAR\", \"OTHER_VAR1\", \"OTHER_VAR2\"]\n        os.environ[\"EG_ENV_PROCESS_WHITELIST\"] = \"PROCESS_VAR1,PROCESS_VAR2\"\n        os.environ[\"PROCESS_VAR1\"] = \"process_var1_override\"\n\n    def tearDown(self):\n        \"\"\"Shuts down the app after test run.\"\"\"\n\n        # Clean out items added to env\n        if \"JUPYTER_PATH\" in os.environ:\n            os.environ.pop(\"JUPYTER_PATH\")\n        if \"EG_ENV_PROCESS_WHITELIST\" in os.environ:\n            os.environ.pop(\"EG_ENV_PROCESS_WHITELIST\")\n        if \"PROCESS_VAR1\" in os.environ:\n            os.environ.pop(\"PROCESS_VAR1\")\n\n        super().tearDown()\n\n    @coroutine\n    def spawn_kernel(self, kernel_body=\"{}\"):\n        \"\"\"Spawns a kernel using the gateway API and connects a websocket\n        client to it.\n\n        Parameters\n        ----------\n        kernel_body : str\n            POST /api/kernels body\n\n 
       Returns\n        -------\n        Future\n            Promise of a WebSocketClientConnection\n        \"\"\"\n        # Request a kernel\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=kernel_body\n        )\n        self.assertEqual(response.code, 201)\n\n        # Connect to the kernel via websocket\n        kernel = json_decode(response.body)\n        ws_url = \"ws://localhost:{}/api/kernels/{}/channels\".format(\n            self.get_http_port(), url_escape(kernel[\"id\"])\n        )\n\n        ws = yield websocket_connect(ws_url)\n        raise Return(ws)\n\n    def execute_request(self, code):\n        \"\"\"Creates an execute_request message.\n\n        Parameters\n        ----------\n        code : str\n            Code to execute\n\n        Returns\n        -------\n        dict\n            The message\n        \"\"\"\n        return {\n            \"header\": {\n                \"username\": \"\",\n                \"version\": \"5.0\",\n                \"session\": \"\",\n                \"msg_id\": \"fake-msg-id\",\n                \"msg_type\": \"execute_request\",\n            },\n            \"parent_header\": {},\n            \"channel\": \"shell\",\n            \"content\": {\n                \"code\": code,\n                \"silent\": False,\n                \"store_history\": False,\n                \"user_expressions\": {},\n            },\n            \"metadata\": {},\n            \"buffers\": {},\n        }\n\n    @coroutine\n    def await_stream(self, ws):\n        \"\"\"Returns stream output associated with an execute_request.\"\"\"\n        while 1:\n            msg = yield ws.read_message()\n            msg = json_decode(msg)\n            msg_type = msg[\"msg_type\"]\n            parent_msg_id = msg[\"parent_header\"][\"msg_id\"]\n            if msg_type == \"stream\" and parent_msg_id == \"fake-msg-id\":\n                raise Return(msg[\"content\"])\n\n\nclass 
TestDefaults(TestHandlers):\n    \"\"\"Tests gateway behavior.\"\"\"\n\n    @gen_test\n    def test_startup(self):\n        \"\"\"Root of kernels resource should be OK.\"\"\"\n        self.app.web_app.settings[\"eg_list_kernels\"] = True\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"))\n        self.assertEqual(response.code, 200)\n\n    @gen_test\n    def test_headless(self):\n        \"\"\"Other notebook resources should not exist.\"\"\"\n        response = yield self.http_client.fetch(self.get_url(\"/api/contents\"), raise_error=False)\n        self.assertEqual(response.code, 404)\n        response = yield self.http_client.fetch(self.get_url(\"/\"), raise_error=False)\n        self.assertEqual(response.code, 404)\n        response = yield self.http_client.fetch(self.get_url(\"/tree\"), raise_error=False)\n        self.assertEqual(response.code, 404)\n\n    @gen_test\n    def test_check_origin(self):\n        \"\"\"Allow origin setting should pass through to base handlers.\"\"\"\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernelspecs\"),\n            method=\"GET\",\n            headers={\"Origin\": \"fake.com:8888\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 404)\n\n        app = self.get_app()\n        app.settings[\"allow_origin\"] = \"*\"\n\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernelspecs\"),\n            method=\"GET\",\n            headers={\"Origin\": \"fake.com:8888\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 200)\n\n    @gen_test\n    def test_auth_token(self):\n        \"\"\"All server endpoints should check the configured auth token.\"\"\"\n        # Set token requirement\n        app = self.get_app()\n        app.settings[\"eg_auth_token\"] = \"fake-token\"\n\n        # Requst API without the token\n        response = yield self.http_client.fetch(\n   
         self.get_url(\"/api\"), method=\"GET\", raise_error=False\n        )\n        self.assertEqual(response.code, 401)\n\n        # Now with it\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api\"),\n            method=\"GET\",\n            headers={\"Authorization\": \"token fake-token\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 200)\n\n        # Request kernelspecs without the token\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernelspecs\"), method=\"GET\", raise_error=False\n        )\n        self.assertEqual(response.code, 401)\n\n        # Now with it\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernelspecs\"),\n            method=\"GET\",\n            headers={\"Authorization\": \"token fake-token\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 200)\n\n        # Request a kernel without the token\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"{}\", raise_error=False\n        )\n        self.assertEqual(response.code, 401)\n\n        # Request with the token now\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n            method=\"POST\",\n            body=\"{}\",\n            headers={\"Authorization\": \"token fake-token\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 201)\n\n        kernel = json_decode(response.body)\n        # Request kernel info without the token\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels/\" + url_escape(kernel[\"id\"])),\n            method=\"GET\",\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 401)\n\n        # Now with it\n        response = yield self.http_client.fetch(\n            
self.get_url(\"/api/kernels/\" + url_escape(kernel[\"id\"])),\n            method=\"GET\",\n            headers={\"Authorization\": \"token fake-token\"},\n            raise_error=False,\n        )\n        self.assertEqual(response.code, 200)\n\n        # Request websocket connection without the token\n        ws_url = \"ws://localhost:{}/api/kernels/{}/channels\".format(\n            self.get_http_port(), url_escape(kernel[\"id\"])\n        )\n        # No option to ignore errors so try/except\n        try:\n            ws = yield websocket_connect(ws_url)\n        except Exception as ex:\n            self.assertEqual(ex.code, 401)\n        else:\n            self.assertTrue(False, \"no exception raised\")\n\n        # Now request the websocket with the token\n        ws_req = HTTPRequest(ws_url, headers={\"Authorization\": \"token fake-token\"})\n        ws = yield websocket_connect(ws_req)\n        ws.close()\n\n    @gen_test\n    def test_cors_headers(self):\n        \"\"\"All kernel endpoints should respond with configured CORS headers.\"\"\"\n        app = self.get_app()\n        app.settings[\"eg_allow_credentials\"] = \"false\"\n        app.settings[\"eg_allow_headers\"] = \"Authorization,Content-Type\"\n        app.settings[\"eg_allow_methods\"] = \"GET,POST\"\n        app.settings[\"eg_allow_origin\"] = \"https://jupyter.org\"\n        app.settings[\"eg_expose_headers\"] = \"X-My-Fake-Header\"\n        app.settings[\"eg_max_age\"] = \"600\"\n        app.settings[\"eg_list_kernels\"] = True\n\n        # Get kernels to check headers\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"), method=\"GET\")\n        self.assertEqual(response.code, 200)\n        self.assertEqual(response.headers[\"Access-Control-Allow-Credentials\"], \"false\")\n        self.assertEqual(\n            response.headers[\"Access-Control-Allow-Headers\"], \"Authorization,Content-Type\"\n        )\n        
self.assertEqual(response.headers[\"Access-Control-Allow-Methods\"], \"GET,POST\")\n        self.assertEqual(response.headers[\"Access-Control-Allow-Origin\"], \"https://jupyter.org\")\n        self.assertEqual(response.headers[\"Access-Control-Expose-Headers\"], \"X-My-Fake-Header\")\n        self.assertEqual(response.headers[\"Access-Control-Max-Age\"], \"600\")\n        self.assertEqual(response.headers.get(\"Content-Security-Policy\"), None)\n\n    @gen_test\n    def test_max_kernels(self):\n        \"\"\"Number of kernels should be limited.\"\"\"\n        app = self.get_app()\n        app.settings[\"eg_max_kernels\"] = 1\n\n        # Request a kernel\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"{}\"\n        )\n        self.assertEqual(response.code, 201)\n\n        # Request another\n        response2 = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"{}\", raise_error=False\n        )\n        self.assertEqual(response2.code, 403)\n\n        # Shut down the kernel\n        kernel = json_decode(response.body)\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels/\" + url_escape(kernel[\"id\"])), method=\"DELETE\"\n        )\n        self.assertEqual(response.code, 204)\n\n        # Try again\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"{}\"\n        )\n        self.assertEqual(response.code, 201)\n\n    @gen_test\n    def test_get_api(self):\n        \"\"\"Server should respond with the API version metadata.\"\"\"\n        response = yield self.http_client.fetch(self.get_url(\"/api\"))\n        self.assertEqual(response.code, 200)\n        info = json_decode(response.body)\n        self.assertIn(\"version\", info)\n        self.assertIn(\"gateway_version\", info)\n\n    @gen_test\n    def test_get_kernelspecs(self):\n        
\"\"\"Server should respond with kernel spec metadata.\"\"\"\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernelspecs\"))\n        self.assertEqual(response.code, 200)\n        specs = json_decode(response.body)\n        self.assertIn(\"kernelspecs\", specs)\n        self.assertIn(\"default\", specs)\n\n    @gen_test\n    def test_get_kernels(self):\n        \"\"\"Server should respond with running kernel information.\"\"\"\n        self.app.web_app.settings[\"eg_list_kernels\"] = True\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"))\n        self.assertEqual(response.code, 200)\n        kernels = json_decode(response.body)\n        self.assertEqual(len(kernels), 0)\n\n        # Launch a kernel\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"{}\"\n        )\n        self.assertEqual(response.code, 201)\n        kernel = json_decode(response.body)\n\n        # Check the list again\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"))\n        self.assertEqual(response.code, 200)\n        kernels = json_decode(response.body)\n        self.assertEqual(len(kernels), 1)\n        self.assertEqual(kernels[0][\"id\"], kernel[\"id\"])\n\n    @gen_test\n    def test_kernel_comm(self):\n        \"\"\"Default kernel should launch and accept commands.\"\"\"\n        ws = yield self.spawn_kernel()\n\n        # Send a request for kernel info\n        ws.write_message(\n            json_encode(\n                {\n                    \"header\": {\n                        \"username\": \"\",\n                        \"version\": \"5.0\",\n                        \"session\": \"\",\n                        \"msg_id\": \"fake-msg-id\",\n                        \"msg_type\": \"kernel_info_request\",\n                    },\n                    \"parent_header\": {},\n                    \"channel\": \"shell\",\n                    
\"content\": {},\n                    \"metadata\": {},\n                    \"buffers\": {},\n                }\n            )\n        )\n\n        # Assert the reply comes back. Test will timeout if this hangs.\n        # Note that this range may be side-effected by upstream changes,\n        # so we will add a print (and increase its length to 8).\n        for _ in range(8):\n            msg = yield ws.read_message()\n            msg = json_decode(msg)\n            if msg[\"msg_type\"] == \"kernel_info_reply\":\n                break\n        else:\n            self.assertTrue(False, \"never received kernel_info_reply\")\n        ws.close()\n\n    @gen_test\n    def test_no_discovery(self):\n        \"\"\"The list of kernels / sessions should be forbidden by default.\"\"\"\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"), raise_error=False)\n        self.assertEqual(response.code, 403)\n\n        response = yield self.http_client.fetch(self.get_url(\"/api/sessions\"), raise_error=False)\n        self.assertEqual(response.code, 403)\n\n    @gen_test\n    def test_crud_sessions(self):\n        \"\"\"Server should create, list, and delete sessions.\"\"\"\n        app = self.get_app()\n        app.settings[\"eg_list_kernels\"] = True\n\n        # Ensure no sessions by default\n        response = yield self.http_client.fetch(self.get_url(\"/api/sessions\"))\n        self.assertEqual(response.code, 200)\n        sessions = json_decode(response.body)\n        self.assertEqual(len(sessions), 0)\n\n        # Launch a session\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/sessions\"),\n            method=\"POST\",\n            body='{\"id\":\"any\",\"notebook\":{\"path\":\"anywhere\"},\"kernel\":{\"name\":\"python\"}}',\n        )\n        self.assertEqual(response.code, 201)\n        session = json_decode(response.body)\n\n        # Check the list again\n        response = yield 
self.http_client.fetch(self.get_url(\"/api/sessions\"))\n        self.assertEqual(response.code, 200)\n        sessions = json_decode(response.body)\n        self.assertEqual(len(sessions), 1)\n        self.assertEqual(sessions[0][\"id\"], session[\"id\"])\n\n        # Delete the session\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/sessions/\" + session[\"id\"]), method=\"DELETE\"\n        )\n        self.assertEqual(response.code, 204)\n\n        # Make sure the list is empty\n        response = yield self.http_client.fetch(self.get_url(\"/api/sessions\"))\n        self.assertEqual(response.code, 200)\n        sessions = json_decode(response.body)\n        self.assertEqual(len(sessions), 0)\n\n    @gen_test\n    def test_json_errors(self):\n        \"\"\"Handlers should always return JSON errors.\"\"\"\n        # A handler that we override\n        response = yield self.http_client.fetch(self.get_url(\"/api/kernels\"), raise_error=False)\n        body = json_decode(response.body)\n        self.assertEqual(response.code, 403)\n        self.assertEqual(body[\"reason\"], \"Forbidden\")\n\n        # A handler from the notebook base\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels/1-2-3-4-5\"), raise_error=False\n        )\n        body = json_decode(response.body)\n        self.assertEqual(response.code, 404)\n        # Base handler json_errors decorator does not capture reason properly\n        # self.assertEqual(body['reason'], 'Not Found')\n        self.assertIn(\"1-2-3-4-5\", body[\"message\"])\n\n        # The last resort not found handler\n        response = yield self.http_client.fetch(self.get_url(\"/fake-endpoint\"), raise_error=False)\n        body = json_decode(response.body)\n        self.assertEqual(response.code, 404)\n        self.assertEqual(body[\"reason\"], \"Not Found\")\n\n    @gen_test\n    def test_kernel_env(self):\n        \"\"\"Kernel should start with environment 
vars defined in the request.\"\"\"\n        # Note: Only envs in request prefixed with KERNEL_ or in env_whitelist (TEST_VAR)\n        # with the exception of KERNEL_GATEWAY - which is \"system owned\".\n        kernel_body = json.dumps(\n            {\n                \"name\": \"python\",\n                \"env\": {\n                    \"KERNEL_FOO\": \"kernel-foo-value\",\n                    \"NOT_KERNEL\": \"ignored\",\n                    \"KERNEL_GATEWAY\": \"overridden\",\n                    \"TEST_VAR\": \"allowed\",\n                },\n            }\n        )\n        ws = yield self.spawn_kernel(kernel_body)\n        req = self.execute_request(\n            \"import os; \"\n            'print(os.getenv(\"KERNEL_FOO\"), '\n            'os.getenv(\"NOT_KERNEL\"), '\n            'os.getenv(\"KERNEL_GATEWAY\"), '\n            'os.getenv(\"TEST_VAR\"))'\n        )\n        ws.write_message(json_encode(req))\n        content = yield self.await_stream(ws)\n        self.assertEqual(content[\"name\"], \"stdout\")\n        self.assertIn(\"kernel-foo-value\", content[\"text\"])\n        self.assertNotIn(\"ignored\", content[\"text\"])\n        self.assertNotIn(\"overridden\", content[\"text\"])\n        self.assertIn(\"allowed\", content[\"text\"])\n\n        ws.close()\n\n    @gen_test\n    def test_kernel_defaults(self):\n        \"\"\"Kernel should start with env vars defined in request overriding env vars defined in kernelspec.\"\"\"\n\n        # Note: Only envs in request prefixed with KERNEL_ or in env_whitelist (OTHER_VAR1, OTHER_VAR2)\n        # with the exception of KERNEL_GATEWAY - which is \"system owned\" - will be set in kernel env.\n        # Since OTHER_VAR1 is not in the request, its existing value in kernel.json will be used.\n\n        # NOTE: This test requires use of the kernels/kernel_defaults_test/kernel.json file.\n        kernel_body = json.dumps(\n            {\n                \"name\": \"kernel_defaults_test\",\n                
\"env\": {\n                    \"KERNEL_VAR1\": \"kernel_var1_override\",  # Ensure this value overrides that in kernel.json\n                    \"KERNEL_VAR3\": \"kernel_var3_value\",  # Any KERNEL_ flows to kernel\n                    \"OTHER_VAR2\": \"other_var2_override\",  # Ensure this value overrides that in kernel.json\n                    \"KERNEL_GATEWAY\": \"kernel_gateway_override\",  # Ensure KERNEL_GATEWAY is not overridden\n                },\n            }\n        )\n        ws = yield self.spawn_kernel(kernel_body)\n        req = self.execute_request(\n            'import os; print(os.getenv(\"KERNEL_VAR1\"), os.getenv(\"KERNEL_VAR2\"), '\n            'os.getenv(\"KERNEL_VAR3\"), os.getenv(\"KERNEL_GATEWAY\"), os.getenv(\"OTHER_VAR1\"), '\n            'os.getenv(\"OTHER_VAR2\"), os.getenv(\"PROCESS_VAR1\"), os.getenv(\"PROCESS_VAR2\"))'\n        )\n        ws.write_message(json_encode(req))\n        content = yield self.await_stream(ws)\n        self.assertEqual(content[\"name\"], \"stdout\")\n        self.assertIn(\"kernel_var1_override\", content[\"text\"])\n        self.assertIn(\"kernel_var2_default\", content[\"text\"])\n        self.assertIn(\"kernel_var3_value\", content[\"text\"])\n        self.assertNotIn(\"kernel_gateway_override\", content[\"text\"])\n        self.assertIn(\"other_var1_default\", content[\"text\"])\n        self.assertIn(\"other_var2_override\", content[\"text\"])\n        self.assertIn(\"process_var1_override\", content[\"text\"])\n        self.assertIn(\"process_var2_default\", content[\"text\"])\n        ws.close()\n\n    @gen_test\n    def test_get_swagger_yaml_spec(self):\n        \"\"\"Getting the swagger.yaml spec should be ok\"\"\"\n        response = yield self.http_client.fetch(self.get_url(\"/api/swagger.yaml\"))\n        self.assertEqual(response.code, 200)\n\n    @gen_test\n    def test_get_swagger_json_spec(self):\n        \"\"\"Getting the swagger.json spec should be ok\"\"\"\n        response = yield 
self.http_client.fetch(self.get_url(\"/api/swagger.json\"))\n        self.assertEqual(response.code, 200)\n\n    @gen_test\n    def test_kernel_env_auth_token(self):\n        \"\"\"Kernel should not have EG_AUTH_TOKEN in its environment.\"\"\"\n        os.environ[\"EG_AUTH_TOKEN\"] = \"fake-secret\"\n        ws = None\n        try:\n            ws = yield self.spawn_kernel()\n            req = self.execute_request('import os; print(os.getenv(\"EG_AUTH_TOKEN\"))')\n            ws.write_message(json_encode(req))\n            content = yield self.await_stream(ws)\n            self.assertNotIn(\"fake-secret\", content[\"text\"])\n        finally:\n            del os.environ[\"EG_AUTH_TOKEN\"]\n            if ws:\n                ws.close()\n\n\nclass TestCustomDefaultKernel(TestHandlers):\n    \"\"\"Tests gateway behavior when setting a custom default kernelspec.\"\"\"\n\n    def setup_app(self):\n        self.app.default_kernel_name = \"fake-kernel\"\n\n    @gen_test\n    def test_default_kernel_name(self):\n        \"\"\"The default kernel name should be used on empty requests.\"\"\"\n        # Request without an explicit kernel name\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"POST\", body=\"\", raise_error=False\n        )\n        self.assertEqual(response.code, 500)\n        self.assertTrue(\"raise NoSuchKernel\" in str(response.body))\n\n\nclass TestEnableDiscovery(TestHandlers):\n    \"\"\"Tests gateway behavior with kernel listing enabled.\"\"\"\n\n    def setup_configurables(self):\n        \"\"\"Enables kernel listing for all tests.\"\"\"\n        self.app.list_kernels = True\n\n    @gen_test\n    def test_enable_kernel_list(self):\n        \"\"\"The list of kernels, sessions, and activities should be available.\"\"\"\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"),\n        )\n        self.assertEqual(response.code, 200)\n        self.assertTrue(\"[]\" 
in str(response.body))\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/sessions\"),\n        )\n        self.assertEqual(response.code, 200)\n        self.assertTrue(\"[]\" in str(response.body))\n\n\nclass TestBaseURL(TestHandlers):\n    \"\"\"Tests gateway behavior when a custom base URL is configured.\"\"\"\n\n    def setup_app(self):\n        \"\"\"Sets the custom base URL and enables kernel listing.\"\"\"\n        self.app.base_url = \"/fake/path\"\n\n    def setup_configurables(self):\n        \"\"\"Enables kernel listing for all tests.\"\"\"\n        self.app.list_kernels = True\n\n    @gen_test\n    def test_base_url(self):\n        \"\"\"Server should mount resources under configured base.\"\"\"\n        # Should not exist at root\n        response = yield self.http_client.fetch(\n            self.get_url(\"/api/kernels\"), method=\"GET\", raise_error=False\n        )\n        self.assertEqual(response.code, 404)\n\n        # Should exist under path\n        response = yield self.http_client.fetch(\n            self.get_url(\"/fake/path/api/kernels\"), method=\"GET\"\n        )\n        self.assertEqual(response.code, 200)\n\n\nclass TestRelativeBaseURL(TestHandlers):\n    \"\"\"Tests gateway behavior when a relative base URL is configured.\"\"\"\n\n    def setup_app(self):\n        \"\"\"Sets the custom base URL as a relative path.\"\"\"\n        self.app.base_url = \"fake/path\"\n\n    @gen_test\n    def test_base_url(self):\n        \"\"\"Server should mount resources under fixed base.\"\"\"\n        self.app.web_app.settings[\"eg_list_kernels\"] = True\n\n        # Should exist under path\n        response = yield self.http_client.fetch(\n            self.get_url(\"/fake/path/api/kernels\"), method=\"GET\"\n        )\n        self.assertEqual(response.code, 200)\n\n\nclass TestWildcardEnvs(TestHandlers):\n    \"\"\"Base class for jupyter-websocket mode tests that spawn kernels.\"\"\"\n\n    def setup_app(self):\n      
  \"\"\"Configure JUPYTER_PATH so that we can use local kernelspec files for testing.\"\"\"\n        super().setup_app()\n        # overwrite env_whitelist\n        self.app.env_whitelist = [\"*\"]\n\n    @gen_test\n    def test_kernel_wildcard_env(self):\n        \"\"\"Kernel should start with environment vars defined in the request.\"\"\"\n        # Note: Since env_whitelist == '*', all values should be present.\n        kernel_body = json.dumps(\n            {\n                \"name\": \"python\",\n                \"env\": {\n                    \"KERNEL_FOO\": \"kernel-foo-value\",\n                    \"OTHER_VAR1\": \"other-var1-value\",\n                    \"OTHER_VAR2\": \"other-var2-value\",\n                    \"TEST_VAR\": \"test-var-value\",\n                },\n            }\n        )\n        ws = yield self.spawn_kernel(kernel_body)\n        req = self.execute_request(\n            \"import os; \"\n            'print(os.getenv(\"KERNEL_FOO\"), '\n            'os.getenv(\"OTHER_VAR1\"), '\n            'os.getenv(\"OTHER_VAR2\"), '\n            'os.getenv(\"TEST_VAR\"))'\n        )\n        ws.write_message(json_encode(req))\n        content = yield self.await_stream(ws)\n        self.assertEqual(content[\"name\"], \"stdout\")\n        self.assertIn(\"kernel-foo-value\", content[\"text\"])\n        self.assertIn(\"other-var1-value\", content[\"text\"])\n        self.assertIn(\"other-var2-value\", content[\"text\"])\n        self.assertIn(\"test-var-value\", content[\"text\"])\n\n        ws.close()\n"
  },
  {
    "path": "enterprise_gateway/tests/test_kernelspec_cache.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for KernelSpecCache.\"\"\"\n\nimport asyncio\nimport json\nimport os\nimport shutil\nimport sys\n\nimport jupyter_core.paths\nimport pytest\nfrom jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel\n\nfrom enterprise_gateway.services.kernelspecs import KernelSpecCache\n\n\n# BEGIN - Remove once transition to jupyter_server occurs\ndef mkdir(tmp_path, *parts):\n    path = tmp_path.joinpath(*parts)\n    if not path.exists():\n        path.mkdir(parents=True)\n    return path\n\n\nhome_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"home\"))\ndata_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"data\"))\nconfig_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"config\"))\nruntime_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"runtime\"))\nsystem_jupyter_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"share\", \"jupyter\"))\nenv_jupyter_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"env\", \"share\", \"jupyter\"))\nsystem_config_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"etc\", \"jupyter\"))\nenv_config_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, \"env\", \"etc\", \"jupyter\"))\n\n\n@pytest.fixture\ndef environ(\n    monkeypatch,\n    tmp_path,\n    home_dir,\n    data_dir,\n    config_dir,\n    runtime_dir,\n    system_jupyter_path,\n    system_config_path,\n    env_jupyter_path,\n    env_config_path,\n):\n    monkeypatch.setenv(\"HOME\", str(home_dir))\n    monkeypatch.setenv(\"PYTHONPATH\", os.pathsep.join(sys.path))\n    monkeypatch.setenv(\"JUPYTER_NO_CONFIG\", \"1\")\n    monkeypatch.setenv(\"JUPYTER_CONFIG_DIR\", str(config_dir))\n    monkeypatch.setenv(\"JUPYTER_DATA_DIR\", str(data_dir))\n    monkeypatch.setenv(\"JUPYTER_RUNTIME_DIR\", str(runtime_dir))\n    monkeypatch.setattr(jupyter_core.paths, \"SYSTEM_JUPYTER_PATH\", 
[str(system_jupyter_path)])\n    monkeypatch.setattr(jupyter_core.paths, \"ENV_JUPYTER_PATH\", [str(env_jupyter_path)])\n    monkeypatch.setattr(jupyter_core.paths, \"SYSTEM_CONFIG_PATH\", [str(system_config_path)])\n    monkeypatch.setattr(jupyter_core.paths, \"ENV_CONFIG_PATH\", [str(env_config_path)])\n\n\n# END - Remove once transition to jupyter_server occurs\n\n\nkernelspec_json = {\n    \"argv\": [\"cat\", \"{connection_file}\"],\n    \"display_name\": \"Test kernel: {kernel_name}\",\n}\n\n\ndef _install_kernelspec(kernels_dir, kernel_name):\n    \"\"\"install a sample kernel in a kernels directory\"\"\"\n    kernelspec_dir = os.path.join(kernels_dir, kernel_name)\n    os.makedirs(kernelspec_dir)\n    json_file = os.path.join(kernelspec_dir, \"kernel.json\")\n    named_json = kernelspec_json.copy()\n    named_json[\"display_name\"] = named_json[\"display_name\"].format(kernel_name=kernel_name)\n    with open(json_file, \"w\") as f:\n        json.dump(named_json, f)\n    return kernelspec_dir\n\n\ndef _modify_kernelspec(kernelspec_dir, kernel_name):\n    json_file = os.path.join(kernelspec_dir, \"kernel.json\")\n    kernel_json = kernelspec_json.copy()\n    kernel_json[\"display_name\"] = f\"{kernel_name} modified!\"\n    with open(json_file, \"w\") as f:\n        json.dump(kernel_json, f)\n\n\nkernelspec_location = pytest.fixture(lambda data_dir: mkdir(data_dir, \"kernels\"))\nother_kernelspec_location = pytest.fixture(\n    lambda env_jupyter_path: mkdir(env_jupyter_path, \"kernels\")\n)\n\n\n@pytest.fixture\ndef setup_kernelspecs(environ, kernelspec_location):\n    # Only populate factory info\n    _install_kernelspec(str(kernelspec_location), \"test1\")\n    _install_kernelspec(str(kernelspec_location), \"test2\")\n    _install_kernelspec(str(kernelspec_location), \"test3\")\n\n\n@pytest.fixture\ndef kernel_spec_manager(environ, setup_kernelspecs):\n    yield KernelSpecManager(ensure_native_kernel=False)\n\n\n@pytest.fixture\ndef 
kernel_spec_cache(is_enabled, kernel_spec_manager):\n    kspec_cache = KernelSpecCache.instance(\n        kernel_spec_manager=kernel_spec_manager, cache_enabled=is_enabled\n    )\n    yield kspec_cache\n    kspec_cache = None\n    KernelSpecCache.clear_instance()\n\n\n@pytest.fixture(params=[False, True])  # Add types as needed\ndef is_enabled(request):\n    return request.param\n\n\nasync def tests_get_all_specs(kernel_spec_cache):\n    kspecs = await kernel_spec_cache.get_all_specs()\n    assert len(kspecs) == 3\n\n\nasync def tests_get_named_spec(kernel_spec_cache):\n    kspec = await kernel_spec_cache.get_kernel_spec(\"test2\")\n    assert kspec.display_name == \"Test kernel: test2\"\n\n\nasync def tests_get_modified_spec(kernel_spec_cache):\n    kspec = await kernel_spec_cache.get_kernel_spec(\"test2\")\n    assert kspec.display_name == \"Test kernel: test2\"\n\n    # Modify entry\n    _modify_kernelspec(kspec.resource_dir, \"test2\")\n    await asyncio.sleep(0.5)  # sleep for a half-second to allow cache to update item\n    kspec = await kernel_spec_cache.get_kernel_spec(\"test2\")\n    assert kspec.display_name == \"test2 modified!\"\n\n\nasync def tests_add_spec(kernel_spec_cache, kernelspec_location, other_kernelspec_location):\n    assert len(kernel_spec_cache.observed_dirs) == (1 if kernel_spec_cache.cache_enabled else 0)\n    assert (\n        str(kernelspec_location) in kernel_spec_cache.observed_dirs\n        if kernel_spec_cache.cache_enabled\n        else True\n    )\n\n    _install_kernelspec(str(other_kernelspec_location), \"added\")\n    kspec = await kernel_spec_cache.get_kernel_spec(\"added\")\n\n    # Ensure new location has been added to observed_dirs\n    assert len(kernel_spec_cache.observed_dirs) == (2 if kernel_spec_cache.cache_enabled else 0)\n    assert (\n        str(other_kernelspec_location) in kernel_spec_cache.observed_dirs\n        if kernel_spec_cache.cache_enabled\n        else True\n    )\n\n    assert kspec.display_name == 
\"Test kernel: added\"\n    assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0)\n\n    # Add another to an existing observed directory, no cache miss here\n    _install_kernelspec(str(kernelspec_location), \"added2\")\n    await asyncio.sleep(\n        0.5\n    )  # sleep for a half-second to allow cache to add item (no cache miss in this case)\n    kspec = await kernel_spec_cache.get_kernel_spec(\"added2\")\n\n    assert kspec.display_name == \"Test kernel: added2\"\n    assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0)\n\n\nasync def tests_remove_spec(kernel_spec_cache):\n    kspec = await kernel_spec_cache.get_kernel_spec(\"test2\")\n    assert kspec.display_name == \"Test kernel: test2\"\n\n    assert kernel_spec_cache.cache_misses == 0\n    shutil.rmtree(kspec.resource_dir)\n    await asyncio.sleep(0.5)  # sleep for a half-second to allow cache to remove item\n    with pytest.raises(NoSuchKernel):\n        await kernel_spec_cache.get_kernel_spec(\"test2\")\n\n    assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0)\n\n\nasync def tests_get_missing(kernel_spec_cache):\n    with pytest.raises(NoSuchKernel):\n        await kernel_spec_cache.get_kernel_spec(\"missing\")\n\n    assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0)\n"
  },
  {
    "path": "enterprise_gateway/tests/test_mixins.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for handler mixins.\"\"\"\n\nimport json\nimport unittest\n\ntry:\n    from unittest.mock import Mock\nexcept ImportError:\n    # Python 2.7: use backport\n    from unittest.mock import Mock\n\nfrom tornado import web\n\nfrom enterprise_gateway.mixins import JSONErrorsMixin, TokenAuthorizationMixin\n\n\nclass SuperTokenAuthHandler:\n    \"\"\"Super class for the handler using TokenAuthorizationMixin.\"\"\"\n\n    is_prepared = False\n\n    def prepare(self):\n        # called by the mixin when authentication succeeds\n        self.is_prepared = True\n\n\nclass TestableTokenAuthHandler(TokenAuthorizationMixin, SuperTokenAuthHandler):\n    \"\"\"Implementation that uses the TokenAuthorizationMixin for testing.\"\"\"\n\n    __test__ = False\n\n    def __init__(self, token=\"\"):\n        self.settings = {\"eg_auth_token\": token}\n        self.arguments = {}\n        self.response = None\n        self.status_code = None\n\n    def send_error(self, status_code):\n        self.status_code = status_code\n\n    def get_argument(self, name, default=\"\"):\n        return self.arguments.get(name, default)\n\n\nclass TestTokenAuthMixin(unittest.TestCase):\n    \"\"\"Unit tests the Token authorization mixin.\"\"\"\n\n    def setUp(self):\n        \"\"\"Creates a handler that uses the mixin.\"\"\"\n        self.mixin = TestableTokenAuthHandler(\"YouKnowMe\")\n\n    def test_no_token_required(self):\n        \"\"\"No token required - status should be None.\"\"\"\n        self.mixin.settings[\"eg_auth_token\"] = \"\"\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, True)\n        self.assertEqual(self.mixin.status_code, None)\n\n    def test_missing_token(self):\n        \"\"\"Missing token - tatus should be 'unauthorized'.\"\"\"\n        attrs = {\"headers\": {}}\n        self.mixin.request = Mock(**attrs)\n        
self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, False)\n        self.assertEqual(self.mixin.status_code, 401)\n\n    def test_valid_header_token(self):\n        \"\"\"Valid header token - status should be None.\"\"\"\n        attrs = {\"headers\": {\"Authorization\": \"token YouKnowMe\"}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, True)\n        self.assertEqual(self.mixin.status_code, None)\n\n    def test_wrong_header_token(self):\n        \"\"\"Wrong header token - status should be 'unauthorized'.\"\"\"\n        attrs = {\"headers\": {\"Authorization\": \"token NeverHeardOf\"}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, False)\n        self.assertEqual(self.mixin.status_code, 401)\n\n    def test_valid_url_token(self):\n        \"\"\"Valid url token - status should be None.\"\"\"\n        self.mixin.arguments[\"token\"] = \"YouKnowMe\"\n        attrs = {\"headers\": {}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, True)\n        self.assertEqual(self.mixin.status_code, None)\n\n    def test_wrong_url_token(self):\n        \"\"\"Wrong url token - tatus should be 'unauthorized'.\"\"\"\n        self.mixin.arguments[\"token\"] = \"NeverHeardOf\"\n        attrs = {\"headers\": {}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, False)\n        self.assertEqual(self.mixin.status_code, 401)\n\n    def test_differing_tokens_valid_url(self):\n        \"\"\"Differing tokens - status should be None, URL token takes precedence\"\"\"\n        self.mixin.arguments[\"token\"] = \"YouKnowMe\"\n        attrs = {\"headers\": {\"Authorization\": \"token NeverHeardOf\"}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.prepare()\n    
    self.assertEqual(self.mixin.is_prepared, True)\n        self.assertEqual(self.mixin.status_code, None)\n\n    def test_differing_tokens_wrong_url(self):\n        \"\"\"Differing token w/ wrong url - status should be 'unauthorized', URL token takes precedence\"\"\"\n        attrs = {\"headers\": {\"Authorization\": \"token YouKnowMe\"}}\n        self.mixin.request = Mock(**attrs)\n        self.mixin.arguments[\"token\"] = \"NeverHeardOf\"\n        self.mixin.prepare()\n        self.assertEqual(self.mixin.is_prepared, False)\n        self.assertEqual(self.mixin.status_code, 401)\n\n\nclass TestableJSONErrorsHandler(JSONErrorsMixin):\n    \"\"\"Implementation that uses the JSONErrorsMixin for testing.\"\"\"\n\n    __test__ = False\n\n    def __init__(self):\n        self.headers = {}\n        self.response = None\n        self.status_code = None\n        self.reason = None\n\n    def finish(self, response):\n        self.response = response\n\n    def set_status(self, status_code, reason=None):\n        self.status_code = status_code\n        self.reason = reason\n\n    def set_header(self, name, value):\n        self.headers[name] = value\n\n\nclass TestJSONErrorsMixin(unittest.TestCase):\n    \"\"\"Unit tests the JSON errors mixin.\"\"\"\n\n    def setUp(self):\n        \"\"\"Creates a handler that uses the mixin.\"\"\"\n        self.mixin = TestableJSONErrorsHandler()\n\n    def test_status(self):\n        \"\"\"Status should be set on the response.\"\"\"\n        self.mixin.write_error(404)\n        response = json.loads(self.mixin.response)\n        self.assertEqual(self.mixin.status_code, 404)\n        self.assertEqual(response[\"reason\"], \"Not Found\")\n        self.assertEqual(response[\"message\"], \"\")\n\n    def test_custom_status(self):\n        \"\"\"Custom reason from exception should be set in the response.\"\"\"\n        exc = web.HTTPError(500, reason=\"fake-reason\")\n        self.mixin.write_error(500, exc_info=[None, exc])\n\n        
response = json.loads(self.mixin.response)\n        self.assertEqual(self.mixin.status_code, 500)\n        self.assertEqual(response[\"reason\"], \"fake-reason\")\n        self.assertEqual(response[\"message\"], \"\")\n\n    def test_log_message(self):\n        \"\"\"Custom message from exception should be set in the response.\"\"\"\n        exc = web.HTTPError(410, log_message=\"fake-message\")\n        self.mixin.write_error(410, exc_info=[None, exc])\n\n        response = json.loads(self.mixin.response)\n        self.assertEqual(self.mixin.status_code, 410)\n        self.assertEqual(response[\"reason\"], \"Gone\")\n        self.assertEqual(response[\"message\"], \"fake-message\")\n"
  },
  {
    "path": "enterprise_gateway/tests/test_process_proxy.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for process proxy functionality.\"\"\"\n\nimport os\nimport unittest\nfrom unittest.mock import Mock, patch\n\nfrom tornado import web\n\nfrom enterprise_gateway.services.processproxies.container import _parse_prohibited_ids\n\n# Mock Kubernetes configuration before importing the module\nwith patch('kubernetes.config.load_incluster_config'), patch('kubernetes.config.load_kube_config'):\n    from enterprise_gateway.services.processproxies.k8s import KubernetesProcessProxy\n\n\nclass TestParseProhibitedIds(unittest.TestCase):\n    \"\"\"Test parsing of prohibited UID/GID environment variables.\"\"\"\n\n    def test_default_value(self):\n        with patch.dict(os.environ, {}, clear=False):\n            os.environ.pop(\"TEST_IDS\", None)\n            result = _parse_prohibited_ids(\"TEST_IDS\", \"0\")\n        self.assertEqual(result, [0])\n\n    def test_multiple_values(self):\n        with patch.dict(os.environ, {\"TEST_IDS\": \"0,1000\"}):\n            result = _parse_prohibited_ids(\"TEST_IDS\", \"0\")\n        self.assertEqual(result, [0, 1000])\n\n    def test_values_with_spaces(self):\n        with patch.dict(os.environ, {\"TEST_IDS\": \"0, 1000, 65534\"}):\n            result = _parse_prohibited_ids(\"TEST_IDS\", \"0\")\n        self.assertEqual(result, [0, 1000, 65534])\n\n    def test_invalid_entries_raise_value_error(self):\n        with patch.dict(os.environ, {\"TEST_IDS\": \"0,abc,1000\"}):\n            with self.assertRaises(ValueError) as ctx:\n                _parse_prohibited_ids(\"TEST_IDS\", \"0\")\n            self.assertIn(\"abc\", str(ctx.exception))\n            self.assertIn(\"TEST_IDS\", str(ctx.exception))\n\n    def test_username_instead_of_uid_raises_value_error(self):\n        with patch.dict(os.environ, {\"TEST_IDS\": \"root\"}):\n            with self.assertRaises(ValueError) as ctx:\n                
_parse_prohibited_ids(\"TEST_IDS\", \"0\")\n            self.assertIn(\"root\", str(ctx.exception))\n\n    def test_empty_entries_ignored(self):\n        with patch.dict(os.environ, {\"TEST_IDS\": \"0,,1000\"}):\n            result = _parse_prohibited_ids(\"TEST_IDS\", \"0\")\n        self.assertEqual(result, [0, 1000])\n\n\nclass TestContainerProxyProhibitedIds(unittest.TestCase):\n    \"\"\"Test UID/GID validation in ContainerProcessProxy.\"\"\"\n\n    def setUp(self):\n        self.mock_kernel_manager = Mock()\n        self.mock_kernel_manager.get_kernel_username.return_value = \"testuser\"\n        self.mock_kernel_manager.port_range = \"0..0\"\n        self.proxy_config = {\"kernel_id\": \"test-kernel-id\", \"kernel_name\": \"python3\"}\n        with patch(\n            'enterprise_gateway.services.processproxies.k8s.KernelSessionManager'\n        ) as mock_session_manager, patch(\n            'enterprise_gateway.services.processproxies.processproxy.ResponseManager'\n        ):\n            mock_session_manager.get_kernel_username.return_value = \"testuser\"\n            self.proxy = KubernetesProcessProxy(self.mock_kernel_manager, self.proxy_config)\n\n    def _make_kwargs(self, uid=None, gid=None):\n        env = {}\n        if uid is not None:\n            env[\"KERNEL_UID\"] = uid\n        if gid is not None:\n            env[\"KERNEL_GID\"] = gid\n        return {\"env\": env}\n\n    def test_valid_uid_gid_passes(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"100\")\n        self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_UID\"], \"1000\")\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_GID\"], \"100\")\n\n    def test_defaults_used_when_not_provided(self):\n        kwargs = self._make_kwargs()\n        self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_UID\"], \"1000\")\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_GID\"], \"100\")\n\n    
def test_prohibited_uid_exact_match(self):\n        kwargs = self._make_kwargs(uid=\"0\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_prohibited_gid_exact_match(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"0\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_trailing_whitespace_uid_denied(self):\n        kwargs = self._make_kwargs(uid=\"0 \", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_leading_whitespace_uid_denied(self):\n        kwargs = self._make_kwargs(uid=\" 0\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_leading_zeros_uid_denied(self):\n        kwargs = self._make_kwargs(uid=\"00\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_plus_sign_uid_denied(self):\n        kwargs = self._make_kwargs(uid=\"+0\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_non_numeric_uid_rejected(self):\n        kwargs = self._make_kwargs(uid=\"abc\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def 
test_empty_uid_rejected(self):\n        kwargs = self._make_kwargs(uid=\"\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n    def test_negative_uid_rejected(self):\n        kwargs = self._make_kwargs(uid=\"-1\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n        self.assertIn(\"must be in range\", ctx.exception.reason)\n\n    def test_negative_gid_rejected(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"-1\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n        self.assertIn(\"must be in range\", ctx.exception.reason)\n\n    def test_uid_exceeding_uint32_max_rejected(self):\n        kwargs = self._make_kwargs(uid=\"4294967296\", gid=\"100\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n        self.assertIn(\"must be in range\", ctx.exception.reason)\n\n    def test_gid_exceeding_uint32_max_rejected(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"4294967296\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n        self.assertIn(\"must be in range\", ctx.exception.reason)\n\n    def test_uid_at_uint32_max_allowed(self):\n        kwargs = self._make_kwargs(uid=\"4294967295\", gid=\"100\")\n        self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_UID\"], \"4294967295\")\n\n    def test_normalized_values_stored(self):\n        kwargs = 
self._make_kwargs(uid=\" 1000 \", gid=\" 100 \")\n        self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_UID\"], \"1000\")\n        self.assertEqual(kwargs[\"env\"][\"KERNEL_GID\"], \"100\")\n\n    def test_both_uid_and_gid_checked_independently(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"0\")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n        self.assertIn(\"GID\", ctx.exception.reason)\n\n    def test_trailing_whitespace_gid_denied(self):\n        kwargs = self._make_kwargs(uid=\"1000\", gid=\"0 \")\n        with self.assertRaises(web.HTTPError) as ctx:\n            self.proxy._enforce_prohibited_ids(**kwargs)\n        self.assertEqual(ctx.exception.status_code, 403)\n\n\nclass TestKubernetesProcessProxy(unittest.TestCase):\n    \"\"\"Test secure template substitution in Kubernetes process proxy.\"\"\"\n\n    def setUp(self):\n        \"\"\"Set up test fixtures.\"\"\"\n        self.mock_kernel_manager = Mock()\n        self.mock_kernel_manager.get_kernel_username.return_value = \"testuser\"\n        self.mock_kernel_manager.port_range = \"0..0\"  # Mock port range\n\n        # Mock proxy config\n        self.proxy_config = {\"kernel_id\": \"test-kernel-id\", \"kernel_name\": \"python3\"}\n        with patch(\n            'enterprise_gateway.services.processproxies.k8s.KernelSessionManager'\n        ) as mock_session_manager, patch(\n            'enterprise_gateway.services.processproxies.processproxy.ResponseManager'\n        ):\n            mock_session_manager.get_kernel_username.return_value = \"testuser\"\n            self.proxy = KubernetesProcessProxy(self.mock_kernel_manager, self.proxy_config)\n            self.proxy.kernel_id = \"test-kernel-id\"\n\n    def test_valid_template_substitution(self):\n        \"\"\"Test valid template variable substitution.\"\"\"\n 
       test_cases = [\n            # Basic variable substitution\n            (\"{{ kernel_id }}\", {\"kernel_id\": \"test-123\"}, \"test-123\"),\n            # Multiple variables\n            (\n                \"{{ kernel_namespace }}-{{ kernel_id }}\",\n                {\"kernel_namespace\": \"default\", \"kernel_id\": \"test-123\"},\n                \"default-test-123\",\n            ),\n            # Variables with underscores\n            (\"{{ kernel_image_pull_policy }}\", {\"kernel_image_pull_policy\": \"Always\"}, \"Always\"),\n            # Whitespace handling\n            (\"{{   kernel_id   }}\", {\"kernel_id\": \"test-123\"}, \"test-123\"),\n        ]\n\n        for template, variables, expected in test_cases:\n            with self.subTest(template=template):\n                result = self.proxy._safe_template_substitute(template, variables)\n                self.assertEqual(result, expected)\n\n    def test_missing_variables_fallback(self):\n        # Test the full pod name determination process\n        kwargs = {\n            \"env\": {\n                \"KERNEL_POD_NAME\": \"{{ missing_var }}\",\n                \"KERNEL_NAMESPACE\": \"production\",\n            }\n        }\n\n        with patch.object(self.proxy, 'log'), patch(\n            'enterprise_gateway.services.processproxies.k8s.KernelSessionManager'\n        ) as mock_session_manager:\n            mock_session_manager.get_kernel_username.return_value = \"testuser\"\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should fall back to default naming: kernel_username + \"-\" + kernel_id\n            self.assertEqual(result, \"testuser-test-kernel-id\")\n\n    def test_malicious_template_injection_prevention(self):\n        \"\"\"Test prevention of malicious template injection attacks.\"\"\"\n        malicious_templates = [\n            # Python code execution attempts\n            \"{{ 
''.__class__.__mro__[1].__subclasses__()[104].__init__.__globals__['sys'].exit() }}\",\n            \"{{ __import__('os').system('rm -rf /') }}\",\n            \"{{ exec('print(\\\"pwned\\\")') }}\",\n            \"{{ eval('1+1') }}\",\n            # Attribute access attempts\n            \"{{ kernel_id.__class__ }}\",\n            \"{{ kernel_id.__dict__ }}\",\n            \"{{ kernel_id.__globals__ }}\",\n            # Function calls\n            \"{{ range(10) }}\",\n            \"{{ len(kernel_id) }}\",\n            \"{{ str.upper(kernel_id) }}\",\n            # Jinja2 filters and expressions\n            \"{{ kernel_id|upper }}\",\n            \"{{ kernel_id + '_suffix' }}\",\n            \"{{ 1 + 1 }}\",\n            # Complex expressions\n            \"{{ kernel_id if kernel_id else 'default' }}\",\n            \"{{ kernel_id[:5] }}\",\n        ]\n\n        variables = {\"kernel_id\": \"test-123\"}\n\n        for malicious_template in malicious_templates:\n            with self.subTest(template=malicious_template), patch.object(\n                self.proxy, 'log'\n            ) as mock_log:\n                result = self.proxy._safe_template_substitute(malicious_template, variables)\n                # All malicious templates should be treated as invalid and return None\n                self.assertIsNone(result)\n                mock_log.warning.assert_called_once()\n                # Should warn about unsupported expressions\n                self.assertIn(\"Invalid template syntax\", mock_log.warning.call_args[0][0])\n\n    def test_pod_name_determination_with_templates(self):\n        \"\"\"Test complete pod name determination with template processing.\"\"\"\n        kwargs = {\n            \"env\": {\n                \"KERNEL_POD_NAME\": \"{{ kernel_namespace }}-{{ kernel_id }}\",\n                \"KERNEL_NAMESPACE\": \"production\",\n                \"KERNEL_IMAGE\": \"python:3.11\",\n            }\n        }\n\n        with patch.object(self.proxy, 
'log'):\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should get processed and DNS-normalized\n            self.assertEqual(result, \"production-test-kernel-id\")\n\n    def test_pod_name_determination_with_malicious_template(self):\n        \"\"\"Test pod name determination with malicious template falls back to default.\"\"\"\n        kwargs = {\n            \"env\": {\n                \"KERNEL_POD_NAME\": \"{{ __import__('os').system('evil') }}\",\n                \"KERNEL_NAMESPACE\": \"production\",\n            }\n        }\n\n        with patch.object(self.proxy, 'log'), patch(\n            'enterprise_gateway.services.processproxies.k8s.KernelSessionManager'\n        ) as mock_session_manager:\n            mock_session_manager.get_kernel_username.return_value = \"testuser\"\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should fall back to default naming\n            self.assertEqual(result, \"testuser-test-kernel-id\")\n\n    def test_pod_name_determination_with_missing_variables(self):\n        \"\"\"Test pod name determination with missing variables falls back to default.\"\"\"\n        kwargs = {\n            \"env\": {\n                \"KERNEL_POD_NAME\": \"{{ missing_var }}-{{ kernel_id }}\",\n                \"KERNEL_NAMESPACE\": \"production\",\n            }\n        }\n\n        with patch.object(self.proxy, 'log'), patch(\n            'enterprise_gateway.services.processproxies.k8s.KernelSessionManager'\n        ) as mock_session_manager:\n            mock_session_manager.get_kernel_username.return_value = \"testuser\"\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should fall back to default naming\n            self.assertEqual(result, \"testuser-test-kernel-id\")\n\n    def test_pod_name_without_template(self):\n        \"\"\"Test pod name determination without template syntax.\"\"\"\n        kwargs = {\"env\": 
{\"KERNEL_POD_NAME\": \"static-pod-name\", \"KERNEL_NAMESPACE\": \"production\"}}\n\n        with patch.object(self.proxy, 'log'):\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should use as-is and DNS-normalize\n            self.assertEqual(result, \"static-pod-name\")\n\n    def test_pod_name_dns_normalization(self):\n        \"\"\"Test DNS name normalization of pod names.\"\"\"\n        kwargs = {\n            \"env\": {\n                \"KERNEL_POD_NAME\": \"{{ kernel_namespace }}_{{ kernel_id }}\",\n                \"KERNEL_NAMESPACE\": \"Test-Namespace\",\n                \"KERNEL_IMAGE\": \"python:3.11\",\n            }\n        }\n\n        with patch.object(self.proxy, 'log'):\n            result = self.proxy._determine_kernel_pod_name(**kwargs)\n            # Should be DNS-normalized (lowercase, dashes only)\n            self.assertEqual(result, \"test-namespace-test-kernel-id\")\n\n    def test_regex_pattern_validation(self):\n        \"\"\"Test that only valid variable names are matched by regex.\"\"\"\n        valid_vars = [\n            \"kernel_id\",\n            \"kernel_namespace\",\n            \"kernel_image_pull_policy\",\n            \"a\",\n            \"var123\",\n            \"KERNEL_ID\",\n        ]\n\n        # Variables that should be blocked by the regex pattern\n        invalid_vars = [\n            \"123invalid\",  # starts with number\n            \"invalid-var\",  # contains dash\n            \"invalid.var\",  # contains dot\n            \"invalid var\",  # contains space\n            \"invalid@var\",  # contains special char\n            \"_private_var\",  # starts with underscore (security risk)\n            \"__class__\",  # magic method (security risk)\n            \"__dict__\",  # magic method (security risk)\n            \"__globals__\",  # magic method (security risk)\n        ]\n\n        variables = {var: \"value\" for var in valid_vars}\n        # Also add underscore variables to test 
they're not substituted even if present\n        variables.update(\n            {\"_private_var\": \"private\", \"__class__\": \"dangerous\", \"__dict__\": \"dangerous\"}\n        )\n\n        # Valid variables should be substituted\n        for var in valid_vars:\n            template = f\"{{{{ {var} }}}}\"\n            result = self.proxy._safe_template_substitute(template, variables)\n            self.assertEqual(result, \"value\", f\"Valid variable {var} should be substituted\")\n\n        # Invalid variables should be treated as having invalid syntax\n        for var in invalid_vars:\n            template = f\"{{{{ {var} }}}}\"\n            with patch.object(self.proxy, 'log') as mock_log:\n                result = self.proxy._safe_template_substitute(template, variables)\n                self.assertIsNone(result, f\"Invalid variable {var} should be rejected\")\n                mock_log.warning.assert_called_once()\n                # Should warn about unsupported expressions since invalid var names don't match regex\n                self.assertIn(\"Invalid template syntax\", mock_log.warning.call_args[0][0])\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "enterprise_gateway/tests/test_yaml_injection.py",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for YAML injection vulnerability fix (GHSA-cfw7-6c5v-2wjq).\"\"\"\n\nimport os\nimport unittest\n\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nTEMPLATE_DIR = os.path.join(\n    os.path.dirname(__file__),\n    \"..\",\n    \"..\",\n    \"etc\",\n    \"kernel-launchers\",\n    \"kubernetes\",\n    \"scripts\",\n)\n\nOPERATOR_TEMPLATE_DIR = os.path.join(\n    os.path.dirname(__file__),\n    \"..\",\n    \"..\",\n    \"etc\",\n    \"kernel-launchers\",\n    \"operators\",\n    \"scripts\",\n)\n\nYAML_PARSED_KERNEL_VARS = {\"KERNEL_VOLUME_MOUNTS\", \"KERNEL_VOLUMES\"}\n\nALLOWED_K8S_KINDS = {\n    \"Pod\",\n    \"Secret\",\n    \"PersistentVolumeClaim\",\n    \"PersistentVolume\",\n    \"Service\",\n    \"ConfigMap\",\n}\n\n\ndef yaml_safe_str(value):\n    \"\"\"Escape a value for safe inclusion in a YAML template.\"\"\"\n    if isinstance(value, str):\n        return yaml.dump(value, default_style='\"', width=10000).strip()\n    if isinstance(value, (dict, list)):\n        return yaml.dump(value, default_flow_style=True, width=10000).strip()\n    # yaml.dump appends a document-end marker (\"...\\n\") for scalars; strip it\n    return yaml.dump(value, width=10000).replace(\"\\n...\", \"\").strip()\n\n\ndef _build_keywords(env_overrides: dict) -> dict:\n    \"\"\"Build a keywords dict from env_overrides using the fixed parsing logic.\"\"\"\n    keywords = {}\n    for name, value in env_overrides.items():\n        if name.startswith(\"KERNEL_\"):\n            if name in YAML_PARSED_KERNEL_VARS:\n                parsed = yaml.safe_load(value)\n                if isinstance(parsed, list) and all(isinstance(item, dict) for item in parsed):\n                    keywords[name.lower()] = parsed\n            else:\n                keywords[name.lower()] = value\n    return keywords\n\n\ndef 
_render_pod_template(keywords: dict) -> str:\n    \"\"\"Render the kernel-pod.yaml.j2 template with the yaml_safe filter.\"\"\"\n    j_env = Environment(\n        loader=FileSystemLoader(os.path.normpath(TEMPLATE_DIR)),\n        trim_blocks=True,\n        lstrip_blocks=True,\n        autoescape=select_autoescape(\n            disabled_extensions=(\"j2\", \"yaml\"),\n            default_for_string=True,\n            default=True,\n        ),\n    )\n    j_env.filters[\"yaml_safe\"] = yaml_safe_str\n    return j_env.get_template(\"/kernel-pod.yaml.j2\").render(**keywords)\n\n\ndef _base_env() -> dict:\n    return {\n        \"KERNEL_POD_NAME\": \"test-pod\",\n        \"KERNEL_NAMESPACE\": \"default\",\n        \"KERNEL_ID\": \"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee\",\n        \"KERNEL_IMAGE\": \"elyra/kernel-py:3.2.3\",\n        \"KERNEL_SERVICE_ACCOUNT_NAME\": \"default\",\n        \"KERNEL_UID\": \"1000\",\n        \"KERNEL_GID\": \"100\",\n    }\n\n\nclass TestYamlSafeStrFilter(unittest.TestCase):\n    \"\"\"Test the yaml_safe_str Jinja2 filter.\"\"\"\n\n    def test_normal_string(self):\n        result = yaml_safe_str(\"/home/jovyan\")\n        self.assertEqual(result, '\"/home/jovyan\"')\n\n    def test_string_with_quotes(self):\n        result = yaml_safe_str('hello \"world\"')\n        self.assertIn(\"hello\", result)\n        parsed = yaml.safe_load(f\"key: {result}\")\n        self.assertEqual(parsed[\"key\"], 'hello \"world\"')\n\n    def test_string_with_newlines_escaped(self):\n        result = yaml_safe_str(\"line1\\nline2\\nline3\")\n        self.assertNotIn(\"\\n\", result.strip('\"'))\n        parsed = yaml.safe_load(f\"key: {result}\")\n        self.assertEqual(parsed[\"key\"], \"line1\\nline2\\nline3\")\n\n    def test_document_boundary_escaped(self):\n        result = yaml_safe_str(\"before\\n---\\nafter\")\n        parsed_docs = list(yaml.safe_load_all(f\"key: {result}\"))\n        self.assertEqual(len(parsed_docs), 1)\n        
self.assertEqual(parsed_docs[0][\"key\"], \"before\\n---\\nafter\")\n\n    def test_end_of_document_marker_escaped(self):\n        result = yaml_safe_str(\"before\\n...\\nafter\")\n        parsed = yaml.safe_load(f\"key: {result}\")\n        self.assertIn(\"...\", parsed[\"key\"])\n\n    def test_none_serialized_as_yaml_null(self):\n        result = yaml_safe_str(None)\n        self.assertEqual(result, \"null\")\n        parsed = yaml.safe_load(f\"key: {result}\")\n        self.assertIsNone(parsed[\"key\"])\n\n    def test_bool_serialized_as_yaml_bool(self):\n        self.assertEqual(yaml_safe_str(True), \"true\")\n        self.assertEqual(yaml_safe_str(False), \"false\")\n        parsed_true = yaml.safe_load(f\"key: {yaml_safe_str(True)}\")\n        parsed_false = yaml.safe_load(f\"key: {yaml_safe_str(False)}\")\n        self.assertIs(parsed_true[\"key\"], True)\n        self.assertIs(parsed_false[\"key\"], False)\n\n    def test_numeric_serialized_correctly(self):\n        self.assertEqual(yaml_safe_str(1000), \"1000\")\n        self.assertEqual(yaml_safe_str(3.14), \"3.14\")\n        parsed_int = yaml.safe_load(f\"key: {yaml_safe_str(1000)}\")\n        parsed_float = yaml.safe_load(f\"key: {yaml_safe_str(3.14)}\")\n        self.assertEqual(parsed_int[\"key\"], 1000)\n        self.assertAlmostEqual(parsed_float[\"key\"], 3.14)\n\n    def test_dict_rendered_as_flow_mapping(self):\n        result = yaml_safe_str({\"name\": \"data\", \"mountPath\": \"/data\"})\n        parsed = yaml.safe_load(f\"- {result}\")\n        self.assertEqual(parsed[0][\"name\"], \"data\")\n        self.assertEqual(parsed[0][\"mountPath\"], \"/data\")\n\n    def test_empty_string(self):\n        result = yaml_safe_str(\"\")\n        parsed = yaml.safe_load(f\"key: {result}\")\n        self.assertEqual(parsed[\"key\"], \"\")\n\n    def test_image_name_with_tag(self):\n        result = yaml_safe_str(\"registry.example.com/org/image:v1.2.3\")\n        parsed = yaml.safe_load(f\"key: 
{result}\")\n        self.assertEqual(parsed[\"key\"], \"registry.example.com/org/image:v1.2.3\")\n\n\nclass TestEnvVarParsing(unittest.TestCase):\n    \"\"\"Test that env var parsing correctly distinguishes scalar vs structured vars.\"\"\"\n\n    def test_scalar_vars_remain_strings(self):\n        env = {\"KERNEL_IMAGE\": \"nginx:latest\", \"KERNEL_UID\": \"1000\"}\n        keywords = _build_keywords(env)\n        self.assertEqual(keywords[\"kernel_image\"], \"nginx:latest\")\n        self.assertIsInstance(keywords[\"kernel_image\"], str)\n        self.assertEqual(keywords[\"kernel_uid\"], \"1000\")\n        self.assertIsInstance(keywords[\"kernel_uid\"], str)\n\n    def test_volume_mounts_parsed_as_list(self):\n        env = {\n            \"KERNEL_VOLUME_MOUNTS\": '[{\"name\": \"data\", \"mountPath\": \"/data\"}]',\n        }\n        keywords = _build_keywords(env)\n        self.assertIsInstance(keywords[\"kernel_volume_mounts\"], list)\n        self.assertEqual(keywords[\"kernel_volume_mounts\"][0][\"name\"], \"data\")\n\n    def test_volumes_parsed_as_list(self):\n        env = {\n            \"KERNEL_VOLUMES\": '[{\"name\": \"data\", \"emptyDir\": {}}]',\n        }\n        keywords = _build_keywords(env)\n        self.assertIsInstance(keywords[\"kernel_volumes\"], list)\n\n    def test_non_list_volume_rejected(self):\n        env = {\"KERNEL_VOLUME_MOUNTS\": \"not-a-list\"}\n        keywords = _build_keywords(env)\n        self.assertNotIn(\"kernel_volume_mounts\", keywords)\n\n    def test_list_of_strings_volume_rejected(self):\n        \"\"\"List of strings (not dicts) should be rejected to prevent injection via loop items.\"\"\"\n        env = {\"KERNEL_VOLUME_MOUNTS\": '[\"name: data\\\\nmountPath: /data\"]'}\n        keywords = _build_keywords(env)\n        self.assertNotIn(\"kernel_volume_mounts\", keywords)\n\n    def test_mixed_list_volume_rejected(self):\n        \"\"\"List containing both dicts and strings should be rejected.\"\"\"\n        env = 
{\"KERNEL_VOLUME_MOUNTS\": '[{\"name\": \"ok\"}, \"injected\\\\nstring\"]'}\n        keywords = _build_keywords(env)\n        self.assertNotIn(\"kernel_volume_mounts\", keywords)\n\n    def test_yaml_safe_load_not_applied_to_scalars(self):\n        env = {\"KERNEL_WORKING_DIR\": '\"injected\\\\nvalue\"'}\n        keywords = _build_keywords(env)\n        self.assertEqual(keywords[\"kernel_working_dir\"], '\"injected\\\\nvalue\"')\n        self.assertNotIn(\"\\n\", keywords[\"kernel_working_dir\"])\n\n\nclass TestSecurityContextInjection(unittest.TestCase):\n    \"\"\"Test that securityContext injection via KERNEL_WORKING_DIR is blocked.\"\"\"\n\n    def test_security_context_not_overridden(self):\n        env = _base_env()\n        env[\"KERNEL_WORKING_DIR\"] = (\n            '\"/tmp\\\\\"\\\\n\\\\nsecurityContext:\\\\n  runAsUser: 0\\\\n  runAsGroup: 0\\\\n  fsGroup: 100\\\\n\"'\n        )\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = list(yaml.safe_load_all(rendered))\n\n        self.assertEqual(len(docs), 1)\n        sc = docs[0][\"spec\"][\"securityContext\"]\n        self.assertEqual(sc[\"runAsUser\"], 1000)\n        self.assertEqual(sc[\"runAsGroup\"], 100)\n\n    def test_injection_via_kernel_image(self):\n        env = _base_env()\n        env[\"KERNEL_IMAGE\"] = 'nginx\"\\nsecurityContext:\\n  runAsUser: 0'\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = list(yaml.safe_load_all(rendered))\n\n        self.assertEqual(len(docs), 1)\n        sc = docs[0][\"spec\"][\"securityContext\"]\n        self.assertEqual(sc[\"runAsUser\"], 1000)\n\n    def test_injection_via_kernel_namespace(self):\n        env = _base_env()\n        env[\"KERNEL_NAMESPACE\"] = 'default\"\\nsecurityContext:\\n  runAsUser: 0'\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = 
list(yaml.safe_load_all(rendered))\n\n        self.assertEqual(len(docs), 1)\n        sc = docs[0][\"spec\"][\"securityContext\"]\n        self.assertEqual(sc[\"runAsUser\"], 1000)\n\n    def test_injection_via_volume_mounts_string_list_blocked_at_l1(self):\n        \"\"\"L1: list-of-strings in KERNEL_VOLUME_MOUNTS is rejected during parsing.\"\"\"\n        env = _base_env()\n        env[\"KERNEL_VOLUME_MOUNTS\"] = (\n            '[\"{name: data, mountPath: /data}\\\\n  securityContext:\\\\n    runAsUser: 0\"]'\n        )\n        keywords = _build_keywords(env)\n        self.assertNotIn(\"kernel_volume_mounts\", keywords)\n\n    def test_injection_via_volume_mounts_blocked_at_l2(self):\n        \"\"\"L2: even if a string slips into volume_mounts, yaml_safe filter escapes it.\"\"\"\n        env = _base_env()\n        keywords = _build_keywords(env)\n        keywords[\"kernel_volume_mounts\"] = [\n            \"{name: data, mountPath: /data}\\n  securityContext:\\n    runAsUser: 0\"\n        ]\n        rendered = _render_pod_template(keywords)\n        docs = list(yaml.safe_load_all(rendered))\n\n        self.assertEqual(len(docs), 1)\n        sc = docs[0][\"spec\"][\"securityContext\"]\n        self.assertEqual(sc[\"runAsUser\"], 1000)\n        env[\"KERNEL_WORKING_DIR\"] = (\n            '/tmp\\n...\\n---\\napiVersion: v1\\nkind: Pod\\nmetadata:\\n'  # noqa: S108\n            '  name: injected-pod\\nspec:\\n  containers:\\n'\n            '  - name: evil\\n    image: nginx\\n    securityContext:\\n'\n            '      privileged: true\\n...\\n'\n        )\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = [d for d in yaml.safe_load_all(rendered) if d is not None]\n\n        self.assertEqual(len(docs), 1, \"Injected document should not create extra YAML documents\")\n        self.assertEqual(docs[0][\"kind\"], \"Pod\")\n        self.assertEqual(docs[0][\"metadata\"][\"name\"], \"test-pod\")\n\n    def 
test_all_rendered_kinds_are_allowed(self):\n        env = _base_env()\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = [d for d in yaml.safe_load_all(rendered) if d is not None]\n\n        for doc in docs:\n            self.assertIn(\n                doc.get(\"kind\"),\n                ALLOWED_K8S_KINDS,\n                f\"Unexpected kind: {doc.get('kind')}\",\n            )\n\n    def test_duplicate_pod_kind_detected(self):\n        \"\"\"L3: if an attacker somehow injected a second Pod, document count validation catches it.\"\"\"\n        multi_pod_yaml = (\n            \"apiVersion: v1\\nkind: Pod\\nmetadata:\\n  name: legit\\n\"\n            \"---\\n\"\n            \"apiVersion: v1\\nkind: Pod\\nmetadata:\\n  name: evil\\n\"\n        )\n        docs = list(yaml.safe_load_all(multi_pod_yaml))\n        kind_counts: dict[str, int] = {}\n        for doc in docs:\n            if doc:\n                kind = doc.get(\"kind\")\n                kind_counts[kind] = kind_counts.get(kind, 0) + 1\n\n        self.assertEqual(kind_counts.get(\"Pod\"), 2)\n        self.assertGreater(kind_counts[\"Pod\"], 1, \"Should detect duplicate Pod documents\")\n\n\nclass TestNormalOperation(unittest.TestCase):\n    \"\"\"Test that the fix preserves normal kernel launch functionality.\"\"\"\n\n    def test_basic_pod_renders_correctly(self):\n        env = _base_env()\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        docs = list(yaml.safe_load_all(rendered))\n\n        self.assertEqual(len(docs), 1)\n        pod = docs[0]\n        self.assertEqual(pod[\"kind\"], \"Pod\")\n        self.assertEqual(pod[\"metadata\"][\"name\"], \"test-pod\")\n        self.assertEqual(pod[\"metadata\"][\"namespace\"], \"default\")\n        self.assertEqual(pod[\"spec\"][\"containers\"][0][\"image\"], \"elyra/kernel-py:3.2.3\")\n        self.assertEqual(pod[\"spec\"][\"serviceAccountName\"], 
\"default\")\n\n    def test_working_dir_set_correctly(self):\n        env = _base_env()\n        env[\"KERNEL_WORKING_DIR\"] = \"/home/jovyan/work\"\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        pod = yaml.safe_load(rendered)\n\n        self.assertEqual(pod[\"spec\"][\"containers\"][0][\"workingDir\"], \"/home/jovyan/work\")\n\n    def test_resource_limits_rendered(self):\n        env = _base_env()\n        env[\"KERNEL_CPUS\"] = \"500m\"\n        env[\"KERNEL_MEMORY\"] = \"1Gi\"\n        env[\"KERNEL_CPUS_LIMIT\"] = \"1\"\n        env[\"KERNEL_MEMORY_LIMIT\"] = \"2Gi\"\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        pod = yaml.safe_load(rendered)\n\n        resources = pod[\"spec\"][\"containers\"][0][\"resources\"]\n        self.assertEqual(resources[\"requests\"][\"cpu\"], \"500m\")\n        self.assertEqual(resources[\"requests\"][\"memory\"], \"1Gi\")\n        self.assertEqual(resources[\"limits\"][\"cpu\"], \"1\")\n        self.assertEqual(resources[\"limits\"][\"memory\"], \"2Gi\")\n\n    def test_security_context_with_uid_gid(self):\n        env = _base_env()\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        pod = yaml.safe_load(rendered)\n\n        sc = pod[\"spec\"][\"securityContext\"]\n        self.assertEqual(sc[\"runAsUser\"], 1000)\n        self.assertEqual(sc[\"runAsGroup\"], 100)\n        self.assertEqual(sc[\"fsGroup\"], 100)\n\n    def test_volume_mounts_rendered(self):\n        env = _base_env()\n        env[\"KERNEL_VOLUME_MOUNTS\"] = '[{\"name\": \"data-vol\", \"mountPath\": \"/data\"}]'\n        env[\"KERNEL_VOLUMES\"] = '[{\"name\": \"data-vol\", \"emptyDir\": {}}]'\n        keywords = _build_keywords(env)\n        rendered = _render_pod_template(keywords)\n        pod = yaml.safe_load(rendered)\n\n        mounts = pod[\"spec\"][\"containers\"][0][\"volumeMounts\"]\n        
self.assertEqual(len(mounts), 1)\n        self.assertEqual(mounts[0][\"name\"], \"data-vol\")\n\n        volumes = pod[\"spec\"][\"volumes\"]\n        self.assertEqual(len(volumes), 1)\n        self.assertEqual(volumes[0][\"name\"], \"data-vol\")\n\n\nclass TestSparkOperatorTemplate(unittest.TestCase):\n    \"\"\"Test that the Spark operator template is also protected.\"\"\"\n\n    def _render_operator_template(self, keywords: dict) -> str:\n        j_env = Environment(\n            loader=FileSystemLoader(os.path.normpath(OPERATOR_TEMPLATE_DIR)),\n            trim_blocks=True,\n            lstrip_blocks=True,\n            autoescape=select_autoescape(\n                disabled_extensions=(\"j2\", \"yaml\"),\n                default_for_string=True,\n                default=True,\n            ),\n        )\n        j_env.filters[\"yaml_safe\"] = yaml_safe_str\n        return j_env.get_template(\"/sparkoperator.k8s.io-v1beta2.yaml.j2\").render(**keywords)\n\n    def test_injection_via_kernel_image_blocked(self):\n        keywords = {\n            \"kernel_resource_name\": \"test-spark\",\n            \"kernel_image\": 'nginx\\nmalicious:\\n  key: value',\n            \"kernel_id\": \"test-id\",\n            \"spark_context_initialization_mode\": \"none\",\n            \"eg_response_address\": \"1.2.3.4:8080\",\n            \"eg_port_range\": \"0..0\",\n            \"eg_public_key\": \"testkey\",\n            \"kernel_service_account_name\": \"default\",\n            \"kernel_executor_image\": \"elyra/kernel-py:3.2.3\",\n        }\n        rendered = self._render_operator_template(keywords)\n        doc = yaml.safe_load(rendered)\n\n        self.assertEqual(doc[\"kind\"], \"SparkApplication\")\n        self.assertIn(\"\\n\", doc[\"spec\"][\"image\"])\n        self.assertNotIn(\"malicious\", doc)\n\n    def test_normal_spark_app_renders(self):\n        keywords = {\n            \"kernel_resource_name\": \"test-spark\",\n            \"kernel_image\": 
\"elyra/kernel-spark-py:3.2.3\",\n            \"kernel_id\": \"test-id-123\",\n            \"spark_context_initialization_mode\": \"lazy\",\n            \"eg_response_address\": \"10.0.0.1:8080\",\n            \"eg_port_range\": \"10000..11000\",\n            \"eg_public_key\": \"abc123\",\n            \"kernel_service_account_name\": \"spark-sa\",\n            \"kernel_executor_image\": \"elyra/kernel-spark-py:3.2.3\",\n        }\n        rendered = self._render_operator_template(keywords)\n        doc = yaml.safe_load(rendered)\n\n        self.assertEqual(doc[\"kind\"], \"SparkApplication\")\n        self.assertEqual(doc[\"metadata\"][\"name\"], \"test-spark\")\n        self.assertEqual(doc[\"spec\"][\"image\"], \"elyra/kernel-spark-py:3.2.3\")\n        self.assertEqual(doc[\"spec\"][\"driver\"][\"serviceAccount\"], \"spark-sa\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "etc/Makefile",
    "content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n.PHONY: help clean clean-images clean-enterprise-gateway clean-enterprise-gateway-demo clean-demo-base \\\n    clean-kernel-images clean-py clean-tf-py clean-tf-gpu-py clean-r clean-spark-r clean-scala toree-launcher \\\n    kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker clean-kernel-image-puller\n\nSA?=source activate\nENV:=enterprise-gateway-dev\nSHELL:=/bin/bash\nSUPPORTED_ARCHS=linux/arm64 linux/amd64\nPLATFORM_ARCHS=`echo ${SUPPORTED_ARCHS} | sed \"s/ /,/g\"`\n\n# Docker attributes - hub organization and tag.  Modify accordingly\nHUB_ORG:=elyra\n\n# Set NO_CACHE=--no-cache to force docker build to not use cached layers\nNO_CACHE?=\n\nhelp:\n# http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html\n\t@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nclean: ## Make a clean source tree\n\t-rm -rf kernel-launchers/scala/lib\n\t-rm -rf kernel-launchers/scala/toree-launcher/project/project/\n\n#\n# Kernelspec build section *************************************************\n#\n\nKERNELSPECS := kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker\nkernelspecs: $(KERNELSPECS) kernel_image_files\n\nFILE_kernelspecs_all:=../dist/jupyter_enterprise_gateway_kernelspecs-$(VERSION).tar.gz\nFILE_kernelspecs_yarn:=../dist/jupyter_enterprise_gateway_kernelspecs_yarn-$(VERSION).tar.gz\nFILE_kernelspecs_conductor:=../dist/jupyter_enterprise_gateway_kernelspecs_conductor-$(VERSION).tar.gz\nFILE_kernelspecs_kubernetes:=../dist/jupyter_enterprise_gateway_kernelspecs_kubernetes-$(VERSION).tar.gz\nFILE_kernelspecs_docker:=../dist/jupyter_enterprise_gateway_kernelspecs_docker-$(VERSION).tar.gz\n\nFILES_kernelspecs_all:=$(shell find kernel-launchers kernelspecs 
-type f -name '*')\n\nTOREE_LAUNCHER_FILES:=$(shell find kernel-launchers/scala/toree-launcher/src -type f -name '*')\n\n../build/kernelspecs: kernel-launchers/scala/lib  $(FILES_kernelspecs_all)\n\t@rm -rf ../build/kernelspecs\n\t@mkdir -p ../build/kernelspecs\n\t\t# Seed the build tree with initial files\n\tcp -r kernelspecs ../build\n    # Distribute language and config-sensitive files.\n    # On-prem kernelspecs get launcher files in the kernelspec hierarchy\n\t@echo ../build/kernelspecs/python_distributed | xargs -t -n 1 cp -r kernel-launchers/python/scripts\n\t@echo ../build/kernelspecs/dask_python_* | xargs -t -n 1 cp -r kernel-launchers/python/scripts\n\t@echo ../build/kernelspecs/spark_python_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/python/scripts\n\t@echo ../build/kernelspecs/spark_R_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/R/scripts\n\t@echo ../build/kernelspecs/spark_scala_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/scala/lib\n\t# Container-based kernelspecs (and operators) just get the container launchers\n\t@echo ../build/kernelspecs/{python,R,scala,python_tf,python_tf_gpu}_kubernetes | xargs -t -n 1 cp -r kernel-launchers/kubernetes/*\n\t@echo ../build/kernelspecs/spark_{python,R,scala}_kubernetes | xargs -t -n 1 cp -r kernel-launchers/kubernetes/*\n\t@echo ../build/kernelspecs/{python,R,scala,python_tf,python_tf_gpu}_docker | xargs -t -n 1 cp -r kernel-launchers/docker/*\n\t@echo ../build/kernelspecs/spark_python_operator | xargs -t -n 1 cp -r kernel-launchers/operators/*\n        # Populate kernel resources.  
Because tensorflow is also python, it should be last.\n\t@echo ../build/kernelspecs/*R* | xargs -t -n 1 cp -r kernel-resources/ir/*\n\t@echo ../build/kernelspecs/*scala* | xargs -t -n 1 cp -r kernel-resources/apache_toree/*\n\t@echo ../build/kernelspecs/*python* | xargs -t -n 1 cp -r kernel-resources/python/*\n\t@echo ../build/kernelspecs/*tf* | xargs -t -n 1 cp -r kernel-resources/tensorflow/*\n    # Perform the copy again to enable local, per-kernel, overrides\n\tcp -r kernelspecs ../build\n\t@(cd ../build/kernelspecs; find . -name 'kernel.json' -print0 | xargs -0 sed -i.bak \"s/VERSION/$(TAG)/g\"; find . -name *.bak -print0 | xargs -0 rm -f)\n\t@mkdir -p ../dist\n\nPATTERN_kernelspecs_all := *\nPATTERN_kernelspecs_yarn := *_yarn_*\nPATTERN_kernelspecs_conductor := *_conductor_*\nPATTERN_kernelspecs_kubernetes := {*_kubernetes,*_operator}\nPATTERN_kernelspecs_docker := *_docker\n\ndefine BUILD_KERNELSPEC\n$1: $$(FILE_$1)\n$$(FILE_$1): ../build/kernelspecs\n\trm -f $$(FILE_$1)\n\t@( cd ../build/kernelspecs; tar -pvczf \"../$$(FILE_$1)\" $$(PATTERN_$1) )\nendef\n$(foreach kernelspec,$(KERNELSPECS),$(eval $(call BUILD_KERNELSPEC,$(kernelspec))))\n\nkernel-launchers/scala/lib: $(TOREE_LAUNCHER_FILES)\n\t-rm -rf kernel-launchers/scala/lib\n\tmkdir -p kernel-launchers/scala/lib\n\t@(cd kernel-launchers/scala/toree-launcher; sbt -Dversion=$(VERSION) -Dspark_version=$(SPARK_VERSION) package; cp target/scala-2.12/*.jar ../lib)\n\tcurl -L https://repository.apache.org/content/repositories/releases/org/apache/toree/toree-assembly/0.5.0-incubating/toree-assembly-0.5.0-incubating.jar -o kernel-launchers/scala/lib/toree-assembly-0.5.0-incubating.jar\n\nKERNEL_IMAGE_FILE:=../dist/jupyter_enterprise_gateway_kernel_image_files-$(VERSION).tar.gz\nkernel_image_files: ../build/kernel_image_files\n\trm -f $(KERNEL_IMAGE_FILE)\n\t@( cd ../build/kernel_image_files; tar -pvczf \"../$(KERNEL_IMAGE_FILE)\" . 
)\n\n../build/kernel_image_files: kernel-launchers/scala/lib kernel-launchers/bootstrap/bootstrap-kernel.sh\n\t@rm -rf ../build/kernel_image_files\n\t@mkdir -p ../build/kernel_image_files/kernel-launchers\n\tcp kernel-launchers/bootstrap/* ../build/kernel_image_files\n\tcp -r kernel-launchers/{python,R,scala} ../build/kernel_image_files/kernel-launchers\n\trm -rf ../build/kernel_image_files/kernel-launchers/scala/{\\.*DS*,toree-launcher}  # leave only lib\n\n#\n# Docker image build section ***********************************************\n#\n\nKERNEL_IMAGES := kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py\nDOCKER_IMAGES := demo-base enterprise-gateway-demo enterprise-gateway kernel-image-puller $(KERNEL_IMAGES)\nPUSHED_IMAGES := demo-base enterprise-gateway-demo enterprise-gateway kernel-image-puller $(KERNEL_IMAGES)\n\ndocker-images: $(DOCKER_IMAGES)\nkernel-images: $(KERNEL_IMAGES)\n\npush-images: push-enterprise-gateway-demo push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-tf-py push-kernel-r push-kernel-spark-r push-kernel-scala push-kernel-image-puller\n\nclean-images: clean-enterprise-gateway-demo clean-demo-base clean-enterprise-gateway clean-kernel-image-puller clean-kernel-images\nclean-kernel-images: clean-kernel-py clean-kernel-spark-py clean-kernel-tf-py clean-kernel-tf-gpu-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala\n\n# Extra dependencies for each docker image...\nDEPENDS_demo-base:\nDEPENDS_enterprise-gateway-demo: $(FILE_kernelspecs_all)\nDEPENDS_enterprise-gateway: $(FILE_kernelspecs_all)\nDEPENDS_kernel-image-puller:\nDEPENDS_kernel-py DEPENDS_kernel-spark-py DEPENDS_kernel-r DEPENDS_kernel-spark-r DEPENDS_kernel-scala DEPENDS_kernel-tf-py DEPENDS_kernel-tf-gpu-py: $(FILE_kernelspecs_kubernetes) $(FILE_kernelspecs_docker)\n\n# Extra targets for each docker image...\nTARGETS_demo-base:\nTARGETS_kernel-image-puller:\nTARGETS_enterprise-gateway 
TARGETS_enterprise-gateway-demo: kernelspecs\n\t@make -C .. bdist\nTARGETS_kernel-py TARGETS_kernel-spark-py TARGETS_kernel-r TARGETS_kernel-spark-r TARGETS_kernel-scala TARGETS_kernel-tf-py TARGETS_kernel-tf-gpu-py: kernelspecs\n\n# Extra files for each docker image...\nFILES_demo-base :=\nFILES_kernel-image-puller :=\nFILES_enterprise-gateway-demo := ../dist/jupyter_enterprise_gateway_kernelspecs-* ../dist/jupyter_enterprise_gateway*.whl\nFILES_enterprise-gateway := ../dist/jupyter_enterprise_gateway_kernel_image_files* ../dist/jupyter_enterprise_gateway_kernelspecs-* ../dist/jupyter_enterprise_gateway*.whl\nFILES_kernel-py := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-spark-py := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-tf-py := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-tf-gpu-py := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-r := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-spark-r := ../dist/jupyter_enterprise_gateway_kernel_image_files*\nFILES_kernel-scala := ../dist/jupyter_enterprise_gateway_kernel_image_files*\n\n# Generate image creation targets for each entry in $(DOCKER_IMAGES).  Switch 'eval' to 'info' to see what is produced.\ndefine BUILD_IMAGE\n$1: ../.image-$1\n../.image-$1: docker/$1/* DEPENDS_$1\n\t@make clean-$1 TARGETS_$1\n\t@mkdir -p ../build/docker/$1\n\t@cp -r docker/$1/* $$(FILES_$1) ../build/docker/$1\nifdef MULTIARCH_BUILD\n\t@echo \"starting buildx builder for $1\"\n\t-@(docker buildx rm $1)\n\t(docker buildx create --use --name $1)\n\t(cd ../build/docker/$1; docker buildx build ${NO_CACHE} --platform $(PLATFORM_ARCHS) --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) . 
--push)\n\t@echo \"remove builder instance $1\"\n\t-(docker buildx rm $1)\nelse ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), $(SUPPORTED_ARCHS)))\n\t@echo \"Building docker image for $(TARGET_ARCH)\"\n\t(cd ../build/docker/$1; docker build ${NO_CACHE} --platform ${TARGET_ARCH} --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) .)\n\t@-docker images $(HUB_ORG)/$1:$(TAG)\nelse\n\t@echo \"TARGET_ARCH not defined or not in supported platforms: $(PLATFORM_ARCHS). Building docker image for default platform\"\n\t(cd ../build/docker/$1; docker build ${NO_CACHE} --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) .)\n\t@-docker images $(HUB_ORG)/$1:$(TAG)\nendif\n\t@touch ../.image-$1\nendef\n$(foreach image,$(DOCKER_IMAGES),$(eval $(call BUILD_IMAGE,$(image))))\n\n# Generate clean-xxx targets for each entry in $(DOCKER_IMAGES).  Switch 'eval' to 'info' to see what is produced.\ndefine CLEAN_IMAGE\nclean-$1:\n\t@rm -f ../.image-$1\n\t@-docker rmi -f $(HUB_ORG)/$1:$(TAG)\nendef\n$(foreach image,$(DOCKER_IMAGES),$(eval $(call CLEAN_IMAGE,$(image))))\n\n# Publish each publish image on $(PUSHED_IMAGES) to DockerHub.  Switch 'eval' to 'info' to see what is produced.\ndefine PUSH_IMAGE\npush-$1:\n\tdocker push $(HUB_ORG)/$1:$(TAG)\nendef\n$(foreach image,$(PUSHED_IMAGES),$(eval $(call PUSH_IMAGE,$(image))))\n"
  },
  {
    "path": "etc/docker/demo-base/Dockerfile",
    "content": "ARG BASE_CONTAINER=continuumio/miniconda3:24.1.2-0\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\nARG SPARKR_VERSION=3.1.2\nARG NB_USER=\"jovyan\"\nARG NB_UID=\"1000\"\nARG NB_GID=\"100\"\n\nUSER root\n\nENV HADOOP_HOME=/usr/hdp/current/hadoop \\\n    ANACONDA_HOME=/opt/conda\n\nENV SHELL=/bin/bash \\\n    NB_USER=$NB_USER \\\n    NB_UID=$NB_UID \\\n    NB_GID=$NB_GID \\\n    LC_ALL=en_US.UTF-8 \\\n    LANG=en_US.UTF-8 \\\n    LANGUAGE=en_US.UTF-8 \\\n    JAVA_HOME=/usr/lib/jvm/java \\\n    SPARK_HOME=/usr/hdp/current/spark2-client \\\n    PYSPARK_PYTHON=$ANACONDA_HOME/bin/python \\\n    HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop\n\nENV HOME=/home/$NB_USER \\\n    PATH=$JAVA_HOME/bin:$ANACONDA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH\n\nENV SPARK_VER=$SPARK_VERSION\nENV HADOOP_VER=3.3.1\n\n# INSTALL / DOWNLOAD ALL NEEDED PACKAGES\nRUN dpkg --purge --force-depends ca-certificates-java \\\n    && apt-get update && apt-get -yq dist-upgrade \\\n    && apt-get install -yq --no-install-recommends \\\n    wget \\\n    bzip2 \\\n    tar \\\n    curl \\\n    less \\\n    nano \\\n    ca-certificates \\\n    libkrb5-dev \\\n    sudo \\\n    locales \\\n    gcc \\\n    fonts-liberation \\\n    unzip \\\n    libsm6 \\\n    libxext-dev \\\n    libxrender1 \\\n    openssh-server \\\n    openssh-client \\\n    openjdk-11-jdk-headless \\\n    ca-certificates-java \\\n    && apt-get clean \\\n    && rm -rf /var/lib/apt/lists/*\n\nRUN ln -s $(readlink -f /usr/bin/javac | sed \"s:/bin/javac::\") ${JAVA_HOME}\n\nRUN echo \"en_US.UTF-8 UTF-8\" > /etc/locale.gen && \\\n    locale-gen\n\nADD fix-permissions /usr/local/bin/fix-permissions\n# Create jovyan user with UID=1000 and in the 'users' group\n# and make sure these dirs are writable by the `users` group.\nRUN groupadd wheel -g 11 && \\\n    echo \"auth required pam_wheel.so use_uid\" >> /etc/pam.d/su && \\\n    useradd -m -s /bin/bash -N -u $NB_UID $NB_USER && \\\n    mkdir -p /usr/hdp/current && \\\n    mkdir -p 
/usr/local/share/jupyter && \\\n    chown $NB_USER:$NB_GID $ANACONDA_HOME && \\\n    chmod g+w /etc/passwd && \\\n    chmod +x /usr/local/bin/fix-permissions && \\\n    fix-permissions $HOME && \\\n    fix-permissions $ANACONDA_HOME && \\\n    fix-permissions /usr/hdp/current && \\\n    fix-permissions /usr/local/share/jupyter\n\n# Create service user 'jovyan'. Pin uid/gid to 1000.\nRUN useradd -m -s /bin/bash -N -u 1111 elyra && \\\n    useradd -m -s /bin/bash -N -u 1112 bob  && \\\n    useradd -m -s /bin/bash -N -u 1113 alice\n\nUSER $NB_UID\n\n# Setup work directory for backward-compatibility\nRUN mkdir /home/$NB_USER/work && \\\n    fix-permissions /home/$NB_USER\n\n# DOWNLOAD HADOOP AND SPARK\nRUN curl -sL https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VER/hadoop-$HADOOP_VER.tar.gz | tar -xz -C /usr/hdp/current\nRUN curl -sL https://archive.apache.org/dist/spark/spark-$SPARK_VER/spark-$SPARK_VER-bin-hadoop3.2.tgz | tar -xz -C /usr/hdp/current\n# SETUP SPARK AND HADOOP SYMLINKS\nRUN cd /usr/hdp/current && ln -s ./hadoop-$HADOOP_VER hadoop && ln -s ./spark-$SPARK_VER-bin-hadoop3.2 spark2-client\n\nUSER root\n\nRUN conda install mamba -n base -c conda-forge && \\\n    mamba install --yes --quiet -c conda-forge \\\n    'jupyter' \\\n    'r-devtools' \\\n    'r-stringr' \\\n    'r-argparse' && \\\n    mamba clean -y --all &&\\\n    fix-permissions $ANACONDA_HOME && \\\n    fix-permissions /home/$NB_USER\n\nUSER $NB_UID\n\n#Package ‘SparkR’ currently supports 3.1.2, so we'll set its own ARG\n#https://cran.r-project.org/src/contrib/Archive/SparkR/\n\nRUN Rscript -e 'install.packages(\"IRkernel\", repos=\"https://mirror.las.iastate.edu/CRAN/\", lib=\"/opt/conda/lib/R/library\")' \\\n            -e 'IRkernel::installspec(prefix = \"/usr/local\")' \\\n            -e 'download.file(url = \"https://cran.r-project.org/src/contrib/Archive/SparkR/SparkR_$SPARKR_VERSION.tar.gz\", destfile = \"SparkR_$SPARKR_VERSION.tar.gz\")' \\\n            -e 
'install.packages(pkgs=\"SparkR_$SPARKR_VERSION.tar.gz\", type=\"source\", repos=NULL, lib=\"/opt/conda/lib/R/library\")' \\\n            -e 'unlink(\"SparkR_$SPARKR_VERSION.tar.gz\")'\n\n# SETUP HADOOP CONFIGS\nRUN sed -i '/^export JAVA_HOME/ s:.*:export JAVA_HOME=/usr/lib/jvm/java\\nexport HADOOP_HOME=/usr/hdp/current/hadoop\\nexport HADOOP_HOME=/usr/hdp/current/hadoop\\n:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh\nRUN sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=/usr/hdp/current/hadoop/etc/hadoop/:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh\n# SETUP PSEUDO - DISTRIBUTED CONFIGS FOR HADOOP\nCOPY [\"core-site.xml.template\", \"hdfs-site.xml\", \"mapred-site.xml\", \"yarn-site.xml.template\", \\\n      \"$HADOOP_HOME/etc/hadoop/\"]\n\n# working around docker.io build error\nRUN ls -la /usr/hdp/current/hadoop/etc/hadoop/*-env.sh && \\\n    chmod +x /usr/hdp/current/hadoop/etc/hadoop/*-env.sh && \\\n    ls -la /usr/hdp/current/hadoop/etc/hadoop/*-env.sh\n\n# Install Toree\nRUN cd /tmp && \\\n    curl -O https://archive.apache.org/dist/incubator/toree/0.5.0-incubating/toree-pip/toree-0.5.0.tar.gz && \\\n    pip install --upgrade setuptools --user && \\\n    pip install /tmp/toree-0.5.0.tar.gz && \\\n    jupyter toree install --spark_home=$SPARK_HOME --kernel_name=\"Spark $SPARK_VER\" --interpreters=Scala && \\\n    rm -f /tmp/toree-0.5.0.tar.gz && \\\n    fix-permissions $ANACONDA_HOME && \\\n    fix-permissions /home/$NB_USER\n\n# SETUP PASSWORDLESS SSH FOR $NB_USER\nRUN ssh-keygen -q -N \"\" -t rsa -f /home/$NB_USER/.ssh/id_rsa && \\\n    cp /home/$NB_USER/.ssh/id_rsa.pub /home/$NB_USER/.ssh/authorized_keys && \\\n    chmod 0700 /home/$NB_USER\n\nUSER root\n\n# SETUP PASSWORDLESS SSH\nRUN yes y | ssh-keygen -q -N \"\" -t dsa -f /etc/ssh/ssh_host_dsa_key && \\\n    yes y | ssh-keygen -q -N \"\" -t rsa -f /etc/ssh/ssh_host_rsa_key && \\\n    yes y | ssh-keygen -q -N \"\" -t rsa -f /root/.ssh/id_rsa && \\\n    cp /root/.ssh/id_rsa.pub 
/root/.ssh/authorized_keys\n\nRUN ssh-keygen -A\nCOPY ssh_config /root/.ssh/config\nRUN chmod 600 /root/.ssh/config && \\\n    chown root:root /root/.ssh/config && \\\n    echo \"Port 2122\" >> /etc/ssh/sshd_config && \\\n    echo \"${NB_USER} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers\nRUN service ssh restart\n\nCOPY ssh_config /home/$NB_USER/.ssh/config\nRUN chmod 600 /home/$NB_USER/.ssh/config && \\\n    chown $NB_USER: /home/$NB_USER/.ssh/config\n\nCOPY bootstrap-yarn-spark.sh /usr/local/bin/\nRUN chown $NB_USER: /usr/local/bin/bootstrap-yarn-spark.sh && \\\n    chmod 0700 /usr/local/bin/bootstrap-yarn-spark.sh\n\nCMD [\"/usr/local/bin/bootstrap-yarn-spark.sh\"]\n\nLABEL Hadoop.version=$HADOOP_VER\nLABEL Spark.version=$SPARK_VER\n\n# Hdfs ports\nEXPOSE 50010 50020 50070 50075 50090 8020 9000 \\\n# Mapred ports\n19888 \\\n#Yarn ports\n8030 8031 8032 8033 8040 8042 8088 \\\n#Other ports\n49707 2122\n\nUSER $NB_USER\n"
  },
  {
    "path": "etc/docker/demo-base/README.md",
    "content": "# What this image Gives You\n\n- Ubuntu base image : bionic\n- Hadoop 2.7.7\n- Apache Spark 2.4.6\n- Java 1.8 runtime\n- Mini-conda latest (python 3.11) with R packages\n- Toree 0.4.0-incubating\n- `jovyan` service user, with system users `elyra`, `bob`, and `alice`. The jovyan uid is `1000` to match other jupyter\n  images.\n- Password-less ssh for service user\n- Users have HDFS folder setup at startup\n\n# Basic Use\n\nAs of the 0.9.0 release of [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway/releases)\nthis image can be started as a separate YARN cluster to better demonstrate remote kernel capabilities. See section\n[Dual Mode](https://hub.docker.com/r/elyra/enterprise-gateway/#dual_mode) on the enterprise-gateway page for command\nusage.\n"
  },
  {
    "path": "etc/docker/demo-base/bootstrap-yarn-spark.sh",
    "content": "#!/bin/bash\n\n# This file is a copy of /etc/bootstrap.sh but sets up the YARN cluster in its \"deamon\" case.\n# It also checks for --help or no options before starting anything...\n\nFROM=${FROM:-\"YARN\"}\n\nCMD=${1:-\"--help\"}\nif [[ \"$CMD\" == \"--help\" ]];\nthen\n\techo \"\"\n\techo \"usage: docker run {-it|-d} --rm -h <container-hostname> -p 8088:8088 -p 8042:8042 <docker-opts> <docker-image> <command>\"\n\techo \"\"\n\techo \"where <command> is:\"\n\techo \"    --yarn  ... Runs container as standalone YARN master - assumed to be used with Enterprise Gateway\"\n\techo \"    --help  ... Produces this message.\"\n\techo \"    <other> ... Invokes '<other>'.  Use <other>='/bin/bash' to explore within the container.\"\n\techo \"\"\n\techo \"Tips:\"\n\techo \"1) You can target a different YARN cluster by using '-e YARN_HOST=<myOtherYarnMaster>'\"\n\techo \"2) You can \\\"bring your own kernels\\\" by mounting to /tmp/byok/kernels (e.g., -v my-kernels-dir:/tmp/byok/kernels)\"\n\techo \"3) It is advised that ports '8088' and '8042' be mapped to host ports, although the host port numbers are not\"\n\techo \"   required to be '8088' and '8042'. 
\"\n\texit 0\nfi\n\n: ${HADOOP_HOME:=/usr/hdp/current/hadoop}\n: ${YARN_HOST:=$HOSTNAME}\n: ${SPARK_HOME:=/usr/hdp/current/spark2-client}\n: ${SPARK_VER:=3.2.1}\n: ${JAVA_HOME:=/usr/lib/jvm/java}\n\necho \"export JAVA_HOME=${JAVA_HOME}\" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh\n\n# Set all the hadoop envs for this shell\n$HADOOP_HOME/etc/hadoop/hadoop-env.sh\n\nrm -f /tmp/*.pid\n\n# installing libraries if any - (resource urls added comma separated to the ACP system variable)\ncd $HADOOP_HOME/share/hadoop/common ; for cp in ${ACP//,/ }; do  echo == $cp; curl -LO $cp ; done; cd -\n\n## altering the hostname in core-site and enterprise-gateway startup configuration\nsed s/HOSTNAME/$YARN_HOST/ /usr/hdp/current/hadoop/etc/hadoop/core-site.xml.template > /usr/hdp/current/hadoop/etc/hadoop/core-site.xml\nsed s/HOSTNAME/$YARN_HOST/ /usr/hdp/current/hadoop/etc/hadoop/yarn-site.xml.template > /usr/hdp/current/hadoop/etc/hadoop/yarn-site.xml\n#\n# setting spark defaults\ncp $SPARK_HOME/conf/spark-defaults.conf.template  $SPARK_HOME/conf/spark-defaults.conf\n# set spark.yarn.jars so spark will stop uploaded jars to hdfs everytime\necho \"spark.yarn.jars hdfs://$YARN_HOST:9000/spark/*.jar\" >>  $SPARK_HOME/conf/spark-defaults.conf\n# place metastore db and derby.log in /tmp\necho \"spark.driver.extraJavaOptions -Dderby.system.home=/tmp\" >>  $SPARK_HOME/conf/spark-defaults.conf\n\n##/usr/sbin/rsyslog\necho \"********** STARTING SSH DAEMON ***********\"\nsudo service ssh restart\n\n# If we're not running in standalone mode, don't run as jovyan.\n# If we're running in standalone mode, startup yarn, hdfs, etc.\nif [[ \"$YARN_HOST\" == \"$HOSTNAME\" || \"$FROM\" == \"YARN\" ]];\nthen\n    echo \"********** FORMATTING NAMENODE ***********\"\n    $HADOOP_HOME/bin/hdfs namenode -format\n    $HADOOP_HOME/sbin/start-dfs.sh\n    $HADOOP_HOME/sbin/start-yarn.sh\n\n    echo \"********** LEAVING HDFS SAFE MODE...... 
***********\"\n    $HADOOP_HOME/bin/hadoop dfsadmin -safemode leave\n\n    echo \"********** UPLOADING SPARK JARS TO HDFS..... ***********\"\n    hdfs dfs -put $SPARK_HOME/jars /spark\n\n    ## Add HDFS folders for our users (jovyan, bob, alice)...\n    echo \"Setting up HDFS folders for Enterprise Gateway users...\"\n    hdfs dfs -mkdir -p /user/{jovyan,bob,alice,root} /tmp/hive\n    hdfs dfs -chown jovyan:jovyan /user/jovyan\n    hdfs dfs -chown bob:bob /user/bob\n    hdfs dfs -chown alice:alice /user/alice\n    hdfs dfs -chmod 0777 /tmp/hive\n\nelif [[ \"$CMD\" == \"--yarn\" ]];\nthen\n    echo \"YARN_HOST cannot be different from HOSTNAME when using --yarn! YARN_HOST=$YARN_HOST != HOSTNAME=$HOSTNAME\"\n    exit 1\nfi\n\nif [[ \"$CMD\" == \"--yarn\" ]];\nthen\n    echo \"YARN application logs can be found at '/usr/hdp/current/hadoop/logs/userlogs'\"\n    prev_count=0\n    while [ 1 ]\n    do\n        # Every minute list any new application directories that have been created since\n        # last time.\n        sleep 60\n        if ls -ld /usr/hdp/current/hadoop/logs/userlogs/application* > /dev/null 2>&1;\n        then\n            count=`ls -ld /usr/hdp/current/hadoop/logs/userlogs/application*|wc -l`\n            if [[ $count > $prev_count ]];\n            then\n                new_apps=`expr $count - $prev_count`\n                ls -ldt /usr/hdp/current/hadoop/logs/userlogs/application*|head --lines=$new_apps\n            fi\n            # reset each time in case count < prev_count\n            prev_count=$count\n        fi\n    done\nelif [[ \"$FROM\" == \"YARN\" ]];\nthen\n    echo \"\"\n    echo \"Note:  YARN application logs can be found at '/usr/hdp/current/hadoop/logs/userlogs'\"\n    \"$*\"\nfi\n\nexit 0\n"
  },
  {
    "path": "etc/docker/demo-base/core-site.xml.template",
    "content": "  <configuration>\n      <property>\n          <name>fs.defaultFS</name>\n          <value>hdfs://HOSTNAME:9000</value>\n      </property>\n  </configuration>\n"
  },
  {
    "path": "etc/docker/demo-base/fix-permissions",
    "content": "#!/bin/bash\n# set permissions on a directory\n# after any installation, if a directory needs to be (human) user-writable,\n# run this script on it.\n# It will make everything in the directory owned by the group $NB_GID\n# and writable by that group.\n# Deployments that want to set a specific user id can preserve permissions\n# by adding the `--group-add users` line to `docker run`.\n\n# uses find to avoid touching files that already have the right permissions,\n# which would cause massive image explosion\n\n# right permissions are:\n# group=$NB_GID\n# AND permissions include group rwX (directory-execute)\n# AND directories have setuid,setgid bits set\n\nset -e\n\nfor d in \"$@\"; do\n  find \"$d\" \\\n    ! \\( \\\n      -group $NB_GID \\\n      -a -perm -g+rwX  \\\n    \\) \\\n    -exec chgrp $NB_GID {} \\; \\\n    -exec chmod g+rwX {} \\;\n  # setuid,setgid *on directories only*\n  find \"$d\" \\\n    \\( \\\n        -type d \\\n        -a ! -perm -6000  \\\n    \\) \\\n    -exec chmod +6000 {} \\;\ndone\n"
  },
  {
    "path": "etc/docker/demo-base/hdfs-site.xml",
    "content": "<configuration>\n    <property>\n        <name>dfs.replication</name>\n        <value>1</value>\n    </property>\n</configuration>\n"
  },
  {
    "path": "etc/docker/demo-base/mapred-site.xml",
    "content": "<configuration>\n    <property>\n        <name>mapreduce.framework.name</name>\n        <value>yarn</value>\n    </property>\n</configuration>\n"
  },
  {
    "path": "etc/docker/demo-base/ssh_config",
    "content": "Host *\n  UserKnownHostsFile /dev/null\n  StrictHostKeyChecking no\n  LogLevel quiet\n  Port 2122\n"
  },
  {
    "path": "etc/docker/demo-base/yarn-site.xml.template",
    "content": "\n<configuration>\n\n    <property>\n        <name>yarn.nodemanager.vmem-check-enabled</name>\n        <value>false</value>\n    </property>\n\n    <property>\n        <name>yarn.nodemanager.aux-services</name>\n        <value>mapreduce_shuffle</value>\n    </property>\n\n    <property>\n    <description>\n      Number of seconds after an application finishes before the nodemanager's\n      DeletionService will delete the application's localized file directory\n      and log directory.\n\n      To diagnose Yarn application problems, set this property's value large\n      enough (for example, to 600 = 10 minutes) to permit examination of these\n      directories. After changing the property's value, you must restart the\n      nodemanager in order for it to have an effect.\n\n      The roots of Yarn applications' work directories is configurable with\n      the yarn.nodemanager.local-dirs property (see below), and the roots\n      of the Yarn applications' log directories is configurable with the\n      yarn.nodemanager.log-dirs property (see also below).\n    </description>\n    <name>yarn.nodemanager.delete.debug-delay-sec</name>\n    <value>600</value>\n  </property>\n\n  <property>\n    <name>yarn.resourcemanager.scheduler.address</name>\n    <value>HOSTNAME:8030</value>\n  </property>\n  <property>\n    <name>yarn.resourcemanager.address</name>\n    <value>HOSTNAME:8032</value>\n  </property>\n  <property>\n    <name>yarn.resourcemanager.webapp.address</name>\n    <value>HOSTNAME:8088</value>\n  </property>\n  <property>\n    <name>yarn.resourcemanager.resource-tracker.address</name>\n    <value>HOSTNAME:8031</value>\n  </property>\n  <property>\n    <name>yarn.resourcemanager.admin.address</name>\n    <value>HOSTNAME:8033</value>\n  </property>\n  <property>\n      <name>yarn.application.classpath</name>\n      <value> /usr/hdp/current/hadoop/etc/hadoop, /usr/hdp/current/hadoop/share/hadoop/common/*, 
/usr/hdp/current/hadoop/share/hadoop/common/lib/*, /usr/hdp/current/hadoop/share/hadoop/hdfs/*, /usr/hdp/current/hadoop/share/hadoop/hdfs/lib/*, /usr/hdp/current/hadoop/share/hadoop/mapreduce/*, /usr/hdp/current/hadoop/share/hadoop/mapreduce/lib/*, /usr/hdp/current/hadoop/share/hadoop/yarn/*, /usr/hdp/current/hadoop/share/hadoop/yarn/lib/*</value>\n  </property>\n\n</configuration>\n"
  },
  {
    "path": "etc/docker/docker-compose.yml",
    "content": "version: \"3.5\"\n\n# A docker user network is created and referenced by the service.  This network\n# must also get conveyed to launched kernel containers and that occurs via the env variable: EG_DOCKER_NETWORK\n\n# Notes (FIXMEs):\n# 1. We need to address the need to run as UID 0 (root).  This appears to be required inorder to create containers/services from within.\n# 2. Using endpoint-mode dnsrr (which appears to be required inorder for kernel container to send the connection info response back)\n# also required mode=host on any published ports. :-(\n# 3. We only use one replica since session affinity is another point of investigation in Swarm\nservices:\n  enterprise-gateway:\n    image: elyra/enterprise-gateway:dev\n    user: root\n    volumes:\n      - \"/var/run/docker.sock:/var/run/docker.sock\"\n      # It's often helpful to mount the kernelspec files from the host into the container.\n      # Since this could be a deployed to a swarm cluster, it is recommended in this case that these be mounted on an\n      # NFS volume available to all nodes of the cluster, or a volume plugin is used instead of a bind mount.\n      # - /usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels\n    environment:\n      - \"EG_DOCKER_NETWORK=${EG_DOCKER_NETWORK:-enterprise-gateway_enterprise-gateway}\"\n      - \"EG_KERNEL_LAUNCH_TIMEOUT=${EG_KERNEL_LAUNCH_TIMEOUT:-60}\"\n      - \"EG_KERNEL_INFO_TIMEOUT=${EG_KERNEL_INFO_TIMEOUT:-60}\"\n      - \"EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-3600}\"\n      # Use double-defaulting for B/C.  
Support for EG_KERNEL_WHITELIST will be removed in a future release\n      - \"EG_ALLOWED_KERNELS=${EG_ALLOWED_KERNELS:-${EG_KERNEL_WHITELIST:-'r_docker','python_docker','python_tf_docker','python_tf_gpu_docker','scala_docker'}}\"\n      - \"EG_MIRROR_WORKING_DIRS=${EG_MIRROR_WORKING_DIRS:-False}\"\n      - \"EG_RESPONSE_PORT=${EG_RESPONSE_PORT:-8877}\"\n      - \"KG_PORT=${KG_PORT:-8888}\"\n    networks:\n      - \"enterprise-gateway\"\n    labels:\n      app: \"enterprise-gateway\"\n      component: \"enterprise-gateway\"\n    deploy:\n      replicas: 1\n      endpoint_mode: dnsrr\n      labels:\n        app: \"enterprise-gateway\"\n        component: \"enterprise-gateway\"\n\n  enterprise-gateway-proxy:\n    image: haproxy:alpine\n    ports:\n      - ${KG_PORT:-8888}:8888\n      - 9088:9088\n    networks:\n      - \"enterprise-gateway\"\n    entrypoint: \"\"\n    command:\n      - /bin/sh\n      - -c\n      - |\n        cat <<EOF > /usr/local/etc/haproxy/haproxy.cfg\n        global\n          maxconn 4096\n          daemon\n          log stdout format raw local0\n\n        defaults\n          log global\n          option httplog\n          mode http\n          option  http-server-close\n          option  dontlognull\n          option  redispatch\n          option  contstats\n          retries 3\n          backlog 10000\n          timeout client          25s\n          timeout connect          5s\n          timeout server          25s\n          timeout tunnel        3600s\n          timeout http-keep-alive  1s\n          timeout http-request    15s\n          timeout queue           30s\n          timeout tarpit          60s\n          default-server inter 3s rise 2 fall 3\n          option forwardfor\n\n        listen stats\n          bind :9088\n          mode http\n          stats enable\n          stats refresh 10s\n          stats realm Haproxy\\ Statistics\n          stats show-node\n          stats uri /\n\n        resolvers docker\n          nameserver 
dns 127.0.0.11:53\n          hold valid 1s\n\n        frontend proxy\n          bind 0.0.0.0:8888 maxconn 10000\n          option forwardfor\n          default_backend enterprise-gateway\n\n        backend enterprise-gateway\n          dynamic-cookie-key ENTERPRISE_KEY\n          cookie SRVID insert dynamic\n          server-template enterprise-gateway 2 enterprise-gateway:8888 check resolvers docker\n        EOF\n        exec /docker-entrypoint.sh haproxy -f /usr/local/etc/haproxy/haproxy.cfg\n\nnetworks:\n  enterprise-gateway:\n    name: enterprise-gateway\n    driver: overlay\n"
  },
  {
    "path": "etc/docker/enterprise-gateway/Dockerfile",
    "content": "ARG BASE_CONTAINER=jupyter/minimal-notebook:2023-03-13\n\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\n\nENV SPARK_VER=$SPARK_VERSION\nENV SPARK_HOME=/opt/spark\n\n\nRUN mamba install --quiet --yes \\\n    cffi \\\n    send2trash \\\n    requests \\\n    future \\\n    pycryptodomex && \\\n    conda clean --all && \\\n    fix-permissions $CONDA_DIR && \\\n    fix-permissions /home/$NB_USER\n\nUSER root\n\nRUN apt update && apt install -yq curl openjdk-8-jdk\n\nENV JAVA_HOME=/usr/lib/jvm/java\nRUN ln -s $(readlink -f /usr/bin/javac | sed \"s:/bin/javac::\") ${JAVA_HOME}\n\n# Download and install Spark\nRUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \\\n    tar -xz -C /opt && \\\n    ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME && \\\n    mkdir -p /usr/hdp/current && \\\n    ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 /usr/hdp/current/spark2-client\n\n# Install Enterprise Gateway wheel and kernelspecs\nCOPY jupyter_enterprise_gateway*.whl /tmp/\nRUN pip install /tmp/jupyter_enterprise_gateway*.whl && \\\n\trm -f /tmp/jupyter_enterprise_gateway*.whl\n\nADD jupyter_enterprise_gateway_kernelspecs*.tar.gz /usr/local/share/jupyter/kernels/\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\nCOPY start-enterprise-gateway.sh /usr/local/bin/\n\nRUN chown jovyan:users /usr/local/bin/start-enterprise-gateway.sh && \\\n\tchmod 0755 /usr/local/bin/start-enterprise-gateway.sh && \\\n\ttouch /usr/local/share/jupyter/enterprise-gateway.log && \\\n\tchown -R jovyan:users /usr/local/share/jupyter /usr/local/bin/kernel-launchers && \\\n\tchmod 0666 /usr/local/share/jupyter/enterprise-gateway.log && \\\n\trm -f /usr/local/bin/bootstrap-kernel.sh\n\nUSER jovyan\n\nCMD [\"/usr/local/bin/start-enterprise-gateway.sh\"]\n\nEXPOSE 8888\n\nWORKDIR /usr/local/bin\n"
  },
  {
    "path": "etc/docker/enterprise-gateway/README.md",
    "content": "This image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on jupyter/minimal-notebook as a base with Apache Spark 2.4.6 installed on top.\n\n**Note: If you're looking for the YARN-based image of this name, it has been moved to [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/).**\n\n# What it Gives You\n\n- [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway)\n- Python/R/Toree kernels that can be launched and distributed across a managed cluster.\n\n# Basic Use\n\nPull this image, along with all of the elyra/kernel-\\* images to each of your managed nodes. Although manual seeding of images across the cluster is not required, it is highly recommended since kernel startup times can timeout and image downloads can seriously undermine that window.\n\n## Kubernetes\n\nEnterprise Gateway is deployed into Kubernetes using [Helm](https://helm.sh/). See the [Kubernetes section of our Operator's Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/deploy-kubernetes.html) for further details.\n\n## Docker Swarm\n\nDownload the [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml) file and make any necessary changes for your configuration. The compose file consists of three pieces, the Enterprise Gateway container itself, a proxy layer container, and a Docker network. We recommend that a volume be used so that the kernelspec files can be accessed outside of the container since we've found those to require post-deployment modifications from time to time.\n\n## Docker (Traditional)\n\nSame instructions as for Docker Swarm using [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml). 
Please note that you can still run Enterprise Gateway as a traditional docker container within a Docker Swarm cluster, yet have the kernel containers launched as Docker Swarm services since how the kernels are launched is a function of their configured process proxy class.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/enterprise-gateway/start-enterprise-gateway.sh",
    "content": "#!/bin/bash\n\n#export ANACONDA_HOME=/opt/conda\n#export JAVA_HOME=/usr/java/default\n#export PYSPARK_PYTHON=${ANACONDA_HOME}/bin/python\n#export PATH=${ANACONDA_HOME}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${JAVA_HOME}/bin\n\n# Enterprise Gateway variables\nexport EG_SSH_PORT=${EG_SSH_PORT:-2122}\n\n# Kernel Gateway looks for KG_ for the following.  For the sake of consistency\n# we want to use EG_.  The following produces the default value in EG_ (unless\n# set in the env), with the ultimate override of KG_ from the env.\nexport EG_IP=${EG_IP:-0.0.0.0}\nexport KG_IP=${KG_IP:-${EG_IP}}\nexport EG_PORT=${EG_PORT:-8888}\nexport KG_PORT=${KG_PORT:-${EG_PORT}}\nexport EG_PORT_RETRIES=${EG_PORT_RETRIES:-0}\nexport KG_PORT_RETRIES=${KG_PORT_RETRIES:-${EG_PORT_RETRIES}}\n\n# To use tunneling set this variable to 'True' (may need to run as root).\nexport EG_ENABLE_TUNNELING=${EG_ENABLE_TUNNELING:-False}\n\nexport EG_LIST_KERNELS=${EG_LIST_KERNELS:-True}\nexport EG_LOG_LEVEL=${EG_LOG_LEVEL:-DEBUG}\nexport EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-43200}  # default to 12 hours\nexport EG_CULL_INTERVAL=${EG_CULL_INTERVAL:-60}\nexport EG_CULL_CONNECTED=${EG_CULL_CONNECTED:-False}\nEG_ALLOWED_KERNELS=${EG_ALLOWED_KERNELS:-${EG_KERNEL_WHITELIST:-\"null\"}}\nexport EG_ALLOWED_KERNELS=`echo ${EG_ALLOWED_KERNELS} | sed 's/[][]//g'` # sed is used to strip off surrounding brackets as they should no longer be included.\nexport EG_DEFAULT_KERNEL_NAME=${EG_DEFAULT_KERNEL_NAME:-python_docker}\nexport EG_KERNEL_INFO_TIMEOUT=${EG_KERNEL_INFO_TIMEOUT:-60}\n\n# Determine whether the kernels-allowed list should be added to the start command.\n# This is conveyed via a 'null' value for the env - which indicates no kernel names\n# were used in the helm chart or docker-compose yaml.\nallowed_kernels_option=\"\"\nif [ \"${EG_ALLOWED_KERNELS}\" != \"null\" ]; then\n  # Update to --KernelSpecManager.allowed_kernelspecs once jupyter_client >= 7 can be 
supported\n\tallowed_kernels_option=\"--KernelSpecManager.whitelist=[${EG_ALLOWED_KERNELS}]\"\nfi\n\necho \"Starting Jupyter Enterprise Gateway...\"\n\nexec jupyter enterprisegateway \\\n\t--log-level=${EG_LOG_LEVEL} ${allowed_kernels_option} \\\n\t--RemoteMappingKernelManager.cull_idle_timeout=${EG_CULL_IDLE_TIMEOUT} \\\n\t--RemoteMappingKernelManager.cull_interval=${EG_CULL_INTERVAL} \\\n\t--RemoteMappingKernelManager.cull_connected=${EG_CULL_CONNECTED} \\\n\t--RemoteMappingKernelManager.default_kernel_name=${EG_DEFAULT_KERNEL_NAME} \\\n\t--RemoteMappingKernelManager.kernel_info_timeout=${EG_KERNEL_INFO_TIMEOUT}\n"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/Dockerfile",
    "content": "ARG HUB_ORG\nARG SPARK_VERSION\n\nARG BASE_CONTAINER=${HUB_ORG}/demo-base:${SPARK_VERSION}\nFROM $BASE_CONTAINER\n\n# An ARG declared before a FROM is outside of a build stage,\n# so it can’t be used in any instruction after a FROM.\n# To use the default value of an ARG declared before the first FROM\n# use an ARG instruction without a value inside of a build stage:\nARG SPARK_VERSION\n\nENV NB_USER=\"jovyan\"\nENV SPARK_VER=${SPARK_VERSION}\n\nUSER $NB_USER\n\n# Install Enterprise Gateway wheel and kernelspecs\nCOPY jupyter_enterprise_gateway*.whl /tmp/\nRUN pip install /tmp/jupyter_enterprise_gateway*.whl\n\nADD jupyter_enterprise_gateway_kernelspecs*.tar.gz /usr/local/share/jupyter/kernels/\n\nUSER root\nRUN fix-permissions /usr/local/share/jupyter/kernels/\n\nCOPY start-enterprise-gateway.sh.template /usr/local/bin/start-enterprise-gateway.sh\nRUN chown $NB_USER: /usr/local/bin/start-enterprise-gateway.sh && \\\n    chmod +x /usr/local/bin/start-enterprise-gateway.sh\n\nUSER $NB_USER\n\n# Massage kernelspecs to docker image env...\n# Create symbolic link to preserve hdp-related directories\n# Copy toree jar from install to scala kernelspec lib directory\n# Add YARN_CONF_DIR to each env stanza, Add alternate-sigint to vanilla toree\nRUN mkdir -p /tmp/byok/kernels && \\\n\tcp /usr/local/share/jupyter/kernels/spark_${SPARK_VER}_scala/lib/*.jar /usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/lib && \\\n\tcp /usr/local/share/jupyter/kernels/spark_${SPARK_VER}_scala/lib/*.jar /usr/local/share/jupyter/kernels/spark_scala_yarn_client/lib && \\\n\tcd /usr/local/share/jupyter/kernels && \\\n\tfor dir in spark_*; do cat $dir/kernel.json | sed s/'\"env\": {'/'\"env\": {|    \"YARN_CONF_DIR\": \"\\/usr\\/hdp\\/current\\/hadoop\\/etc\\/hadoop\",'/ | tr '|' '\\n' > xkernel.json; mv xkernel.json $dir/kernel.json; done && \\\n\tcat spark_${SPARK_VER}_scala/kernel.json | sed s/'\"__TOREE_OPTS__\": \"\",'/'\"__TOREE_OPTS__\": \"--alternate-sigint 
USR2\",'/ | tr '|' '\\n' > xkernel.json; mv xkernel.json spark_${SPARK_VER}_scala/kernel.json && \\\n\ttouch /usr/local/share/jupyter/enterprise-gateway.log && \\\n\tchmod 0666 /usr/local/share/jupyter/enterprise-gateway.log\n\nUSER root\n\n# install boot script\nCOPY bootstrap-enterprise-gateway.sh /usr/local/bin/bootstrap-enterprise-gateway.sh\nRUN chown $NB_USER: /usr/local/bin/bootstrap-enterprise-gateway.sh && \\\n\tchmod 0700 /usr/local/bin/bootstrap-enterprise-gateway.sh\n\nENTRYPOINT [\"/usr/local/bin/bootstrap-enterprise-gateway.sh\"]\nCMD [\"--help\"]\n\nEXPOSE 8888\n\nUSER $NB_USER\n"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/README.md",
    "content": "Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), this image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) to better demonstrate running Python, R and Scala kernels in YARN-cluster mode.\n\n# What it Gives You\n\n- [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) base functionality\n- [Jupyter Enterprise Gateway](https://github.com/jupyter-incubator/enterprise_gateway)\n- Python/R/Toree kernels that target YARN-cluster mode\n\n# Basic Use\n\n**elyra/enterprise-gateway-demo** can be used as a combined YARN cluster in which the kernels run locally in YARN-cluster mode, or combined with a different instance of itself or an [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) instance to more easily view that kernels are running remotely.\n\nPrior to using either mode, we recommend you create a local docker network. This better isolates the container(s) and avoids port collisions that might come into play if you're using a gateway-enabled Notebook image on the same host. Here's a simple way to create a docker network...\n\n`docker network create -d bridge jeg`\n\nOnce created, you just add `--net jeg` to the enterprise gateway run commands. 
Using `--net jeg` when creating instances of the gateway-enabled Notebook image is not necessary.\n\n### Combined Mode\n\nTo run the image as a combined YARN/Enterprise Gateway instance, use the following command:\n\n`docker run -itd --rm -p 8888:8888 -p 8088:8088 -p 8042:8042 --net=jeg elyra/enterprise-gateway-demo --elyra`\n\nTo produce a general usage statement, the following can be used...\n\n`docker run --rm elyra/enterprise-gateway-demo --help`\n\nTo run the enterprise-gateway-demo container in an interactive mode, where enterprise gateway is manually started within the container, use the following...\n\n`docker run -it --rm -p 8888:8888 -p 8088:8088 -p 8042:8042 --net=jeg elyra/enterprise-gateway-demo /bin/bash`\n\nOnce in the container, enterprise-gateway-demo can be started using `sudo -u jovyan /usr/local/bin/start-enterprise-gateway.sh`\n\n### Dual Mode\n\nTo get a better idea that kernels are running remote, you can invoke elyra/enterprise-gateway-demo to be the YARN master or use [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/).\n\nTo invoke the YARN master using elyra/demo-base...\n\n`docker run -d --rm -h yarnmaster --name yarnmaster -p 8088:8088 -p 8042:8042 --net jeg elyra/demo-base --yarn`\n\nor using elyra/enterprise-gateway-demo...\n\n`docker run -d --rm -h yarnmaster --name yarnmaster -p 8088:8088 -p 8042:8042 --net jeg elyra/enterprise-gateway-demo --yarn`\n\nThen, invoke elyra/enterprise-gateway-demo as purely an Enterprise Gateway host that indicates the name of its YARN master...\n\n`docker run -it --rm -h elyra --name elyra -p 8888:8888 --net jeg -e YARN_HOST=yarnmaster elyra/enterprise-gateway-demo --elyra`\n\n**Tip:** YARN logs can be accessed via host system's public IP on port `8042` rather than using container's `hostname:8042`, while YARN Resource manager can be accessed via container's `hostname:8088` port.\n\n#### Bring Your Own Kernels\n\nelyra/enterprise-gateway-demo sets up `JUPYTER_PATH` to point to `/tmp/byok`. 
This enables the ability to use docker volumes to mount your own set of kernelspec files. The kernelspecs must reside in a `kernels` directory. You can mount to the appropriate point in one of two ways via the docker `-v` option:\n\n`-v <host_directory_containing_kernels_directory>:/tmp/byok`\n\nor\n\n`-v <host_kernels_directory>:/tmp/byok/kernels`\n\nTo confirm Enterprise Gateway is detecting the new kernelspecs, monitor the log (`docker logs -f <container_name>`) and issue a refresh from the gateway-enabled Notebook instance. Each refresh of the notebook's tree view triggers a refresh of the set of kernelspecs in Enterprise Gateway.\n\n# Connecting a client notebook\n\nYou can use any gateway-enabled notebook server to hit the running docker container.\n\nNote: Given the size of the enterprise-gateway-demo when combined with a YARN/Spark installation, it is recommended that you have at least 4GB of memory allocated for your docker image in order to run kernels (particularly the Toree/Scala kernel).\n\n# Recognized Environment Variables\n\nThe following environment variables are recognized during startup of the container and can be specified via docker's `-e` option. These will rarely need to be modified.\n\n`KG_IP`: specifies the IP address of enterprise gateway. This should be a public IP. Default = 0.0.0.0\n`KG_PORT`: specifies the port that enterprise gateway is listening on. This port should be mapped to a host port via `-p`. Default = 8888\n`KG_PORT_RETRIES`: specifies the number of retries due to port conflicts that will be attempted. Default = 0\n\n`EG_REMOTE_HOSTS`: specifies a comma-separated lists of hostnames which can be used to run YARN-client kernels. Default = <container-hostname>\n`EG_YARN_ENDPOINT`: specifies the HTTP endpoint of the YARN Resource Manager. Default = http://<hostname>:8088/ws/v1/cluster}\n`EG_SSH_PORT=`: specifies the port of the SSH server. This container is setup to use port `2122`. This value should not be changed. 
Default = 2122\n\n`EG_ENABLE_TUNNELING`: specifies whether port tunneling will be used. This value is currently `False` because ssh tunneling is not working unless Enterprise Gateway is run as the root user. This can be accomplished by starting the container with `bash` as the command and running `start-enterprise-gateway.sh` directly (sans `sudo`).\n\nNOTE: Dual Mode functionality is only available in tags 0.9.0+\n"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh",
    "content": "#!/bin/bash\n\n# This file is a copy of /etc/bootstrap.sh but invokes Jupyter Enterprise Gateway in its \"deamon\" case.\n# It also checks for --help or no options before starting anything...\n\n\nCMD=${1:-\"--help\"}\nif [[ \"$CMD\" == \"--help\" ]]; then\n\techo \"\"\n\techo \"usage: docker run -it[d] --rm -h <container-hostname> -p 8888:8888 [-p 8088:8088 -p 8042:8042] <docker-opts> <docker-image> <command>\"\n\techo \"\"\n\techo \"where <command> is:\"\n\techo \"    --gateway ... Invokes Enterprise Gateway as user 'jovyan' directly.  Useful for daemon behavior.\"\n\techo \"    --yarn  ... Runs container as standalone YARN master - no Enterprise Gateway is started.\"\n\techo \"    --help  ... Produces this message.\"\n\techo \"    <other> ... Invokes '<other>'.  Use <other>='/bin/bash' to explore within the container.\"\n\techo \"\"\n\techo \"Tips:\"\n\techo \"1) You can target a different YARN cluster by using '-e YARN_HOST=<myOtherYarnMaster>'\"\n\techo \"2) You can \\\"bring your own kernels\\\" by mounting to /tmp/byok/kernels (e.g., -v my-kernels-dir:/tmp/byok/kernels)\"\n\techo \"3) It is advised that port '8888' be mapped to a host port, although the host port number is not\"\n\techo \"   required to be '8888'.  Mapping of ports '8088' and '8042' is also strongly recommended\"\n\techo \"   for YARN application monitoring if running standalone.\"\n\texit 0\nelif [[ \"$CMD\" != \"--gateway\" && \"$CMD\" != \"--yarn\" ]]; then  # invoke <other> w/o starting YARN\n    \"$*\"\n    exit 0\nfi\n\n: ${YARN_HOST:=$HOSTNAME}\nexport FROM=\"EG\"\n/usr/local/bin/bootstrap-yarn-spark.sh $*\n\n# Note that '--yarn' functionality is a subset of '--gateway' functionality\n\nif [[ \"$CMD\" == \"--gateway\" ]];\nthen\n    sudo sed -i \"s/HOSTNAME/$YARN_HOST/\" /usr/local/bin/start-enterprise-gateway.sh\n    /usr/local/bin/start-enterprise-gateway.sh\nfi\n\nexit 0\n"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template",
    "content": "#!/bin/bash\n\n# Allow for mounts of kernelspecs to /tmp/byok/kernels\nexport JUPYTER_PATH=${JUPYTER_PATH:-/tmp/byok}\n\n# Enterprise Gateway variables\nexport EG_REMOTE_HOSTS=${EG_REMOTE_HOSTS:-HOSTNAME}\nexport EG_SSH_PORT=${EG_SSH_PORT:-2122}\nexport KG_IP=${KG_IP:-0.0.0.0}\nexport KG_PORT=${KG_PORT:-8888}\nexport KG_PORT_RETRIES=${KG_PORT_RETRIES:-0}\n\n# To use tunneling set this variable to 'True' and run as root.\nexport EG_ENABLE_TUNNELING=${EG_ENABLE_TUNNELING:-False}\n\nexport EG_LOG_LEVEL=${EG_LOG_LEVEL:-DEBUG}\nexport EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-43200}  # default to 12 hours\nexport EG_CULL_CONNECTED=${EG_CULL_CONNECTED:-True}\n\necho \"Starting Jupyter Enterprise Gateway...\"\n\njupyter enterprisegateway \\\n\t--log-level=${EG_LOG_LEVEL} \\\n\t--EnterpriseGatewayApp.inherited_envs=PYSPARK_PYTHON \\\n\t--MappingKernelManager.cull_idle_timeout=${EG_CULL_IDLE_TIMEOUT} \\\n\t--MappingKernelManager.cull_interval=30 \\\n\t--MappingKernelManager.cull_connected=${EG_CULL_CONNECTED} 2>&1 | tee /usr/local/share/jupyter/enterprise-gateway.log\n"
  },
  {
    "path": "etc/docker/kernel-image-puller/Dockerfile",
    "content": "ARG BASE_CONTAINER=python:3.10-bookworm\nFROM $BASE_CONTAINER\n\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY kernel_image_puller.py ./\nCOPY image_fetcher.py ./\nARG OS=Debian_12\n\n# Install crictl for use by KIP when non-docker installations are encountered.\nRUN mkdir -p /etc/apt/keyrings && \\\n    curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubic.gpg && \\\n    echo \"deb [signed-by=/etc/apt/keyrings/kubic.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${OS}/ /\" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list && \\\n    apt-get update && apt-get install -y cri-tools\n\nRUN echo $PATH\n# The following environment variables are supported - defaults provided.  Override as needed.\nENV KIP_GATEWAY_HOST=http://localhost:8888\nENV KIP_INTERVAL=300\nENV KIP_LOG_LEVEL=INFO\nENV KIP_NUM_PULLERS=2\nENV KIP_NUM_RETRIES=3\nENV KIP_PULL_POLICY='IfNotPresent'\n\nCMD [ \"python\", \"./kernel_image_puller.py\" ]\n"
  },
  {
    "path": "etc/docker/kernel-image-puller/README.md",
    "content": "This image is responsible for contacting the configured [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) instance within a Kubernetes or Docker Swarm cluster and pulling the set of kernel-based images to the node on which it is running.\n\n# What it Gives You\n\n- The ability to add new nodes and have kernel images on those nodes automatically populated.\n- The ability to configure new kernelspecs that use different images and have those images pulled to all cluster nodes.\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nAs part of that deployment, Kernel Image Puller (KIP) will be launched on each node. On Kubernetes, this will be accomplished via a DaemonSet. On Docker Swarm, it will be via a global service. KIP will then contact the configured Enterprise Gateway instance, fetch the set of in-use kernelspecs, parse out the image names and pull those images.\n\nThere are a few points of configuration listed below - all of which are environment variables (defaults in parenthesis).\n\n- `KIP_GATEWAY_HOST` (`http://localhost:8888`)\n- `KIP_INTERVAL` (`300`)\n- `KIP_LOG_LEVEL` (`INFO`)\n- `KIP_NUM_PULLERS` (`2`)\n- `KIP_NUM_RETRIES` (`3`)\n- `KIP_PULL_POLICY` (`IfNotPresent`)\n- `KIP_IMAGE_FETCHER` (`KernelSpecsFetcher`)\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-image-puller/image_fetcher.py",
    "content": "\"\"\"image name fetcher abstract class and concrete implementation\"\"\"\n\nimport abc\nimport importlib\nimport os\n\nimport requests\nimport yaml\nfrom kubernetes import client, config\nfrom kubernetes.client import ApiException\n\n\nclass ImageNameFetcher(metaclass=abc.ABCMeta):\n    \"\"\"\n    abstract class to extend for fetch image names\n    \"\"\"\n\n    @abc.abstractmethod\n    def fetch_image_names(self) -> set[str]:\n        \"\"\"\n        Abstract method to fetch image names.\n\n        :return: A set of image names.\n        \"\"\"\n        pass\n\n\nclass KernelSpecsFetcher(ImageNameFetcher):\n    \"\"\"Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.\n\n    For process-proxy kernelspecs, the image names are contained in the config stanza - which\n    resides in the process-proxy stanza located in the metadata.\n    \"\"\"\n\n    def __init__(self, logger):\n        \"\"\"\n        KIP_AUTH_TOKEN: enterprise-gateway auth token\n        KIP_GATEWAY_HOST: enterprise-gateway host\n        KIP_VALIDATE_CERT: validate cert or not\n        \"\"\"\n        self.logger = logger\n        self.auth_token = os.getenv(\"KIP_AUTH_TOKEN\", None)\n        self.gateway_host = os.getenv(\"KIP_GATEWAY_HOST\", \"http://localhost:18888\")\n        self.validate_cert = os.getenv(\"KIP_VALIDATE_CERT\", \"False\").lower() == \"true\"\n\n    def get_kernel_specs(self):\n        \"\"\"Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs\"\"\"\n        end_point = f\"{self.gateway_host}/api/kernelspecs\"\n        self.logger.info(f\"Fetching kernelspecs from '{end_point}' ...\")\n        headers = {\"Content-Type\": \"application/json\"}\n        if self.auth_token:\n            end_point += f\"?token={self.auth_token}\"\n            headers.update({\"Authorization\": f\"token {self.auth_token}\"})\n        resp = requests.get(end_point, headers=headers, verify=self.validate_cert, 
timeout=60)\n        if not resp.ok:\n            msg = f\"Gateway server response: {resp.status_code}\"\n            raise requests.exceptions.HTTPError(msg)\n        return resp.json()\n\n    def fetch_image_names(self) -> set[str]:\n        \"\"\"\n        fetch image names from enterprise gateway kernelspecs\n        \"\"\"\n        k_specs = None\n        try:\n            k_specs_response = self.get_kernel_specs()\n            k_specs = k_specs_response.get(\"kernelspecs\")\n        except Exception as ex:\n            self.logger.error(\n                f\"Got exception attempting to retrieve kernelspecs - retrying. Exception was: {ex}\"\n            )\n\n        if k_specs is None:\n            return None\n\n        # Locate the configured images within the kernel_specs and add to set for duplicate management\n        images = set()\n        for key in k_specs:\n            metadata = k_specs.get(key).get(\"spec\").get(\"metadata\")\n            if metadata is not None:\n                config_parent = metadata.get(\"process_proxy\")\n                if config_parent is None:  # See if this is a provisioner\n                    config_parent = metadata.get(\"kernel_provisioner\")\n                if config_parent is not None:\n                    config = config_parent.get(\"config\")\n                    if config is not None:\n                        image_name = config.get(\"image_name\")\n                        if image_name is not None:\n                            images.add(image_name)\n                        executor_image_name = config.get(\"executor_image_name\")\n                        if executor_image_name is not None:\n                            images.add(executor_image_name)\n        return images\n\n\nclass StaticListFetcher(ImageNameFetcher):\n    \"\"\"\n    A class for fetching image names from a static list of images provided by an environment variable.\n\n    Inherits from `ImageNameFetcher`, which defines a `fetch_images()` 
method that must be implemented.\n\n    This class reads the `KIP_IMAGES` environment variable, which should be a comma-separated list of image names.\n    It then splits the list into individual image names and returns them as a set.\n\n    Attributes:\n        logger (logging.Logger): The logger to use for logging messages.\n\n    Methods:\n        fetch_images(): Fetches image names from the `KIP_IMAGES` environment variable and returns them as a set.\n    \"\"\"\n\n    def __init__(self, logger) -> None:\n        \"\"\"\n        init method\n        \"\"\"\n        self.logger = logger\n\n    def fetch_image_names(self) -> set[str]:\n        \"\"\"\n        KIP_IMAGES: comma seperated list of image names\n        \"\"\"\n        images = os.getenv(\"KIP_IMAGES\", \"\").split(\",\")\n        return set(images)\n\n\nclass ConfigMapImagesFetcher(ImageNameFetcher):\n    \"\"\"\n    A class for fetching image names from a Kubernetes ConfigMap.\n\n    Inherits from `ImageNameFetcher`, which defines a `fetch_images()` method that must be implemented.\n\n    This class reads the `KIP_CM_NAMESPACE`, `KIP_CM_NAME`, and `KIP_CM_KEY_NAME` environment variables to determine\n    the namespace, name, and key name of the ConfigMap containing the image names. 
It then reads the specified\n    ConfigMap and extracts the image names from the specified key, which should be a YAML list of image names.\n\n    Attributes:\n        logger (logging.Logger): The logger to use for logging messages.\n        namespace (str): The namespace containing the ConfigMap.\n        name (str): The name of the ConfigMap.\n        key_name (str): The name of the key containing the YAML list of image names.\n\n    Methods:\n        fetch_images(): Fetches image names from the specified ConfigMap and key and returns them as a set.\n    \"\"\"\n\n    def __init__(self, logger) -> None:\n        \"\"\"\n        Initializes a new instance of the class with the specified logger and environment variables.\n        KIP_CM_NAMESPACE: namespace the configmap is in\n        KIP_CM_NAME: the name of the config map\n        KIP_CM_KEY_NAME: the key name\n        \"\"\"\n        self.logger = logger\n        self.namespace = os.getenv(\"KIP_CM_NAMESPACE\", \"enterprise-gateway\")\n        self.name = os.getenv(\"KIP_CM_NAME\", \"kernel-images\")\n        self.key_name = os.getenv(\"KIP_CM_KEY_NAME\", \"image-names\")\n\n    def fetch_image_names(self) -> set[str]:\n        \"\"\"\n        fetch image names by parsing the configmap\n        this will load the in-cluster context, your service account of the pod should have access to get configmap\n        \"\"\"\n        config.load_incluster_config()\n        v1 = client.CoreV1Api()\n        config_map = None\n        try:\n            config_map = v1.read_namespaced_config_map(name=self.name, namespace=self.namespace)\n        except ApiException as e:\n            if e.status == 404:\n                self.logger.error(f\"ConfigMap {self.name} not found in namespace {self.namespace}\")\n            else:\n                # Handle other ApiException errors\n                self.logger.error(f\"Error retrieving ConfigMap: {e}\")\n        if config_map and self.key_name in config_map.data:\n            images 
= config_map.data[self.key_name]\n            image_list = []\n            try:\n                image_list = yaml.safe_load(images)\n            except yaml.YAMLError as e:\n                self.logger.error(f\"Error parsing YAML: {e}\")\n            return set(image_list or [])\n        return set()\n\n\nclass CombinedImagesFetcher(ImageNameFetcher):\n    \"\"\"\n    A class for fetching image names from multiple fetchers.\n    Inherits from `ImageNameFetcher`, which defines a `fetch_images()` method that must be implemented.\n\n    This class initializes a list of fetchers based on the `KIP_INTERNAL_FETCHERS` environment variable, which should\n    be a comma-separated list of fetcher class names. It then calls the `fetch_images()` method on each fetcher and\n    combines the results into a set of unique image names.\n\n    Attributes:\n        logger (logging.Logger): The logger to use for logging messages.\n        fetchers (list): A list of fetchers to use for fetching image names.\n\n    Methods:\n        fetch_images(): Fetches image names from all fetchers and returns them as a set.\n    \"\"\"\n\n    def __init__(self, logger):\n        \"\"\"\n        KIP_INTERNAL_FETCHERS: fetchers used internally to get image names\n        \"\"\"\n        self.logger = logger\n        fetcher_names = os.getenv(\"KIP_INTERNAL_FETCHERS\", \"KernelSpecsFetcher\").split(',')\n        self.fetchers = []\n        module = importlib.import_module(\"image_fetcher\")\n        args = (logger,)\n        for f in fetcher_names:\n            fetcher = getattr(module, f)(*args)\n            self.fetchers.append(fetcher)\n\n    def fetch_image_names(self) -> set[str]:\n        \"\"\"\n        fetch image names from internal fetchers\n        \"\"\"\n        images = set()\n        for f in self.fetchers:\n            images.update(f.fetch_image_names())\n        return images\n"
  },
  {
    "path": "etc/docker/kernel-image-puller/kernel_image_puller.py",
    "content": "\"\"\"A kernel image puller.\"\"\"\n\nimport importlib\nimport logging\nimport os\nimport queue\nimport time\nfrom subprocess import CalledProcessError, run\nfrom threading import Thread\nfrom typing import List, Optional\n\nfrom docker.client import DockerClient\nfrom docker.errors import NotFound\n\n# initialize root logger\nlogging.basicConfig(format=\"[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s\")\nlog_level = os.getenv(\"KIP_LOG_LEVEL\", \"INFO\")\n\n\nclass KernelImagePuller:\n    \"\"\"A kernel image puller.\"\"\"\n\n    POLICY_IF_NOT_PRESENT = \"IfNotPresent\"\n    POLICY_ALWAYS = \"Always\"\n    policies = (POLICY_IF_NOT_PRESENT, POLICY_ALWAYS)\n\n    DOCKER_CLIENT = \"docker\"\n    CONTAINERD_CLIENT = \"containerd\"\n    supported_container_runtimes = (DOCKER_CLIENT, CONTAINERD_CLIENT)\n\n    def __init__(self, kip_logger, image_fetcher):\n        \"\"\"Initialize the puller.\"\"\"\n        self.interval = None\n        self.container_runtime = None\n        self.runtime_endpoint = None\n        self.default_container_registry = None\n        self.log = kip_logger\n        self.worker_queue = None\n        self.threads = []\n        self.pulled_images = set()\n        self.num_pullers = None\n        self.num_retries = None\n        self.policy = None\n        self.image_fetcher = image_fetcher\n        self.load_static_env_values()\n\n    def load_static_env_values(self):\n        \"\"\"Load the static environment values.\"\"\"\n        self.num_pullers = int(os.getenv(\"KIP_NUM_PULLERS\", \"2\"))\n        self.num_retries = int(os.getenv(\"KIP_NUM_RETRIES\", \"3\"))\n        self.policy = os.getenv(\"KIP_PULL_POLICY\", KernelImagePuller.POLICY_IF_NOT_PRESENT)\n        self.default_container_registry = os.getenv(\"KIP_DEFAULT_CONTAINER_REGISTRY\", \"\")\n        self.runtime_endpoint = os.getenv(\n            \"KIP_CRI_ENDPOINT\", \"unix:///run/containerd/containerd.sock\"\n        )\n        self.container_runtime 
= self.get_container_runtime()\n        # Add authentication token support to KIP\n        self.interval = int(os.getenv(\"KIP_INTERVAL\", \"300\"))\n\n        if self.policy not in KernelImagePuller.policies:\n            logger.warning(\n                f\"Invalid pull policy detected in KIP_PULL_POLICY: '{self.policy}'.  \"\n                f\"Using policy '{KernelImagePuller.POLICY_IF_NOT_PRESENT}'.\"\n            )\n            self.policy = KernelImagePuller.POLICY_IF_NOT_PRESENT\n\n        logger.info(\"Starting Kernel Image Puller with the following parameters:\")\n        logger.info(f\"KIP_INTERVAL: {self.interval} secs\")\n        logger.info(f\"KIP_NUM_PULLERS: {self.num_pullers}\")\n        logger.info(f\"KIP_NUM_RETRIES: {self.num_retries}\")\n        logger.info(f\"KIP_PULL_POLICY: {self.policy}\")\n        logger.info(f\"KIP_LOG_LEVEL: {log_level}\")\n        # logger.info(f\"KIP_AUTH_TOKEN: {self.auth_token}\")  # Do not print\n        logger.info(f\"KIP_DEFAULT_CONTAINER_REGISTRY: '{self.default_container_registry}'\")\n        logger.info(f\"KIP_CRI_ENDPOINT: {self.runtime_endpoint}\")\n\n        if self.is_runtime_endpoint_recognized():\n            logger.info(f\"Detected container runtime: {self.container_runtime}\")\n        else:\n            logger.warning(\n                f\"This node's container runtime interface could not be detected from \"\n                f\"endpoint: {self.runtime_endpoint}, proceeding with {self.container_runtime} client...\"\n            )\n\n    def start(self):\n        \"\"\"Start the puller.\"\"\"\n        self.log.info(\"Starting Kernel Image Puller process.\")\n        self.initialize_workers()\n        wait_interval = 5  # Start with 5 seconds to ensure EG service gets started...\n        time.sleep(wait_interval)\n        # Fetch the image names, then wait for name queue to drain.  
Once drained, or if there were issues\n        # fetching the image names, wait the interval number of seconds and perform the operation again.\n        while True:\n            fetched = self.fetch_image_names()\n            if fetched:\n                # Once we have fetched kernelspecs, update wait_interval\n                wait_interval = self.interval\n                self.worker_queue.join()\n            elif not self.is_runtime_endpoint_recognized():\n                # Increase the interval since we shouldn't pound the service for kernelspecs\n                wait_interval = self.interval\n\n            logger.info(f\"Sleeping {wait_interval} seconds to fetch image names...\\n\")\n            time.sleep(wait_interval)\n\n    def initialize_workers(self):\n        \"\"\"Initialize the workers.\"\"\"\n        self.worker_queue = queue.Queue()\n        for i in range(self.num_pullers):\n            t = Thread(target=self.image_puller, name=f\"t{(i + 1)}\")\n            t.start()\n            self.threads.append(t)\n\n    def get_container_runtime(self) -> Optional[str]:\n        \"\"\"Determine the container runtime from the KIP_CRI_ENDPOINT env.\"\"\"\n\n        if KernelImagePuller.DOCKER_CLIENT in self.runtime_endpoint:\n            return KernelImagePuller.DOCKER_CLIENT\n\n        # This will essentially be the default to use in case we don't recognized the endpoint.\n        return KernelImagePuller.CONTAINERD_CLIENT\n\n    def is_runtime_endpoint_recognized(self) -> bool:\n        \"\"\"Check if the runtime endpoint is recognized.\"\"\"\n        return (\n            KernelImagePuller.DOCKER_CLIENT in self.runtime_endpoint\n            or KernelImagePuller.CONTAINERD_CLIENT in self.runtime_endpoint\n        )\n\n    def fetch_image_names(self):\n        \"\"\"\n        Fetches image names and adds them to a worker queue for processing.\n        Returns:\n            bool: True if at least one image name was found and added to the worker queue, False 
otherwise.\n        \"\"\"\n        # Locate the configured image_names within the kernel_specs and add to set for duplicate management\n        image_names = self.image_fetcher.fetch_image_names()\n\n        if not image_names:\n            return False\n\n        # Add the image names to the name queue to be pulled\n        for image_name in image_names:\n            self.worker_queue.put_nowait(image_name)\n        return True\n\n    def image_puller(self):\n        \"\"\"Thread-based puller.\n\n        Gets image name from the queue and attempts to pull the image. Any issues, except\n        for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the\n        retries have been exceeded, the queue task is marked as done.\n        \"\"\"\n        while True:\n            logger.debug(\"Waiting for new image to pull\")\n            image_name = self.worker_queue.get()\n            self.log.info(f\"Task received to pull image: {image_name}\")\n            if image_name is None:\n                break\n            i = 0\n            while i < self.num_retries:\n                try:\n                    self.pull_image(image_name)\n                    break\n                except Exception as ex:\n                    i += 1\n                    if i < self.num_retries:\n                        logger.warning(\n                            f\"Attempt {i} to pull image '{image_name}' encountered exception - retrying.  
\"\n                            f\"Exception was: {ex}.\"\n                        )\n                    else:\n                        logger.error(\n                            f\"Attempt {i} to pull image '{image_name}' failed with exception: {ex}\"\n                        )\n            self.worker_queue.task_done()\n\n    def pull_image(self, image_name):\n        \"\"\"Pulls the image.\n\n        If the policy is `IfNotPresent` the set of pulled image names is\n        checked and, if present, the method returns.  Otherwise, the pull attempt is made\n        and the set of pulled images is updated, when successful.\n        \"\"\"\n        if self.policy == KernelImagePuller.POLICY_IF_NOT_PRESENT:\n            if image_name in self.pulled_images:\n                # Image has been pulled, but make sure it still exists.  If it doesn't exist\n                # let this drop through to actual pull\n                logger.info(\n                    f\"Image '{image_name}' already pulled and policy is '{self.policy}'.  Checking existence.\"\n                )\n                if self.image_exists(image_name):\n                    return\n                self.pulled_images.remove(image_name)\n                logger.warning(\n                    f\"Previously pulled image '{image_name}' was not found - attempting pull...\"\n                )\n            elif self.image_exists(image_name):  # Yet to be pulled, consider pulled if exists\n                policy = self.policy\n                logger.info(\n                    f\"Image '{image_name}' has not been pulled but exists, and policy is '{policy}'. 
Skipping pull.\"\n                )\n                self.pulled_images.add(image_name)\n                return\n\n        logger.info(f\"Pulling image '{image_name}'...\")\n        if self.download_image(image_name):\n            self.pulled_images.add(image_name)\n        else:\n            logger.warning(f\"Image '{image_name}' was not downloaded!\")\n\n    def get_absolute_image_name(self, image_name: str) -> str:\n        \"\"\"Ensures the image name is prefixed with a \"registry\".\"\"\"\n        # We will check for the form 'registry/repo/image:tag' if the 'registry/' prefix\n        # is missing (based on the absence of two slashes), then we'll prefix the image\n        # name with the KIP_DEFAULT_CONTAINER_REGISTRY env value.\n        image_pieces = image_name.split(\"/\")\n        # we're missing a registry specifier, use default if present\n        if len(image_pieces) < 3 and self.default_container_registry:\n            return f\"{self.default_container_registry}/{image_name}\"\n        return image_name  # take our chances\n\n    def image_exists(self, image_name: str) -> bool:\n        \"\"\"Checks for the existence of the named image using the configured container runtime.\"\"\"\n        result = True\n        absolute_image_name = self.get_absolute_image_name(image_name)\n        t0 = time.time()\n        if self.container_runtime == KernelImagePuller.DOCKER_CLIENT:\n            try:\n                DockerClient.from_env().images.get(absolute_image_name)\n            except NotFound:\n                result = False\n        elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT:\n            argv = [\"crictl\", \"-r\", self.runtime_endpoint, \"inspecti\", \"-q\", absolute_image_name]\n            result = self.execute_cmd(argv)\n        else:  # invalid container runtime\n            logger.error(f\"Invalid container runtime detected: '{self.container_runtime}'!\")\n            result = False\n        t1 = time.time()\n        
logger.debug(\n            f\"Checked existence of image '{image_name}' in {(t1 - t0):.3f} secs.  exists = {result}\"\n        )\n        return result\n\n    def download_image(self, image_name: str) -> bool:\n        \"\"\"Downloads (pulls) the named image using the configured container runtime.\"\"\"\n        result = True\n        absolute_image_name = self.get_absolute_image_name(image_name)\n        t0 = time.time()\n        if self.container_runtime == KernelImagePuller.DOCKER_CLIENT:\n            try:\n                DockerClient.from_env().images.pull(absolute_image_name)\n            except NotFound:\n                result = False\n        elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT:\n            argv = [\"crictl\", \"-r\", self.runtime_endpoint, \"pull\", absolute_image_name]\n            result = self.execute_cmd(argv)\n        else:  # invalid container runtime\n            logger.error(f\"Invalid container runtime detected: '{self.container_runtime}'!\")\n            result = False\n        t1 = time.time()\n        if result is True:\n            logger.info(f\"Pulled image '{image_name}' in {(t1 - t0):.3f} secs.\")\n        return result\n\n    def execute_cmd(self, argv: List[str]) -> bool:\n        \"\"\"Execute the given command expressed in 'argv'. 
If expected_output is provided it\n\n        will be checked against the command's stdout after stripping off the '\\n' character.\n        \"\"\"\n        result = True\n        try:\n            run(argv, capture_output=True, text=True, check=True)\n        except CalledProcessError as cpe:\n            error_msg = cpe.stderr[:-1]  # strip off trailing newline\n            logger.error(f\"Error executing {' '.join(argv)}: {error_msg}\")\n            result = False\n        except Exception as ex:\n            logger.error(f\"Error executing {' '.join(argv)}: {ex}\")\n            result = False\n        return result\n\n\nif __name__ == \"__main__\":\n    logger = logging.getLogger(\"kernel_image_puller\")\n    logger.setLevel(log_level)\n    logger.info(\"Loading KernelImagePuller...\")\n    fetcher_class_name = os.getenv('KIP_IMAGE_FETCHER', 'KernelSpecsFetcher')\n    args = (logger,)\n    module = importlib.import_module(\"image_fetcher\")\n    fetcher = getattr(module, fetcher_class_name)(*args)\n    kip = KernelImagePuller(logger, fetcher)\n    kip.start()\n"
  },
  {
    "path": "etc/docker/kernel-image-puller/requirements.txt",
    "content": "docker>=3.7.2\nkubernetes>=17.17.0\npyyaml>=5.4.1\nrequests>=2.7,<3.0\n"
  },
  {
    "path": "etc/docker/kernel-py/Dockerfile",
    "content": "# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=jupyter/scipy-notebook:2023-03-13\nFROM $BASE_CONTAINER\n\nENV PATH=$PATH:$CONDA_DIR/bin\n\n# Add debugger support\nRUN pip install --upgrade ipykernel\n\nRUN conda install --quiet --yes \\\n    cffi \\\n    future \\\n    pycryptodomex && \\\n    conda clean --all && \\\n    fix-permissions $CONDA_DIR && \\\n    fix-permissions /home/$NB_USER\n\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\nUSER root\n\nRUN apt-get update && apt-get install -yq --no-install-recommends \\\n    libkrb5-dev \\\n    && rm -rf /var/lib/apt/lists/*\n\nRUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n\tchmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n\tchown -R jovyan:users /usr/local/bin/kernel-launchers\n\nUSER jovyan\n\nENV KERNEL_LANGUAGE=python\n\n# Disble healthcheck inherited from notebook image\nHEALTHCHECK NONE\n\nCMD /usr/local/bin/bootstrap-kernel.sh\n"
  },
  {
    "path": "etc/docker/kernel-py/README.md",
    "content": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook/).\n\n# What it Gives You\n\n- IPython kernel support (with debugger)\n- [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook)\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-r/Dockerfile",
    "content": "# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=quay.io/jupyter/r-notebook:r-4.5.2\nFROM $BASE_CONTAINER\n\nRUN conda install --quiet --yes \\\n    'r-argparse' \\\n    pycryptodomex && \\\n    conda clean --all && \\\n    fix-permissions $CONDA_DIR\n\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\n# Switch back to root to modify ownerships\nUSER root\n\nRUN apt-get update && apt-get install -y \\\n    less \\\n    curl \\\n    libkrb5-dev \\\n    && rm -rf /var/lib/apt/lists/*\n\nRUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n\tchmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n\tchown -R jovyan:users /usr/local/bin/kernel-launchers\n\nUSER jovyan\n\nENV KERNEL_LANGUAGE=R\n\n# Disble healthcheck inherited from notebook image\nHEALTHCHECK NONE\n\nCMD /usr/local/bin/bootstrap-kernel.sh\n"
  },
  {
    "path": "etc/docker/kernel-r/README.md",
    "content": "This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/).\n\n# What it Gives You\n\n- IRKernel kernel support\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-scala/Dockerfile",
    "content": "ARG HUB_ORG\nARG SPARK_VERSION\n\n# TODO: Restore usage of SPARK_VERSION ARG once https://github.com/jupyter/enterprise_gateway/pull/867 is merged\nARG BASE_CONTAINER=$HUB_ORG/spark:v$SPARK_VERSION\nFROM $BASE_CONTAINER\n\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\nUSER root\n\n# Create/setup the jovyan system user\nRUN adduser --system -uid 1000 jovyan --ingroup users && \\\n    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n\tchmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n\tchmod 0777 /opt/spark/work-dir && \\\n    chown -R jovyan:users /usr/local/bin/kernel-launchers\n\nUSER jovyan\nENV KERNEL_LANGUAGE=scala\nCMD /usr/local/bin/bootstrap-kernel.sh\n"
  },
  {
    "path": "etc/docker/kernel-scala/README.md",
    "content": "This image enables the use of a Scala ([Apache Toree](https://toree.apache.org/)) kernel launched from [Jupyter Enterprise Gateway](http://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [elyra/spark:v2.4.6](https://hub.docker.com/r/elyra/spark/) deriving from the [Apache Spark 2.4.6 release](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.\n\n# What it Gives You\n\n- Scala (Toree) kernel support\n- Spark on kubernetes support from within a Jupyter Notebook\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-spark-py/Dockerfile",
    "content": "ARG HUB_ORG\nARG TAG\n\n# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=$HUB_ORG/kernel-py:$TAG\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\n\nENV SPARK_VER=$SPARK_VERSION\nENV SPARK_HOME=/opt/spark\nENV KERNEL_LANGUAGE=python\nENV R_LIBS_USER=$R_LIBS_USER:${SPARK_HOME}/R/lib\nENV PATH=$PATH:$SPARK_HOME/bin\n\nUSER root\n\nRUN dpkg --purge --force-depends ca-certificates-java \\\n    && apt-get update \\\n    && apt-get install -yq --no-install-recommends \\\n    ca-certificates \\\n    ca-certificates-java \\\n    openjdk-8-jdk \\\n    less \\\n    curl \\\n    libssl-dev \\\n    && rm -rf /var/lib/apt/lists/*\n\nENV JAVA_HOME=/usr/lib/jvm/java\nRUN ln -s $(readlink -f /usr/bin/javac | sed \"s:/bin/javac::\") ${JAVA_HOME}\n\n# Download and install Spark\nRUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \\\n    tar -xz -C /opt && \\\n    ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME\n\n# Download entrypoint.sh from matching tag\nRUN cd /opt/ && \\\n    wget https://raw.githubusercontent.com/apache/spark/v${SPARK_VER}/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh && \\\n    chmod a+x /opt/entrypoint.sh && \\\n    sed -i 's/tini -s/tini -g/g' /opt/entrypoint.sh\n\nWORKDIR $SPARK_HOME/work-dir\n# Ensure that work-dir is writable by everyone\nRUN chmod 0777 $SPARK_HOME/work-dir\n\nENTRYPOINT [ \"/opt/entrypoint.sh\" ]\n\nUSER jovyan\n"
  },
  {
    "path": "etc/docker/kernel-spark-py/README.md",
    "content": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.\n\n# What it Gives You\n\n- IPython kernel support (with debugger)\n- [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook)\n- Spark on kubernetes support from within a Jupyter Notebook\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-spark-r/Dockerfile",
    "content": "ARG HUB_ORG\nARG TAG\n\nARG BASE_CONTAINER=$HUB_ORG/kernel-r:$TAG\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\n\nUSER root\n\nENV SPARK_VER=$SPARK_VERSION\nENV SPARK_HOME=/opt/spark\nENV KERNEL_LANGUAGE=R\nENV R_LIBS_USER=$R_LIBS_USER:${R_HOME}/library:${SPARK_HOME}/R/lib\nENV PATH=$PATH:$SPARK_HOME/bin\n\nRUN dpkg --purge --force-depends ca-certificates-java \\\n    && apt-get update \\\n    && apt-get install -y \\\n    ca-certificates \\\n    ca-certificates-java \\\n    openjdk-8-jdk \\\n    libssl-dev \\\n    && rm -rf /var/lib/apt/lists/*\n\nENV JAVA_HOME=/usr/lib/jvm/java\nRUN ln -s $(readlink -f /usr/bin/javac | sed \"s:/bin/javac::\") ${JAVA_HOME}\n\n# Download and install Spark\nRUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \\\n    tar -xz -C /opt && \\\n    ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME\n\n# Download entrypoint.sh from matching tag\nRUN cd /opt/ && \\\n    wget https://raw.githubusercontent.com/apache/spark/v${SPARK_VER}/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh && \\\n    chmod a+x /opt/entrypoint.sh && \\\n    sed -i 's/tini -s/tini -g/g' /opt/entrypoint.sh\n\nWORKDIR $SPARK_HOME/work-dir\n# Ensure that work-dir is writable by everyone\nRUN chmod 0777 $SPARK_HOME/work-dir\n\nENTRYPOINT [ \"/opt/entrypoint.sh\" ]\n\nUSER jovyan\n"
  },
  {
    "path": "etc/docker/kernel-spark-r/README.md",
    "content": "This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results.\n\n# What it Gives You\n\n- IRkernel kernel support\n- Spark on kubernetes support from within a Jupyter Notebook\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-tf-gpu-py/Dockerfile",
    "content": "# Ubuntu:xenial\nARG BASE_CONTAINER=tensorflow/tensorflow:2.9.1-gpu\nFROM $BASE_CONTAINER\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update && apt-get install -yq \\\n    build-essential \\\n    libsm6 \\\n    libxext-dev \\\n    libxrender1 \\\n    netcat \\\n    python3-dev \\\n    tzdata \\\n    unzip && \\\n    rm -rf /var/lib/apt/lists/* && \\\n    pip install --upgrade future pycryptodomex ipykernel\n\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\nUSER root\n\nRUN adduser --system --uid 1000 --gid 100 jovyan && \\\n    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n    chown -R jovyan:users /usr/local/bin/kernel-launchers\n\n\nUSER jovyan\nENV KERNEL_LANGUAGE=python\nCMD /usr/local/bin/bootstrap-kernel.sh\n"
  },
  {
    "path": "etc/docker/kernel-tf-gpu-py/README.md",
    "content": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on [tensorflow/tensorflow:2.7.0-gpu-jupyter](https://hub.docker.com/r/tensorflow/tensorflow/) deriving from the [tensorflow](https://github.com/tensorflow/tensorflow) project.\n\n# What it Gives You\n\n- IPython kernel support supplemented with Tensorflow functionality (and debugger)\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/docker/kernel-tf-py/Dockerfile",
    "content": "# Ubuntu:Bionic\n# TensorFlow 2.4.0\nARG BASE_CONTAINER=jupyter/tensorflow-notebook:2023-10-20\n\nFROM $BASE_CONTAINER\n\nENV KERNEL_LANGUAGE=python\n\nADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/\n\nRUN conda install --quiet --yes \\\n    pillow \\\n    future \\\n    pycryptodomex && \\\n    fix-permissions $CONDA_DIR\n\nUSER root\n\nRUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \\\n\tchmod 0755 /usr/local/bin/bootstrap-kernel.sh && \\\n\tchown -R jovyan:users /usr/local/bin/kernel-launchers\n\nUSER jovyan\n\n# Disable healthcheck inherited from notebook image\nHEALTHCHECK NONE\n\nCMD [ \"/usr/local/bin/bootstrap-kernel.sh\" ]\n"
  },
  {
    "path": "etc/docker/kernel-tf-py/README.md",
    "content": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image deriving from the [jupyter/tensorflow-notebook](https://github.com/jupyter/docker-stacks/tree/main/images/tensorflow-notebook) project.\n\n# What it Gives You\n\n- IPython kernel support supplemented with Tensorflow functionality (and debugger)\n\n# Basic Use\n\nDeploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment.\n\nLaunch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.\n\nFor more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).\n"
  },
  {
    "path": "etc/kernel-launchers/R/scripts/launch_IRkernel.R",
    "content": "library(argparse)\nlibrary(jsonlite)\n\nrequire(\"SparkR\")\nrequire(\"base64enc\")\nrequire(\"digest\")\nrequire(\"stringr\")\n\nr_libs_user <- Sys.getenv(\"R_LIBS_USER\")\n\nsparkConfigList <- list(\nspark.executorEnv.R_LIBS_USER=r_libs_user,\nspark.rdd.compress=\"true\")\n\nmin_port_range_size = Sys.getenv(\"MIN_PORT_RANGE_SIZE\")\nif ( is.null(min_port_range_size) )\n    min_port_range_size = Sys.getenv(\"EG_MIN_PORT_RANGE_SIZE\")\nif ( is.null(min_port_range_size) )\n    min_port_range_size = 1000\n\n# Initializes the Spark session/context and SQL context\ninitialize_spark_session <- function(mode) {\n    # Make sure SparkR package is loaded last; this is necessary\n    # to avoid the need to fully qualify package namespace (using ::)\n    old <- getOption(\"defaultPackages\")\n    options(defaultPackages = c(old, \"SparkR\"))\n\n    if (identical(mode, \"eager\")) {\n        # Start the spark context immediately if set to eager\n        spark <- SparkR::sparkR.session(enableHiveSupport = FALSE, sparkConfig=sparkConfigList)\n        assign(\"spark\", spark, envir = .GlobalEnv)\n        sc <- SparkR:::callJStatic(\"org.apache.spark.sql.api.r.SQLUtils\", \"getJavaSparkContext\", spark)\n        sqlContext <<- SparkR::sparkRSQL.init(sc)\n        assign(\"sc\", sc, envir = .GlobalEnv)\n\n    } else {\n        # Keep lazy evaluation as default starting mode if initialization mode is lazy or not set at all\n        makeActiveBinding(\".sparkRsession\", sparkSessionFn, SparkR:::.sparkREnv)\n        makeActiveBinding(\".sparkRjsc\", sparkContextFn, SparkR:::.sparkREnv)\n\n        delayedAssign(\"spark\", {get(\".sparkRsession\", envir=SparkR:::.sparkREnv)}, assign.env=.GlobalEnv)\n\n        # backward compatibility for Spark 1.6 and earlier notebooks\n        delayedAssign(\"sc\", {get(\".sparkRjsc\", envir=SparkR:::.sparkREnv)}, assign.env=.GlobalEnv)\n        delayedAssign(\"sqlContext\", {spark}, assign.env=.GlobalEnv)\n    }\n}\n\nsparkSessionFn <- 
local({\n     function(v) {\n       if (missing(v)) {\n         # get SparkSession\n\n         # create a new sparkSession\n         rm(\".sparkRsession\", envir=SparkR:::.sparkREnv) # rm to ensure no infinite recursion\n\n         get(\"sc\", envir=.GlobalEnv)\n\n         sparkSession <- SparkR::sparkR.session(\n                                        sparkHome=Sys.getenv(\"SPARK_HOME\"),\n                                        sparkConfig=sparkConfigList);\n         sparkSession\n       }\n     }\n   })\n\nsparkContextFn <- local({\n    function(v) {\n      if (missing(v)) {\n        # get SparkContext\n\n        # create a new sparkContext\n        rm(\".sparkRjsc\", envir=SparkR:::.sparkREnv) # rm to ensure no infinite recursion\n\n        message (\"Obtaining Spark session...\")\n\n        sparkContext <- SparkR:::sparkR.sparkContext(\n                                          sparkHome=Sys.getenv(\"SPARK_HOME\"),\n                                          sparkEnvirMap=SparkR:::convertNamedListToEnv(sparkConfigList))\n\n        message (\"Spark session obtained.\")\n        sparkContext\n      }\n    }\n  })\n\n# Figure out the connection_file to use\ndetermine_connection_file <- function(kernel_id){\n    base_file = paste(\"kernel-\", kernel_id, sep=\"\")\n    temp_file = tempfile(pattern=paste(base_file,\"_\",sep=\"\"), fileext=\".json\")\n    cat(paste(\"Using connection file \",temp_file,\" \\n\",sep=\"'\"))\n    return(temp_file)\n}\n\nvalidate_port_range <- function(port_range){\n    port_ranges = strsplit(port_range, \"..\", fixed=TRUE)\n    lower_port = as.integer(port_ranges[[1]][1])\n    upper_port = as.integer(port_ranges[[1]][2])\n\n    port_range_size = upper_port - lower_port\n    if (port_range_size != 0) {\n        if (port_range_size < min_port_range_size){\n            message(paste(\"Port range validation failed for range:\", port_range, \". 
Range size must be at least\",\n                min_port_range_size, \"as specified by env EG_MIN_PORT_RANGE_SIZE\"))\n            return(NA)\n        }\n    }\n    return(list(\"lower_port\"=lower_port, \"upper_port\"=upper_port))\n}\n\n# Check arguments\nparser <- argparse::ArgumentParser(description=\"Parse Arguments for R Launcher\")\nparser$add_argument(\"--kernel-id\", nargs='?',\n       help=\"the id associated with the launched kernel\")\nparser$add_argument(\"--port-range\", nargs='?', metavar='<lowerPort>..<upperPort>',\n       help=\"the range of ports impose for kernel ports\")\nparser$add_argument(\"--response-address\", nargs='?', metavar='<ip>:<port>',\n      help=\"the IP:port address of the system hosting the server and expecting response\")\nparser$add_argument(\"--public-key\", nargs='?',\n      help=\"the public key used to encrypt connection information\")\nparser$add_argument(\"--spark-context-initialization-mode\", nargs='?',\n      help=\"the initialization mode of the spark context: lazy, eager or none\")\nparser$add_argument(\"--customAppName\", nargs='?', help=\"the custom application name to be set\")\n\n# The following arguments are deprecated and will be used only if their mirroring arguments have no value.\n# This means that the default value for --spark-context-initialization-mode (none) will need to come from\n# the mirrored args' default until deprecated items have been removed.\n\nparser$add_argument(\"connection_file\", nargs='?', help='Connection file to write connection info')\nparser$add_argument(\"--RemoteProcessProxy.kernel-id\", nargs='?',\n       help=\"the id associated with the launched kernel (deprecated)\")\nparser$add_argument(\"--RemoteProcessProxy.port-range\", nargs='?', metavar='<lowerPort>..<upperPort>',\n       help=\"the range of ports impose for kernel ports (deprecated)\")\nparser$add_argument(\"--RemoteProcessProxy.response-address\", nargs='?', metavar='<ip>:<port>',\n      help=\"the IP:port address of the 
system hosting the server and expecting response (deprecated)\")\nparser$add_argument(\"--RemoteProcessProxy.public-key\", nargs='?',\n      help=\"the public key used to encrypt connection information (deprecated)\")\nparser$add_argument(\"--RemoteProcessProxy.spark-context-initialization-mode\", nargs='?', default=\"none\",\n      help=\"the initialization mode of the spark context: lazy, eager or none (deprecated)\")\n\nargv <- parser$parse_args()\n\nkernel_id <- argv$kernel_id\nif (is.null(kernel_id)) {\n    kernel_id <- argv$RemoteProcessProxy.kernel_id\n}\n\nport_range <- argv$port_range\nif (is.null(port_range)) {\n    port_range <- argv$RemoteProcessProxy.port_range\n}\n\nresponse_address <- argv$response_address\nif (is.null(response_address)) {\n    response_address <- argv$RemoteProcessProxy.response_address\n}\n\npublic_key <- argv$public_key\nif (is.null(public_key)) {\n    public_key <- argv$RemoteProcessProxy.public_key\n}\n\nspark_context_initialization_mode <- argv$spark_context_initialization_mode\nif (is.null(spark_context_initialization_mode)) {\n    spark_context_initialization_mode <- argv$RemoteProcessProxy.spark_context_initialization_mode\n}\n\n\nif (is.null(argv$connection_file) && is.null(kernel_id)){\n    message(\"At least one of the parameters: 'connection_file' or '--kernel-id' must be provided!\")\n    return(NA)\n}\n\nif (is.null(kernel_id)){\n    message(\"Parameter '--kernel-id' must be provided!\")\n    return(NA)\n}\n\nif (is.null(public_key)){\n    message(\"Parameter '--public-key' must be provided!\")\n    return(NA)\n}\n\n# if we have a response address, then deal with items relative to remote support (ports, comm-socket, etc.)\nif (!is.null(response_address) && str_length(response_address) > 0){\n\n    #If port range argument is passed from kernel json with no value\n    if (is.null(port_range)){\n        port_range <- NA\n    }\n\n    #  If there is a response address, use pull socket mode\n    connection_file <- 
determine_connection_file(kernel_id)\n\n    # if port-range was provided, validate the range and determine bounds\n    lower_port = 0\n    upper_port = 0\n    if (!is.na(port_range)){\n        range <- validate_port_range(port_range)\n        if (length(range) > 1){\n            lower_port = range$lower_port\n            upper_port = range$upper_port\n        }\n    }\n\n    # Get the pid of the launcher so the listener thread (process) can detect its\n    # presence to know when to shutdown.\n    pid <- Sys.getpid()\n\n    # Hoop to jump through to get the directory this script resides in so that we can\n    # load the co-located python server_listener.py file.  This code will not work if\n    # called directly from within RStudio.\n    # https://stackoverflow.com/questions/1815606/rscript-determine-path-of-the-executing-script\n    launch_args <- commandArgs(trailingOnly = FALSE)\n    file_option <- \"--file=\"\n    script_path <- sub(file_option, \"\", launch_args[grep(file_option, launch_args)])\n    listener_file <- paste(sep=\"/\", dirname(script_path), \"server_listener.py\")\n\n    # Launch the server listener logic in an async manner and poll for the existence of\n    # the connection file before continuing.  
Should there be an issue, the server\n    # will terminate the launcher, so there's no need for a timeout.\n    python_cmd <- Sys.getenv(\"PYSPARK_PYTHON\", \"python\")  # If present, use the same python specified for Spark\n\n    svr_listener_cmd <- stringr::str_interp(gsub(\"\\n[:space:]*\" , \"\",\n                paste(python_cmd,\"-c \\\"import os, sys, imp;\n                gl = imp.load_source('setup_server_listener', '${listener_file}');\n                gl.setup_server_listener(conn_filename='${connection_file}', parent_pid='${pid}',\n                lower_port=${lower_port}, upper_port=${upper_port},\n                response_addr='${response_address}', kernel_id='${kernel_id}', public_key='${public_key}')\\\"\")))\n    system(svr_listener_cmd, wait=FALSE)\n\n    while (!file.exists(connection_file)) {\n        Sys.sleep(0.5)\n    }\n\n} else {\n    # already provided\n    connection_file = argv$connection_file\n}\n\n# If spark context creation is desired go ahead and initialize the session/context\n# Otherwise, skip spark context creation if set to none or not provided\nif (!is.na(spark_context_initialization_mode)){\n    if (!identical(spark_context_initialization_mode, \"none\")){\n        # Add custom application name (spark.app.name) spark config if available, else default to kernel_id\n        if (!is.null(argv$customAppName) && str_length(argv$customAppName) > 0){\n            sparkConfigList[['spark.app.name']] <- argv$customAppName\n        } else {\n            sparkConfigList[['spark.app.name']] <- kernel_id\n        }\n        initialize_spark_session(spark_context_initialization_mode)\n    }\n}\n\n# Start the kernel\nIRkernel::main(connection_file)\n\n# Only unlink the connection file if we're launched for remote behavior.\nif (!is.na(response_address)){\n    unlink(connection_file)\n}\n\n# Stop the context and exit\nif (!identical(spark_context_initialization_mode, \"none\")){\n    sparkR.session.stop()\n}\n"
  },
  {
    "path": "etc/kernel-launchers/R/scripts/server_listener.py",
    "content": "\"\"\"A server listener for R.\"\"\"\n\nimport base64\nimport json\nimport logging\nimport os\nimport random\nimport socket\nimport uuid\nfrom threading import Thread\n\nfrom Cryptodome.Cipher import AES, PKCS1_v1_5\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Random import get_random_bytes\nfrom Cryptodome.Util.Padding import pad\nfrom jupyter_client.connect import write_connection_file\n\nLAUNCHER_VERSION = 1  # Indicate to server the version of this launcher (payloads may vary)\n\nmax_port_range_retries = int(\n    os.getenv(\"MAX_PORT_RANGE_RETRIES\", os.getenv(\"EG_MAX_PORT_RANGE_RETRIES\", \"5\"))\n)\n\nlog_level = os.getenv(\"LOG_LEVEL\", os.getenv(\"EG_LOG_LEVEL\", \"10\"))\nlog_level = int(log_level) if log_level.isdigit() else log_level\n\nlogging.basicConfig(format=\"[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s\")\n\nlogger = logging.getLogger(\"server_listener for R launcher\")\nlogger.setLevel(log_level)\n\n\ndef _encrypt(connection_info_str, public_key):\n    \"\"\"Encrypt the connection information using a generated AES key that is then encrypted using\n    the public key passed from the server.  
Both are then returned in an encoded JSON payload.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    aes_key = get_random_bytes(16)\n    cipher = AES.new(aes_key, mode=AES.MODE_ECB)\n\n    # Encrypt the connection info using the aes_key\n    encrypted_connection_info = cipher.encrypt(pad(connection_info_str, 16))\n    b64_connection_info = base64.b64encode(encrypted_connection_info)\n\n    # Encrypt the aes_key using the server's public key\n    imported_public_key = RSA.importKey(base64.b64decode(public_key.encode()))\n    cipher = PKCS1_v1_5.new(key=imported_public_key)\n    encrypted_key = base64.b64encode(cipher.encrypt(aes_key))\n\n    # Compose the payload and Base64 encode it\n    payload = {\n        \"version\": LAUNCHER_VERSION,\n        \"key\": encrypted_key.decode(),\n        \"conn_info\": b64_connection_info.decode(),\n    }\n    b64_payload = base64.b64encode(json.dumps(payload).encode(encoding=\"utf-8\"))\n    return b64_payload\n\n\ndef return_connection_info(\n    connection_file, response_addr, lower_port, upper_port, kernel_id, public_key, parent_pid\n):\n    \"\"\"Returns the connection information corresponding to this kernel.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    response_parts = response_addr.split(\":\")\n    if len(response_parts) != 2:\n        logger.error(\n            f\"Invalid format for response address '{response_addr}'. Assuming 'pull' mode...\"\n        )\n        return\n\n    response_ip = response_parts[0]\n    try:\n        response_port = int(response_parts[1])\n    except ValueError:\n        logger.error(\n            f\"Invalid port component found in response address '{response_addr}'. 
\"\n            \"Assuming 'pull' mode...\"\n        )\n        return\n\n    with open(connection_file) as fp:\n        cf_json = json.load(fp)\n        fp.close()\n\n    # add process and process group ids into connection info\n    cf_json[\"pid\"] = parent_pid\n    cf_json[\"pgid\"] = os.getpgid(parent_pid)\n\n    # prepare socket address for handling signals\n    comm_sock = prepare_comm_socket(lower_port, upper_port)\n    cf_json[\"comm_port\"] = comm_sock.getsockname()[1]\n    cf_json[\"kernel_id\"] = kernel_id\n\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.connect((response_ip, response_port))\n        json_content = json.dumps(cf_json).encode(encoding=\"utf-8\")\n        logger.debug(f\"JSON Payload '{json_content}\")\n        payload = _encrypt(json_content, public_key)\n        logger.debug(f\"Encrypted Payload '{payload}\")\n        s.send(payload)\n\n    return comm_sock\n\n\ndef prepare_comm_socket(lower_port, upper_port):\n    \"\"\"Prepares the socket to which the server will send signal and shutdown requests.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    sock = _select_socket(lower_port, upper_port)\n    logger.info(\n        f\"Signal socket bound to host: {sock.getsockname()[0]}, port: {sock.getsockname()[1]}\"\n    )\n    sock.listen(1)\n    sock.settimeout(5)\n    return sock\n\n\ndef _select_ports(count, lower_port, upper_port):\n    \"\"\"Select and return n random ports that are available and adhere to the given port range, if applicable.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    ports = []\n    sockets = []\n    for _ in range(count):\n        sock = _select_socket(lower_port, upper_port)\n        ports.append(sock.getsockname()[1])\n        sockets.append(sock)\n    for sock in sockets:\n        sock.close()\n    return ports\n\n\ndef _select_socket(lower_port, upper_port):\n    
\"\"\"Create and return a socket whose port is available and adheres to the given port range, if applicable.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    found_port = False\n    retries = 0\n    while not found_port:\n        try:\n            sock.bind((\"0.0.0.0\", _get_candidate_port(lower_port, upper_port)))  # noqa\n            found_port = True\n        except Exception:\n            retries = retries + 1\n            if retries > max_port_range_retries:\n                msg = \"Failed to locate port within range {}..{} after {} retries!\".format(\n                    lower_port, upper_port, max_port_range_retries\n                )\n                raise RuntimeError(msg) from None\n    return sock\n\n\ndef _get_candidate_port(lower_port, upper_port):\n    \"\"\"Returns a port within the given range.  If the range is zero, the zero is returned.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    range_size = upper_port - lower_port\n    if range_size == 0:\n        return 0\n    return random.randint(lower_port, upper_port)\n\n\ndef get_server_request(sock):\n    \"\"\"Gets a request from the server and returns the corresponding dictionary.\n\n    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    conn = None\n    data = \"\"\n    request_info = None\n    try:\n        conn, addr = sock.accept()\n        while True:\n            buffer = conn.recv(1024).decode(\"utf-8\")\n            if not buffer:  # send is complete\n                request_info = json.loads(data)\n                break\n            data = data + buffer  # append what we received until we get no more...\n    except Exception as e:\n        if type(e) is not socket.timeout:\n            raise e\n    finally:\n        if conn:\n            conn.close()\n\n    return 
request_info\n\n\ndef server_listener(sock, parent_pid):\n    \"\"\"Waits for requests from the server and processes each when received.  Currently,\n    these will be one of a sending a signal to the corresponding kernel process (signum) or\n    stopping the listener and exiting the kernel (shutdown).\n\n     This code also exists in the Python kernel-launcher's launch_ipykernel.py script.\n    \"\"\"\n    shutdown = False\n    while not shutdown:\n        request = get_server_request(sock)\n        if request:\n            signum = -1  # prevent logging poll requests since that occurs every 3 seconds\n            if request.get(\"signum\") is not None:\n                signum = int(request.get(\"signum\"))\n                os.kill(parent_pid, signum)\n            if request.get(\"shutdown\") is not None:\n                shutdown = bool(request.get(\"shutdown\"))\n            if signum != 0:\n                logger.info(f\"server_listener got request: {request}\")\n\n\ndef setup_server_listener(\n    conn_filename, parent_pid, lower_port, upper_port, response_addr, kernel_id, public_key\n):\n    \"\"\"Set up the server listener.\"\"\"\n    ip = \"0.0.0.0\"  # noqa\n    key = str(uuid.uuid4()).encode()  # convert to bytes\n\n    ports = _select_ports(5, lower_port, upper_port)\n\n    write_connection_file(\n        fname=conn_filename,\n        ip=ip,\n        key=key,\n        shell_port=ports[0],\n        iopub_port=ports[1],\n        stdin_port=ports[2],\n        hb_port=ports[3],\n        control_port=ports[4],\n    )\n    if response_addr:\n        comm_socket = return_connection_info(\n            conn_filename,\n            response_addr,\n            int(lower_port),\n            int(upper_port),\n            kernel_id,\n            public_key,\n            int(parent_pid),\n        )\n        if comm_socket:  # socket in use, start server listener thread\n            server_listener_thread = Thread(\n                target=server_listener,\n               
 args=(\n                    comm_socket,\n                    int(parent_pid),\n                ),\n            )\n            server_listener_thread.start()\n\n    return\n\n\n__all__ = [\n    \"setup_server_listener\",\n]\n"
  },
  {
    "path": "etc/kernel-launchers/bootstrap/bootstrap-kernel.sh",
    "content": "#!/bin/bash\n\nPORT_RANGE=${PORT_RANGE:-${EG_PORT_RANGE:-0..0}}\nRESPONSE_ADDRESS=${RESPONSE_ADDRESS:-${EG_RESPONSE_ADDRESS}}\nPUBLIC_KEY=${PUBLIC_KEY:-${EG_PUBLIC_KEY}}\nKERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}\nKERNEL_SPARK_CONTEXT_INIT_MODE=${KERNEL_SPARK_CONTEXT_INIT_MODE:-none}\nKERNEL_CLASS_NAME=${KERNEL_CLASS_NAME}\n\necho $0 env: `env`\n\nlaunch_python_kernel() {\n  # Launch the python kernel launcher - which embeds the IPython kernel and listens for interrupts\n  # and shutdown requests from Enterprise Gateway.\n\n  export JPY_PARENT_PID=$$  # Force reset of parent pid since we're detached\n\n  if [ -z \"${KERNEL_CLASS_NAME}\" ]\n  then\n    kernel_class_option=\"\"\n  else\n    kernel_class_option=\"--kernel-class-name ${KERNEL_CLASS_NAME}\"\n  fi\n\n\tset -x\n\tpython ${KERNEL_LAUNCHERS_DIR}/python/scripts/launch_ipykernel.py --kernel-id ${KERNEL_ID} \\\n\t      --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} \\\n\t      --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE} ${kernel_class_option}\n\t{ set +x; } 2>/dev/null\n}\n\nlaunch_R_kernel() {\n    # Launch the R kernel launcher - which embeds the IRkernel kernel and listens for interrupts\n    # and shutdown requests from Enterprise Gateway.\n\n\tset -x\n\tRscript ${KERNEL_LAUNCHERS_DIR}/R/scripts/launch_IRkernel.R --kernel-id ${KERNEL_ID} --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE}\n\t{ set +x; } 2>/dev/null\n}\n\nlaunch_scala_kernel() {\n    # Launch the scala kernel launcher - which embeds the Apache Toree kernel and listens for interrupts\n    # and shutdown requests from Enterprise Gateway.  
This kernel is currently always launched using\n    # spark-submit, so additional setup is required.\n\n    PROG_HOME=${KERNEL_LAUNCHERS_DIR}/scala\n    KERNEL_ASSEMBLY=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-assembly-*.jar;)`\n    TOREE_ASSEMBLY=\"${PROG_HOME}/lib/${KERNEL_ASSEMBLY}\"\n    if [ ! -f ${TOREE_ASSEMBLY} ]; then\n        echo \"Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing.  Exiting...\"\n        exit 1\n    fi\n\n    # Toree launcher jar path, plus required lib jars (toree-assembly)\n    JARS=\"${TOREE_ASSEMBLY}\"\n    # Toree launcher app path\n    LAUNCHER_JAR=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-launcher*.jar;)`\n    LAUNCHER_APP=\"${PROG_HOME}/lib/${LAUNCHER_JAR}\"\n    if [ ! -f ${LAUNCHER_APP} ]; then\n        echo \"Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing.  Exiting...\"\n        exit 1\n    fi\n\n    SPARK_OPTS=\"--name ${KERNEL_USERNAME}-${KERNEL_ID}\"\n    TOREE_OPTS=\"--alternate-sigint USR2\"\n\n    set -x\n    eval exec \\\n         \"${SPARK_HOME}/bin/spark-submit\" \\\n         \"${SPARK_OPTS}\" \\\n         --jars \"${JARS}\" \\\n         --class launcher.ToreeLauncher \\\n         \"${LAUNCHER_APP}\" \\\n         \"${TOREE_OPTS}\" \\\n         \"--kernel-id ${KERNEL_ID} --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE}\"\n    { set +x; } 2>/dev/null\n}\n\n# Ensure that required envs are present, check language before the dynamic values\nif [ -z \"${KERNEL_LANGUAGE+x}\" ]\nthen\n    echo \"KERNEL_LANGUAGE is required.  
Set this value in the image or when starting container.\"\n    exit 1\nfi\nif [ -z \"${KERNEL_ID+x}\" ] || [ -z \"${RESPONSE_ADDRESS+x}\" ] || [ -z \"${PUBLIC_KEY+x}\" ]\nthen\n    echo \"Environment variables, KERNEL_ID, RESPONSE_ADDRESS, and PUBLIC_KEY are required.\"\n    exit 1\nfi\n\n# Invoke appropriate launcher based on KERNEL_LANGUAGE (case-insensitive)\n\nif [[ \"${KERNEL_LANGUAGE,,}\" == \"python\" ]]\nthen\n    launch_python_kernel\nelif [[ \"${KERNEL_LANGUAGE,,}\" == \"scala\" ]]\nthen\n    launch_scala_kernel\nelif [[ \"${KERNEL_LANGUAGE,,}\" == \"r\" ]]\nthen\n    launch_R_kernel\nelse\n\techo \"Unrecognized value for KERNEL_LANGUAGE: '${KERNEL_LANGUAGE}'!\"\n\texit 1\nfi\nexit 0\n"
  },
  {
    "path": "etc/kernel-launchers/docker/scripts/launch_docker.py",
    "content": "\"\"\"Launches a containerized kernel.\"\"\"\n\nimport argparse\nimport os\nimport re\nimport sys\n\nimport urllib3\nfrom docker.client import DockerClient\nfrom docker.types import EndpointSpec, RestartPolicy\n\nurllib3.disable_warnings()\n\n# Set env to False if the container should be left around for debug purposes, etc.\nremove_container = bool(\n    os.getenv(\"REMOVE_CONTAINER\", os.getenv(\"EG_REMOVE_CONTAINER\", \"True\")).lower() == \"true\"\n)\nswarm_mode = bool(os.getenv(\"DOCKER_MODE\", os.getenv(\"EG_DOCKER_MODE\", \"swarm\")).lower() == \"swarm\")\n\n\ndef launch_docker_kernel(\n    kernel_id, port_range, response_addr, public_key, spark_context_init_mode, kernel_class_name\n):\n    \"\"\"Launches a containerized kernel.\"\"\"\n\n    # Can't proceed if no image was specified.\n    image_name = os.environ.get(\"KERNEL_IMAGE\", None)\n    if image_name is None:\n        sys.exit(\"ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!\")\n\n    if not re.match(\n        r'^[a-zA-Z0-9][a-zA-Z0-9._\\-/]*(:[a-zA-Z0-9._\\-]+)?(@sha256:[a-f0-9]+)?$', image_name\n    ):\n        sys.exit(f\"ERROR - KERNEL_IMAGE contains invalid characters: {image_name}\")\n\n    # Container name is composed of KERNEL_USERNAME and KERNEL_ID\n    container_name = os.environ.get(\"KERNEL_USERNAME\", \"\") + \"-\" + kernel_id\n\n    # Determine network. 
If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...\n    docker_network = os.environ.get(\"DOCKER_NETWORK\", os.environ.get(\"EG_DOCKER_NETWORK\", \"bridge\"))\n\n    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...\n    labels = {}\n    labels[\"kernel_id\"] = kernel_id\n    labels[\"component\"] = \"kernel\"\n    labels[\"app\"] = \"enterprise-gateway\"\n\n    # Capture env parameters...\n    param_env = {}\n    param_env[\"PORT_RANGE\"] = port_range\n    param_env[\"PUBLIC_KEY\"] = public_key\n    param_env[\"RESPONSE_ADDRESS\"] = response_addr\n    param_env[\"KERNEL_SPARK_CONTEXT_INIT_MODE\"] = spark_context_init_mode\n    if kernel_class_name:\n        param_env[\"KERNEL_CLASS_NAME\"] = kernel_class_name\n\n    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and EG_CLIENT_ENVS)\n    # just add the env here.\n    param_env.update(os.environ)\n    param_env.pop(\n        \"PATH\"\n    )  # Let the image PATH be used.  
Since this is relative to images, we're probably safe.\n\n    user = param_env.get(\"KERNEL_UID\")\n    group = param_env.get(\"KERNEL_GID\")\n\n    # setup common args\n    kwargs = {}\n    kwargs[\"name\"] = container_name\n    kwargs[\"hostname\"] = container_name\n    kwargs[\"user\"] = user\n    kwargs[\"labels\"] = labels\n\n    client = DockerClient.from_env()\n    if swarm_mode:\n        networks = []\n        networks.append(docker_network)\n        # mounts = list()  # Enable if necessary\n        # mounts.append(\"/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro\")\n        endpoint_spec = EndpointSpec(mode=\"dnsrr\")\n        restart_policy = RestartPolicy(condition=\"none\")\n\n        # finish args setup\n        kwargs[\"env\"] = param_env\n        kwargs[\"endpoint_spec\"] = endpoint_spec\n        kwargs[\"restart_policy\"] = restart_policy\n        kwargs[\"container_labels\"] = labels\n        kwargs[\"networks\"] = networks\n        kwargs[\"groups\"] = [group, \"100\"]\n        if param_env.get(\"KERNEL_WORKING_DIR\"):\n            kwargs[\"workdir\"] = param_env.get(\"KERNEL_WORKING_DIR\")\n        # kwargs['mounts'] = mounts   # Enable if necessary\n        # print(\"service args: {}\".format(kwargs))  # useful for debug\n        client.services.create(image_name, **kwargs)\n    else:\n        # volumes = {  # Enable if necessary\n        #     \"/usr/local/share/jupyter/kernels\": {\n        #         \"bind\": \"/usr/local/share/jupyter/kernels\",\n        #         \"mode\": \"ro\",\n        #     }\n        # }\n\n        # finish args setup\n        kwargs[\"environment\"] = param_env\n        kwargs[\"remove\"] = remove_container\n        kwargs[\"network\"] = docker_network\n        kwargs[\"group_add\"] = [group, \"100\"]\n        kwargs[\"detach\"] = True\n        if param_env.get(\"KERNEL_WORKING_DIR\"):\n            kwargs[\"working_dir\"] = param_env.get(\"KERNEL_WORKING_DIR\")\n        # kwargs['volumes'] = 
volumes   # Enable if necessary\n        # print(\"container args: {}\".format(kwargs))  # useful for debug\n        client.containers.run(image_name, **kwargs)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--kernel-id\",\n        dest=\"kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel.\",\n    )\n    parser.add_argument(\n        \"--port-range\",\n        dest=\"port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports\",\n    )\n    parser.add_argument(\n        \"--response-address\",\n        dest=\"response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file\",\n    )\n    parser.add_argument(\n        \"--public-key\",\n        dest=\"public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information\",\n    )\n    parser.add_argument(\n        \"--spark-context-initialization-mode\",\n        dest=\"spark_context_init_mode\",\n        nargs=\"?\",\n        help=\"Indicates whether or how a spark context should be created\",\n    )\n    parser.add_argument(\n        \"--kernel-class-name\",\n        dest=\"kernel_class_name\",\n        nargs=\"?\",\n        help=\"Indicates the name of the kernel class to use.  
Must be a subclass of 'ipykernel.kernelbase.Kernel'.\",\n    )\n\n    # The following arguments are deprecated and will be used only if their mirroring arguments have no value.\n    # This means that the default value for --spark-context-initialization-mode (none) will need to come from\n    # the mirrored args' default until deprecated item has been removed.\n    parser.add_argument(\n        \"--RemoteProcessProxy.kernel-id\",\n        dest=\"rpp_kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel. (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.port-range\",\n        dest=\"rpp_port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.response-address\",\n        dest=\"rpp_response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.public-key\",\n        dest=\"rpp_public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.spark-context-initialization-mode\",\n        dest=\"rpp_spark_context_init_mode\",\n        nargs=\"?\",\n        help=\"Indicates whether or how a spark context should be created (deprecated)\",\n        default=\"none\",\n    )\n\n    arguments = vars(parser.parse_args())\n    kernel_id = arguments[\"kernel_id\"] or arguments[\"rpp_kernel_id\"]\n    port_range = arguments[\"port_range\"] or arguments[\"rpp_port_range\"]\n    response_addr = arguments[\"response_address\"] or arguments[\"rpp_response_address\"]\n    public_key = arguments[\"public_key\"] or arguments[\"rpp_public_key\"]\n    
spark_context_init_mode = (\n        arguments[\"spark_context_init_mode\"] or arguments[\"rpp_spark_context_init_mode\"]\n    )\n    kernel_class_name = arguments[\"kernel_class_name\"]\n\n    launch_docker_kernel(\n        kernel_id, port_range, response_addr, public_key, spark_context_init_mode, kernel_class_name\n    )\n"
  },
  {
    "path": "etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2",
    "content": "# This file defines the Kubernetes objects necessary for kernels to run witihin Kubernetes.\n# Substitution parameters are processed by the launch_kubernetes.py code located in the\n# same directory.  Some values are factory values, while others (typically prefixed with 'kernel_') can be\n# provided by the client.\n#\n# This file can be customized as needed.  No changes are required to launch_kubernetes.py provided kernel_\n# values are used - which be automatically set from corresponding KERNEL_ env values.  Updates will be required\n# to launch_kubernetes.py if new document sections (i.e., new k8s 'kind' objects) are introduced.\n#\napiVersion: v1\nkind: Pod\nmetadata:\n  name: {{ kernel_pod_name | yaml_safe }}\n  namespace: {{ kernel_namespace | yaml_safe }}\n  labels:\n    kernel_id: {{ kernel_id | yaml_safe }}\n    app: enterprise-gateway\n    component: kernel\n    source: kernel-pod.yaml\n  annotations:\n    cluster-autoscaler.kubernetes.io/safe-to-evict: \"false\"\nspec:\n  restartPolicy: Never\n  serviceAccountName: {{ kernel_service_account_name | yaml_safe }}\n# NOTE: that using runAsGroup requires that feature-gate RunAsGroup be enabled.\n# WARNING: Only using runAsUser w/o runAsGroup or NOT enabling the RunAsGroup feature-gate\n# will result in the new kernel pod's effective group of 0 (root)! although the user will\n# correspond to the runAsUser value.  As a result, BOTH should be uncommented AND the feature-gate\n# should be enabled to ensure expected behavior.  
In addition, 'fsGroup: 100' is recommended so\n# that /home/jovyan can be written to via the 'users' group (gid: 100) irrespective of the\n# \"kernel_uid\" and \"kernel_gid\" values.\n  {% if kernel_uid is defined or kernel_gid is defined %}\n  securityContext:\n    {% if kernel_uid is defined %}\n    runAsUser: {{ kernel_uid | int }}\n    {% endif %}\n    {% if kernel_gid is defined %}\n    runAsGroup: {{ kernel_gid | int }}\n    {% endif %}\n    fsGroup: 100\n  {% endif %}\n  containers:\n  - image: {{ kernel_image | yaml_safe }}\n    name: {{ kernel_pod_name | yaml_safe }}\n    env:\n# Add any custom envs here that aren't already configured for the kernel's environment\n#    - name: MY_CUSTOM_ENV\n#      value: \"my_custom_value\"\n    {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined or kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}\n    resources:\n      {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined %}\n      requests:\n        {% if kernel_cpus is defined %}\n        cpu: {{ kernel_cpus | yaml_safe }}\n        {% endif %}\n        {% if kernel_memory is defined %}\n        memory: {{ kernel_memory | yaml_safe }}\n        {% endif %}\n        {% if kernel_gpus is defined %}\n        nvidia.com/gpu: {{ kernel_gpus | yaml_safe }}\n        {% endif %}\n      {% endif %}\n      {% if kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}\n      limits:\n        {% if kernel_cpus_limit is defined %}\n        cpu: {{ kernel_cpus_limit | yaml_safe }}\n        {% endif %}\n        {% if kernel_memory_limit is defined %}\n        memory: {{ kernel_memory_limit | yaml_safe }}\n        {% endif %}\n        {% if kernel_gpus_limit is defined %}\n        nvidia.com/gpu: {{ kernel_gpus_limit | yaml_safe }}\n        {% endif %}\n      {% endif %}\n    {% endif %}\n    {% if kernel_working_dir %}\n    workingDir: 
{{ kernel_working_dir | yaml_safe }}\n    {% endif %}\n    volumeMounts:\n# Define any \"unconditional\" mounts here, followed by \"conditional\" mounts that vary per client\n    {% if kernel_volume_mounts %}\n      {% for volume_mount in kernel_volume_mounts %}\n    - {{ volume_mount | yaml_safe }}\n      {% endfor %}\n    {% endif %}\n  volumes:\n# Define any \"unconditional\" volumes here, followed by \"conditional\" volumes that vary per client\n  {% if kernel_volumes %}\n    {% for volume in kernel_volumes %}\n  - {{ volume | yaml_safe }}\n    {% endfor %}\n  {% endif %}\n"
  },
  {
    "path": "etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py",
    "content": "#!/opt/conda/bin/python\n\"\"\"Launch on kubernetes.\"\"\"\nimport argparse\nimport os\nimport sys\nfrom typing import Dict, List\n\nimport urllib3\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom kubernetes import client, config\nfrom kubernetes.client.rest import ApiException\n\nurllib3.disable_warnings()\n\nKERNEL_POD_TEMPLATE_PATH = \"/kernel-pod.yaml.j2\"\n\nALLOWED_K8S_KINDS = {\n    \"Pod\",\n    \"Secret\",\n    \"PersistentVolumeClaim\",\n    \"PersistentVolume\",\n    \"Service\",\n    \"ConfigMap\",\n}\nMAX_DOCUMENTS_PER_KIND = 1\nYAML_PARSED_KERNEL_VARS = {\"KERNEL_VOLUME_MOUNTS\", \"KERNEL_VOLUMES\"}\n\n\ndef yaml_safe_str(value):\n    \"\"\"Escape a value for safe inclusion in a YAML template.\n\n    Uses PyYAML's own serializer to produce properly escaped output:\n    - Strings are double-quoted with special characters escaped.\n    - Dicts/lists are serialized as YAML flow mappings/sequences.\n    - None, bools, and numbers are serialized to their YAML-canonical form.\n    \"\"\"\n    if isinstance(value, str):\n        return yaml.dump(value, default_style='\"', width=10000).strip()\n    if isinstance(value, (dict, list)):\n        return yaml.dump(value, default_flow_style=True, width=10000).strip()\n    # yaml.dump appends a document-end marker (\"...\\n\") for scalars; strip it\n    return yaml.dump(value, width=10000).replace(\"\\n...\", \"\").strip()\n\n\ndef generate_kernel_pod_yaml(keywords):\n    \"\"\"Return the kubernetes pod spec as a yaml string.\n\n    - load jinja2 template from this file directory.\n    - substitute template variables with keywords items.\n    \"\"\"\n    j_env = Environment(\n        loader=FileSystemLoader(os.path.dirname(__file__)),\n        trim_blocks=True,\n        lstrip_blocks=True,\n        autoescape=select_autoescape(\n            disabled_extensions=(\n                \"j2\",\n                \"yaml\",\n            ),\n            
default_for_string=True,\n            default=True,\n        ),\n    )\n    j_env.filters[\"yaml_safe\"] = yaml_safe_str\n\n    k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)\n\n    return k8s_yaml\n\n\ndef extend_pod_env(pod_def: dict) -> dict:\n    \"\"\"Extends the pod_def.spec.containers[0].env stanza with current environment.\"\"\"\n    env_stanza = pod_def[\"spec\"][\"containers\"][0].get(\"env\") or []\n\n    # Walk current set of template env entries and replace those found in the current\n    # env with their values (and record those items).   Then add all others from the env\n    # that were not already.\n    processed_entries: List[str] = []\n    for item in env_stanza:\n        item_name = item.get(\"name\")\n        if item_name in os.environ:\n            item[\"value\"] = os.environ[item_name]\n            processed_entries.append(item_name)\n\n    for name, value in os.environ.items():\n        if name not in processed_entries:\n            env_stanza.append({\"name\": name, \"value\": value})\n\n    pod_def[\"spec\"][\"containers\"][0][\"env\"] = env_stanza\n    return pod_def\n\n\n# a popular reason that lasts many APIs but is not constantized in the client lib\nK8S_ALREADY_EXIST_REASON = \"AlreadyExists\"\n\n\ndef _parse_k8s_exception(exc: ApiException) -> str:\n    \"\"\"Parse the exception and return the error message from kubernetes api\n\n    Args:\n        exc (Exception): Exception object\n\n    Returns:\n        str: Error message from kubernetes api\n    \"\"\"\n    # more exception can be parsed, but at the time of implementation we only need this one\n    msg = f'\"reason\":{K8S_ALREADY_EXIST_REASON}'\n    if exc.status == 409 and exc.reason == \"Conflict\" and msg in exc.body:\n        return K8S_ALREADY_EXIST_REASON\n    return \"\"\n\n\ndef launch_kubernetes_kernel(\n    kernel_id,\n    port_range,\n    response_addr,\n    public_key,\n    spark_context_init_mode,\n    pod_template_file,\n    
spark_opts_out,\n    kernel_class_name,\n):\n    \"\"\"Launches a containerized kernel as a kubernetes pod.\"\"\"\n\n    if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n        config.load_incluster_config()\n    else:\n        config.load_kube_config()\n\n    # Capture keywords and their values.\n    keywords = {}\n\n    # Factory values...\n    # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive\n    # value since this is used to locate the kernel launch script within the image.\n    # Ensure these key/value pairs are reflected in the environment.  We'll add these to the container's env\n    # stanza after the pod template is generated.\n    if port_range:\n        os.environ[\"PORT_RANGE\"] = port_range\n    if public_key:\n        os.environ[\"PUBLIC_KEY\"] = public_key\n    if response_addr:\n        os.environ[\"RESPONSE_ADDRESS\"] = response_addr\n    if kernel_id:\n        os.environ[\"KERNEL_ID\"] = kernel_id\n    if spark_context_init_mode:\n        os.environ[\"KERNEL_SPARK_CONTEXT_INIT_MODE\"] = spark_context_init_mode\n    if kernel_class_name:\n        os.environ[\"KERNEL_CLASS_NAME\"] = kernel_class_name\n\n    os.environ[\"KERNEL_NAME\"] = os.path.basename(\n        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n    )\n\n    # Walk env variables looking for names prefixed with KERNEL_.  When found, set corresponding keyword value\n    # with name in lower case.  
Only parse YAML for variables that legitimately carry structured data\n    # (lists/dicts); treat all others as raw strings to prevent YAML injection attacks.\n    for name, value in os.environ.items():\n        if name.startswith(\"KERNEL_\"):\n            if name in YAML_PARSED_KERNEL_VARS:\n                parsed = yaml.safe_load(value)\n                if not isinstance(parsed, list) or not all(\n                    isinstance(item, dict) for item in parsed\n                ):\n                    sys.exit(\n                        f\"ERROR - {name} must be a YAML list of mappings - \"\n                        f\"kernel launch terminating!\"\n                    )\n                keywords[name.lower()] = parsed\n            else:\n                keywords[name.lower()] = value\n\n    # Substitute all template variable (wrapped with {{ }}) and generate `yaml` string.\n    k8s_yaml = generate_kernel_pod_yaml(keywords)\n\n    # For each k8s object (kind), call the appropriate API method.  Too bad there isn't a method\n    # that can take a set of objects.\n    #\n    # Creation for additional kinds of k8s objects can be added below.  Refer to\n    # https://github.com/kubernetes-client/python for API signatures.  
Other examples can be found in\n    # https://github.com/jupyter-server/enterprise_gateway/tree/main/enterprise_gateway/services/processproxies/k8s.py\n    #\n    pod_template = None\n    pod_created = None\n    kernel_namespace = keywords[\"kernel_namespace\"]\n    k8s_objs = list(yaml.safe_load_all(k8s_yaml))\n    kind_counts: Dict[str, int] = {}\n    for k8s_obj in k8s_objs:\n        if not k8s_obj:\n            continue\n        kind = k8s_obj.get(\"kind\")\n        if kind not in ALLOWED_K8S_KINDS:\n            sys.exit(\n                f\"ERROR - Unexpected resource kind '{kind}' in rendered manifest - \"\n                f\"kernel launch terminating!\"\n            )\n        kind_counts[kind] = kind_counts.get(kind, 0) + 1\n    for kind, count in kind_counts.items():\n        if count > MAX_DOCUMENTS_PER_KIND:\n            sys.exit(\n                f\"ERROR - Rendered manifest contains {count} '{kind}' documents \"\n                f\"(max {MAX_DOCUMENTS_PER_KIND}) - kernel launch terminating!\"\n            )\n    for k8s_obj in k8s_objs:\n        if k8s_obj.get(\"kind\"):\n            if k8s_obj[\"kind\"] == \"Pod\":\n                #  print(\"{}\".format(k8s_obj))  # useful for debug\n                pod_template = extend_pod_env(k8s_obj)\n                if pod_template_file is None:\n                    try:\n                        pod_created = client.CoreV1Api(client.ApiClient()).create_namespaced_pod(\n                            body=k8s_obj, namespace=kernel_namespace\n                        )\n                    except ApiException as exc:\n                        if _parse_k8s_exception(exc) == K8S_ALREADY_EXIST_REASON:\n                            pod_created = (\n                                client.CoreV1Api(client.ApiClient())\n                                .list_namespaced_pod(\n                                    namespace=kernel_namespace,\n                                    label_selector=f\"kernel_id={kernel_id}\",\n          
                          watch=False,\n                                )\n                                .items[0]\n                            )\n                        else:\n                            raise exc\n            elif k8s_obj[\"kind\"] == \"Secret\":\n                if pod_template_file is None:\n                    client.CoreV1Api(client.ApiClient()).create_namespaced_secret(\n                        body=k8s_obj, namespace=kernel_namespace\n                    )\n            elif k8s_obj[\"kind\"] == \"PersistentVolumeClaim\":\n                if pod_template_file is None:\n                    try:\n                        client.CoreV1Api(\n                            client.ApiClient()\n                        ).create_namespaced_persistent_volume_claim(\n                            body=k8s_obj, namespace=kernel_namespace\n                        )\n                    except ApiException as exc:\n                        if _parse_k8s_exception(exc) == K8S_ALREADY_EXIST_REASON:\n                            pass\n                        else:\n                            raise exc\n            elif k8s_obj[\"kind\"] == \"PersistentVolume\":\n                if pod_template_file is None:\n                    client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj)\n            elif k8s_obj[\"kind\"] == \"Service\":\n                if pod_template_file is None and pod_created is not None:\n                    # Create dependency between pod and service, useful to delete service when kernel stops\n                    k8s_obj[\"metadata\"][\"ownerReferences\"] = [\n                        {\n                            \"apiVersion\": \"v1\",\n                            \"kind\": \"pod\",\n                            \"name\": str(pod_created.metadata.name),\n                            \"uid\": str(pod_created.metadata.uid),\n                        }\n                    ]\n                    
client.CoreV1Api(client.ApiClient()).create_namespaced_service(\n                        body=k8s_obj, namespace=kernel_namespace\n                    )\n            elif k8s_obj[\"kind\"] == \"ConfigMap\":\n                if pod_template_file is None and pod_created is not None:\n                    # Create dependency between pod and configmap, useful to delete configmap when kernel stops\n                    k8s_obj[\"metadata\"][\"ownerReferences\"] = [\n                        {\n                            \"apiVersion\": \"v1\",\n                            \"kind\": \"pod\",\n                            \"name\": str(pod_created.metadata.name),\n                            \"uid\": str(pod_created.metadata.uid),\n                        }\n                    ]\n                    client.CoreV1Api(client.ApiClient()).create_namespaced_config_map(\n                        body=k8s_obj, namespace=kernel_namespace\n                    )\n            else:\n                sys.exit(\n                    f\"ERROR - Unhandled Kubernetes object kind '{k8s_obj['kind']}' found in yaml file - \"\n                    f\"kernel launch terminating!\"\n                )\n        else:\n            print(\"ERROR processing Kubernetes yaml file - kernel launch terminating!\")\n            print(k8s_yaml)\n            sys.exit(\n                f\"ERROR - Unknown Kubernetes object '{k8s_obj}' found in yaml file - kernel launch terminating!\"\n            )\n\n    if pod_template_file:\n        # TODO - construct other --conf options for things like mounts, resources, etc.\n        # write yaml to file...\n        with open(pod_template_file, \"w\") as stream:\n            yaml.dump(pod_template, stream)\n\n        # Build up additional spark options.  
Note the trailing space to accommodate concatenation\n        additional_spark_opts = (\n            f\"--conf spark.kubernetes.driver.podTemplateFile={pod_template_file} \"\n            f\"--conf spark.kubernetes.executor.podTemplateFile={pod_template_file} \"\n        )\n\n        additional_spark_opts += _get_spark_resources(pod_template)\n\n        if spark_opts_out:\n            with open(spark_opts_out, \"w+\") as soo_fd:\n                soo_fd.write(additional_spark_opts)\n        else:  # If no spark_opts_out was specified, print to stdout in case this is an old caller\n            print(additional_spark_opts)\n\n\ndef _get_spark_resources(pod_template: Dict) -> str:\n    # Gather up resources for cpu/memory requests/limits.  Since gpus require a \"discovery script\"\n    # we'll leave that alone for now:\n    # https://spark.apache.org/docs/latest/running-on-kubernetes.html#resource-allocation-and-configuration-overview\n    #\n    # The config value names below are pulled from:\n    # https://spark.apache.org/docs/latest/running-on-kubernetes.html#container-spec\n    spark_resources = \"\"\n    containers = pod_template.get(\"spec\", {}).get(\"containers\", [])\n    if containers:\n        # We're just dealing with single-container pods at this time.\n        resources = containers[0].get(\"resources\", {})\n        if resources:\n            requests = resources.get(\"requests\", {})\n            if requests:\n                cpu_request = requests.get(\"cpu\")\n                if cpu_request:\n                    spark_resources += (\n                        f\"--conf spark.driver.cores={cpu_request} \"\n                        f\"--conf spark.executor.cores={cpu_request} \"\n                    )\n                memory_request = requests.get(\"memory\")\n                if memory_request:\n                    spark_resources += (\n                        f\"--conf spark.driver.memory={memory_request} \"\n                        f\"--conf 
spark.executor.memory={memory_request} \"\n                    )\n\n            limits = resources.get(\"limits\", {})\n            if limits:\n                cpu_limit = limits.get(\"cpu\")\n                if cpu_limit:\n                    spark_resources += (\n                        f\"--conf spark.kubernetes.driver.limit.cores={cpu_limit} \"\n                        f\"--conf spark.kubernetes.executor.limit.cores={cpu_limit} \"\n                    )\n                memory_limit = limits.get(\"memory\")\n                if memory_limit:\n                    spark_resources += (\n                        f\"--conf spark.driver.memory={memory_limit} \"\n                        f\"--conf spark.executor.memory={memory_limit} \"\n                    )\n    return spark_resources\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--kernel-id\",\n        dest=\"kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel.\",\n    )\n    parser.add_argument(\n        \"--port-range\",\n        dest=\"port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports\",\n    )\n    parser.add_argument(\n        \"--response-address\",\n        dest=\"response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file\",\n    )\n    parser.add_argument(\n        \"--public-key\",\n        dest=\"public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information\",\n    )\n    parser.add_argument(\n        \"--spark-context-initialization-mode\",\n        dest=\"spark_context_init_mode\",\n        nargs=\"?\",\n        help=\"Indicates whether or how a spark context should be created\",\n    )\n    parser.add_argument(\n        \"--pod-template\",\n        
dest=\"pod_template_file\",\n        nargs=\"?\",\n        metavar=\"template filename\",\n        help=\"When present, yaml is written to file, no launch performed.\",\n    )\n    parser.add_argument(\n        \"--spark-opts-out\",\n        dest=\"spark_opts_out\",\n        nargs=\"?\",\n        metavar=\"additional spark options filename\",\n        help=\"When present, additional spark options are written to file, \"\n        \"no launch performed, requires --pod-template.\",\n    )\n    parser.add_argument(\n        \"--kernel-class-name\",\n        dest=\"kernel_class_name\",\n        nargs=\"?\",\n        help=\"Indicates the name of the kernel class to use.  Must be a subclass of 'ipykernel.kernelbase.Kernel'.\",\n    )\n\n    # The following arguments are deprecated and will be used only if their mirroring arguments have no value.\n    # This means that the default value for --spark-context-initialization-mode (none) will need to come from\n    # the mirrored args' default until deprecated item has been removed.\n    parser.add_argument(\n        \"--RemoteProcessProxy.kernel-id\",\n        dest=\"rpp_kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel. 
(deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.port-range\",\n        dest=\"rpp_port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.response-address\",\n        dest=\"rpp_response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.public-key\",\n        dest=\"rpp_public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.spark-context-initialization-mode\",\n        dest=\"rpp_spark_context_init_mode\",\n        nargs=\"?\",\n        help=\"Indicates whether or how a spark context should be created (deprecated)\",\n        default=\"none\",\n    )\n\n    arguments = vars(parser.parse_args())\n    kernel_id = arguments[\"kernel_id\"] or arguments[\"rpp_kernel_id\"]\n    port_range = arguments[\"port_range\"] or arguments[\"rpp_port_range\"]\n    response_addr = arguments[\"response_address\"] or arguments[\"rpp_response_address\"]\n    public_key = arguments[\"public_key\"] or arguments[\"rpp_public_key\"]\n    spark_context_init_mode = (\n        arguments[\"spark_context_init_mode\"] or arguments[\"rpp_spark_context_init_mode\"]\n    )\n    pod_template_file = arguments[\"pod_template_file\"]\n    spark_opts_out = arguments[\"spark_opts_out\"]\n    kernel_class_name = arguments[\"kernel_class_name\"]\n\n    launch_kubernetes_kernel(\n        kernel_id,\n        port_range,\n        response_addr,\n        public_key,\n        spark_context_init_mode,\n        pod_template_file,\n        spark_opts_out,\n        kernel_class_name,\n    )\n"
  },
  {
    "path": "etc/kernel-launchers/operators/scripts/launch_custom_resource.py",
    "content": "#!/opt/conda/bin/python\n\"\"\"Launch a custom operator resource.\"\"\"\nimport argparse\nimport os\nimport re\nimport sys\n\nimport urllib3\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom kubernetes import client, config\n\nurllib3.disable_warnings()\n\nYAML_PARSED_KERNEL_VARS = {\"KERNEL_VOLUME_MOUNTS\", \"KERNEL_VOLUMES\"}\n\n\ndef yaml_safe_str(value):\n    \"\"\"Escape a value for safe inclusion in a YAML template.\n\n    Uses PyYAML's own serializer to produce properly escaped output:\n    - Strings are double-quoted with special characters escaped.\n    - Dicts/lists are serialized as YAML flow mappings/sequences.\n    - None, bools, and numbers are serialized to their YAML-canonical form.\n    \"\"\"\n    if isinstance(value, str):\n        return yaml.dump(value, default_style='\"', width=10000).strip()\n    if isinstance(value, (dict, list)):\n        return yaml.dump(value, default_flow_style=True, width=10000).strip()\n    # yaml.dump appends a document-end marker (\"...\\n\") for scalars; strip it\n    return yaml.dump(value, width=10000).replace(\"\\n...\", \"\").strip()\n\n\ndef generate_kernel_custom_resource_yaml(kernel_crd_template, keywords):\n    \"\"\"Generate the kernel custom resource yaml given a template.\"\"\"\n    j_env = Environment(\n        loader=FileSystemLoader(os.path.dirname(__file__)),\n        trim_blocks=True,\n        lstrip_blocks=True,\n        autoescape=select_autoescape(\n            disabled_extensions=(\n                \"j2\",\n                \"yaml\",\n            ),\n            default_for_string=True,\n            default=True,\n        ),\n    )\n    j_env.filters[\"yaml_safe\"] = yaml_safe_str\n\n    k8s_yaml = j_env.get_template(\"/\" + kernel_crd_template + \".yaml.j2\").render(**keywords)\n    return k8s_yaml\n\n\ndef extend_operator_env(op_def: dict, sub_spec: str) -> dict:\n    \"\"\"Extends the op_def.spec.sub_spec.env stanza with current 
environment.\"\"\"\n    env_stanza = op_def[\"spec\"][sub_spec].get(\"env\") or []\n\n    # Walk current set of template env entries and replace those found in the current\n    # env with their values (and record those items).   Then add all others from the env\n    # that were not already.\n    processed_entries: list[str] = []\n    for item in env_stanza:\n        item_name = item.get(\"name\")\n        if item_name in os.environ:\n            item[\"value\"] = os.environ[item_name]\n            processed_entries.append(item_name)\n\n    for name, value in os.environ.items():\n        if name not in processed_entries:\n            env_stanza.append({\"name\": name, \"value\": value})\n\n    op_def[\"spec\"][sub_spec][\"env\"] = env_stanza\n    return op_def\n\n\ndef launch_custom_resource_kernel(\n    kernel_id, port_range, response_addr, public_key, spark_context_init_mode\n):\n    \"\"\"Launch a custom resource kernel.\"\"\"\n    config.load_incluster_config()\n\n    keywords = {}\n\n    keywords[\"eg_port_range\"] = port_range\n    keywords[\"eg_public_key\"] = public_key\n    keywords[\"eg_response_address\"] = response_addr\n    keywords[\"kernel_id\"] = kernel_id\n    keywords[\"kernel_name\"] = os.path.basename(\n        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n    )\n    keywords[\"spark_context_initialization_mode\"] = spark_context_init_mode\n\n    # Only parse YAML for variables that legitimately carry structured data (lists/dicts);\n    # treat all others as raw strings to prevent YAML injection attacks.\n    for name, value in os.environ.items():\n        if name.startswith(\"KERNEL_\"):\n            if name in YAML_PARSED_KERNEL_VARS:\n                parsed = yaml.safe_load(value)\n                if not isinstance(parsed, list) or not all(\n                    isinstance(item, dict) for item in parsed\n                ):\n                    sys.exit(\n                        f\"ERROR - {name} must be a YAML list of mappings - 
\"\n                        f\"kernel launch terminating!\"\n                    )\n                keywords[name.lower()] = parsed\n            else:\n                keywords[name.lower()] = value\n\n    kernel_crd_template = keywords[\"kernel_crd_group\"] + \"-\" + keywords[\"kernel_crd_version\"]\n    if not re.match(r'^[a-z0-9][a-z0-9.\\-]*-v[a-z0-9]+$', kernel_crd_template):\n        sys.exit(\n            f\"ERROR - Invalid CRD template name: {kernel_crd_template} - kernel launch terminating!\"\n        )\n\n    custom_resource_yaml = generate_kernel_custom_resource_yaml(kernel_crd_template, keywords)\n\n    kernel_namespace = keywords[\"kernel_namespace\"]\n    group = keywords[\"kernel_crd_group\"]\n    version = keywords[\"kernel_crd_version\"]\n    plural = keywords[\"kernel_crd_plural\"]\n    custom_resource_object = yaml.safe_load(custom_resource_yaml)\n    if not isinstance(custom_resource_object, dict) or \"kind\" not in custom_resource_object:\n        sys.exit(\n            \"ERROR - Rendered CRD manifest is not a valid single-document YAML - kernel launch terminating!\"\n        )\n    if group == \"sparkoperator.k8s.io\":\n        extend_operator_env(custom_resource_object, \"driver\")\n        extend_operator_env(custom_resource_object, \"executor\")\n\n    try:\n        client.CustomObjectsApi().create_namespaced_custom_object(\n            group, version, kernel_namespace, plural, custom_resource_object\n        )\n    except client.exceptions.ApiException as ex:\n        if ex.status == 404:\n            sys.exit(\n                \"\\nERROR: The Kubernetes Operator for Apache Spark does not appear to be installed.  
\"\n                \"See 'https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation' for \"\n                \"instructions, then retry the operation.\\n\"\n            )\n        else:\n            print(\"ERROR processing Kubernetes Operator CRD - kernel launch terminating!\")\n            print(custom_resource_yaml)\n        raise ex\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--kernel-id\",\n        \"--RemoteProcessProxy.kernel-id\",\n        dest=\"kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel.\",\n    )\n    parser.add_argument(\n        \"--port-range\",\n        \"--RemoteProcessProxy.port-range\",\n        dest=\"port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports\",\n    )\n    parser.add_argument(\n        \"--response-address\",\n        \"--RemoteProcessProxy.response-address\",\n        dest=\"response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file\",\n    )\n    parser.add_argument(\n        \"--public-key\",\n        \"--RemoteProcessProxy.public-key\",\n        dest=\"public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information\",\n    )\n    parser.add_argument(\n        \"--spark-context-initialization-mode\",\n        \"--RemoteProcessProxy.spark-context-initialization-mode\",\n        dest=\"spark_context_init_mode\",\n        nargs=\"?\",\n        help=\"Indicates whether or how a spark context should be created\",\n        default=\"none\",\n    )\n\n    arguments = vars(parser.parse_args())\n    kernel_id = arguments[\"kernel_id\"]\n    port_range = arguments[\"port_range\"]\n    response_addr = arguments[\"response_address\"]\n    public_key = arguments[\"public_key\"]\n    
spark_context_init_mode = arguments[\"spark_context_init_mode\"]\n\n    launch_custom_resource_kernel(\n        kernel_id, port_range, response_addr, public_key, spark_context_init_mode\n    )\n"
  },
  {
    "path": "etc/kernel-launchers/operators/scripts/sparkoperator.k8s.io-v1beta2.yaml.j2",
    "content": "apiVersion: \"sparkoperator.k8s.io/v1beta2\"\nkind: SparkApplication\nmetadata:\n  name: {{ kernel_resource_name | yaml_safe }}\nspec:\n  restartPolicy:\n    type: Never\n  type: Python\n  pythonVersion: \"3\"\n  sparkVersion: 2.4.5\n  image: {{ kernel_image | yaml_safe }}\n  mainApplicationFile: \"local:///usr/local/bin/kernel-launchers/python/scripts/launch_ipykernel.py\"\n  arguments:\n    - \"--kernel-id\"\n    - {{ kernel_id | yaml_safe }}\n    - \"--spark-context-initialization-mode\"\n    - {{ spark_context_initialization_mode | yaml_safe }}\n    - \"--response-address\"\n    - {{ eg_response_address | yaml_safe }}\n    - \"--port-range\"\n    - {{ eg_port_range | yaml_safe }}\n    - \"--public-key\"\n    - {{ eg_public_key | yaml_safe }}\n  driver:\n    annotations:\n      cluster-autoscaler.kubernetes.io/safe-to-evict: \"false\"\n    env:\n# Add any custom envs here that aren't already configured for the kernel's environment\n# Note: For envs to flow to the pods, the webhook server must be enabled during deployment\n# e.g., helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true\n#    - name: MY_DRIVER_ENV\n#      value: \"my_driver_value\"\n    serviceAccount: {{ kernel_service_account_name | yaml_safe }}\n    labels:\n      kernel_id: {{ kernel_id | yaml_safe }}\n      app: enterprise-gateway\n      component: kernel\n    cores: 1\n    coreLimit: 1000m\n    memory: 1g\n    volumeMounts:\n      {% if kernel_volume_mounts is defined %}\n        {% for mount in kernel_volume_mounts %}\n      - {{ mount | yaml_safe }}\n        {% endfor %}\n      {% endif %}\n    volumes:\n      {% if kernel_volumes is defined %}\n        {% for volume in kernel_volumes %}\n      - {{ volume | yaml_safe }}\n        {% endfor %}\n      {% endif %}\n  executor:\n    env:\n# Add any custom envs here that aren't already configured for the kernel's environment\n# Note: For envs to flow to the pods, the webhook 
server must be enabled during deployment\n# e.g., helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true\n#    - name: MY_EXECUTOR_ENV\n#      value: \"my_executor_value\"\n    labels:\n      kernel_id: {{ kernel_id | yaml_safe }}\n      app: enterprise-gateway\n      component: worker\n    image: {{ kernel_executor_image | yaml_safe }}\n    instances: 2\n    cores: 1\n    coreLimit: 1000m\n    memory: 1g\n    volumeMounts:\n      {% if kernel_volume_mounts is defined %}\n        {% for mount in kernel_volume_mounts %}\n      - {{ mount | yaml_safe }}\n        {% endfor %}\n      {% endif %}\n    volumes:\n      {% if kernel_volumes is defined %}\n        {% for volume in kernel_volumes %}\n      - {{ volume | yaml_safe }}\n        {% endfor %}\n      {% endif %}\n{% if kernel_sparkapp_config_map %}\n  sparkConfigMap: {{ kernel_sparkapp_config_map | yaml_safe }}\n{% endif %}\n"
  },
  {
    "path": "etc/kernel-launchers/python/scripts/launch_ipykernel.py",
    "content": "\"\"\"Launch an ipython kernel.\"\"\"\n\nimport argparse\nimport base64\nimport json\nimport logging\nimport os\nimport random\nimport signal\nimport socket\nimport tempfile\nimport uuid\nfrom multiprocessing import Process\nfrom threading import Thread\n\nfrom Cryptodome.Cipher import AES, PKCS1_v1_5\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Random import get_random_bytes\nfrom Cryptodome.Util.Padding import pad\nfrom jupyter_client.connect import write_connection_file\n\nLAUNCHER_VERSION = 1  # Indicate to server the version of this launcher (payloads may vary)\n\n# Minimum port range size and max retries, let EG_ env values act as the default for b/c purposes\nmin_port_range_size = int(\n    os.getenv(\"MIN_PORT_RANGE_SIZE\", os.getenv(\"EG_MIN_PORT_RANGE_SIZE\", \"1000\"))\n)\nmax_port_range_retries = int(\n    os.getenv(\"MAX_PORT_RANGE_RETRIES\", os.getenv(\"EG_MAX_PORT_RANGE_RETRIES\", \"5\"))\n)\n\nlog_level = os.getenv(\"LOG_LEVEL\", os.getenv(\"EG_LOG_LEVEL\", \"10\"))\nlog_level = int(log_level) if log_level.isdigit() else log_level\n\nlogging.basicConfig(format=\"[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s\")\n\nlogger = logging.getLogger(\"launch_ipykernel\")\nlogger.setLevel(log_level)\n\nDEFAULT_KERNEL_CLASS_NAME = \"ipykernel.ipkernel.IPythonKernel\"\n__spark_context = None\n\n\nclass ExceptionThread(Thread):\n    \"\"\"Wrap thread to handle the exception.\"\"\"\n\n    def __init__(self, target):\n        \"\"\"Initialize the thread.\"\"\"\n        self.target = target\n        self.exc = None\n        Thread.__init__(self)\n\n    def run(self):\n        \"\"\"Run the thread.\"\"\"\n        try:\n            self.target()\n        except Exception as exc:\n            self.exc = exc\n\n\ndef initialize_namespace(namespace, cluster_type=\"spark\"):\n    \"\"\"Initialize the kernel namespace.\n\n    Parameters\n    ----------\n    cluster_type : {'spark', 'dask', 'none'}\n        The cluster type to 
initialize. ``'none'`` results in no variables in\n        the initial namespace.\n    \"\"\"\n    if cluster_type == \"spark\":\n        try:\n            from pyspark.sql import SparkSession\n        except ImportError:\n            logger.info(\n                \"A spark context was desired but the pyspark distribution is not present.  \"\n                \"Spark context creation will not occur.\"\n            )\n            return\n\n        def initialize_spark_session():\n            import atexit\n\n            \"\"\"Initialize Spark session and replace global variable\n            placeholders with real Spark session object references.\"\"\"\n            spark = SparkSession.builder.getOrCreate()\n\n            global __spark_context\n            __spark_context = spark.sparkContext\n\n            # Stop the spark session on exit\n            atexit.register(lambda: spark.stop())\n\n            namespace.update(\n                {\n                    \"spark\": spark,\n                    \"sc\": spark.sparkContext,\n                    \"sql\": spark.sql,\n                    \"sqlContext\": spark._wrapped,\n                    \"sqlCtx\": spark._wrapped,\n                }\n            )\n\n        init_thread = ExceptionThread(target=initialize_spark_session)\n        spark = WaitingForSparkSessionToBeInitialized(\"spark\", init_thread, namespace)\n        sc = WaitingForSparkSessionToBeInitialized(\"sc\", init_thread, namespace)\n        sqlContext = WaitingForSparkSessionToBeInitialized(\"sqlContext\", init_thread, namespace)\n\n        def sql(query):\n            \"\"\"Placeholder function. 
When called will wait for Spark session to be\n            initialized and call ``spark.sql(query)``\"\"\"\n            return spark.sql(query)\n\n        namespace.update(\n            {\"spark\": spark, \"sc\": sc, \"sql\": sql, \"sqlContext\": sqlContext, \"sqlCtx\": sqlContext}\n        )\n\n        init_thread.start()\n\n    elif cluster_type == \"dask\":\n        import dask_yarn\n\n        cluster = dask_yarn.YarnCluster.from_current()\n        namespace.update({\"cluster\": cluster})\n    elif cluster_type != \"none\":\n        raise RuntimeError(\"Unknown cluster_type: %r\" % cluster_type)\n\n\nclass WaitingForSparkSessionToBeInitialized:\n    \"\"\"Wrapper object for SparkContext and other Spark session variables while the real Spark session is being\n    initialized in a background thread. The class name is intentionally worded verbosely explicit as it will show up\n    when executing a cell that contains only a Spark session variable like ``sc`` or ``sqlContext``.\n    \"\"\"\n\n    # private and public attributes that show up for tab completion,\n    # to indicate pending initialization of Spark session\n    _WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = \"Spark Session not yet initialized ...\"\n    WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = \"Spark Session not yet initialized ...\"\n\n    # the same wrapper class is used for all Spark session variables, so we need to record the name of the variable\n    def __init__(self, global_variable_name, init_thread, namespace):\n        \"\"\"Initialize the waiter.\"\"\"\n        self._spark_session_variable = global_variable_name\n        self._init_thread = init_thread\n        self._namespace = namespace\n\n    # we intercept all method and attribute references on our temporary Spark session variable,\n    # wait for the thread to complete initializing the Spark sessions and then we forward the\n    # call to the real Spark objects\n    def __getattr__(self, name):\n        \"\"\"Handle attribute 
getter.\"\"\"\n        # ignore tab-completion request for __members__ or __methods__ and ignore meta property requests\n        if name.startswith(\"__\") or name.startswith(\"_ipython_\") or name.startswith(\"_repr_\"):\n            return\n        else:\n            # wait on thread to initialize the Spark session variables in global variable scope\n            self._init_thread.join(timeout=None)\n            exc = self._init_thread.exc\n            if exc:\n                msg = f\"Variable: {self._spark_session_variable} was not initialized properly.\"\n                raise RuntimeError(msg) from exc\n\n            # now return attribute/function reference from actual Spark object\n            return getattr(self._namespace[self._spark_session_variable], name)\n\n\ndef _validate_port_range(port_range):\n    # if no argument was provided, return a range of 0\n    if not port_range:\n        return 0, 0\n\n    try:\n        port_ranges = port_range.split(\"..\")\n        lower_port = int(port_ranges[0])\n        upper_port = int(port_ranges[1])\n\n        port_range_size = upper_port - lower_port\n        if port_range_size != 0 and port_range_size < min_port_range_size:\n            msg = (\n                f\"Port range validation failed for range: '{port_range}'.  Range size must be at least \"\n                f\"{min_port_range_size} as specified by env EG_MIN_PORT_RANGE_SIZE\"\n            )\n            raise RuntimeError(msg) from None\n    except ValueError as ve:\n        msg = f\"Port range validation failed for range: '{port_range}'.  Error was: {ve}\"\n        raise RuntimeError(msg) from None\n    except IndexError as ie:\n        msg = f\"Port range validation failed for range: '{port_range}'.  
Error was: {ie}\"\n        raise RuntimeError(msg) from None\n\n    return lower_port, upper_port\n\n\ndef determine_connection_file(conn_file, kid):\n    \"\"\"If the directory exists, use the original file, else create a temporary file.\"\"\"\n    if conn_file is None or not os.path.exists(os.path.dirname(conn_file)):\n        if kid is not None:\n            basename = \"kernel-\" + kid\n        else:\n            basename = os.path.splitext(os.path.basename(conn_file))[0]\n        fd, conn_file = tempfile.mkstemp(suffix=\".json\", prefix=basename + \"_\")\n        os.close(fd)\n        logger.debug(f\"Using connection file '{conn_file}'.\")\n\n    return conn_file\n\n\ndef _encrypt(connection_info_str, public_key):\n    \"\"\"Encrypt the connection information using a generated AES key that is then encrypted using\n    the public key passed from the server.  Both are then returned in an encoded JSON payload.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    aes_key = get_random_bytes(16)\n    cipher = AES.new(aes_key, mode=AES.MODE_ECB)\n\n    # Encrypt the connection info using the aes_key\n    encrypted_connection_info = cipher.encrypt(pad(connection_info_str, 16))\n    b64_connection_info = base64.b64encode(encrypted_connection_info)\n\n    # Encrypt the aes_key using the server's public key\n    imported_public_key = RSA.importKey(base64.b64decode(public_key.encode()))\n    cipher = PKCS1_v1_5.new(key=imported_public_key)\n    encrypted_key = base64.b64encode(cipher.encrypt(aes_key))\n\n    # Compose the payload and Base64 encode it\n    payload = {\n        \"version\": LAUNCHER_VERSION,\n        \"key\": encrypted_key.decode(),\n        \"conn_info\": b64_connection_info.decode(),\n    }\n    b64_payload = base64.b64encode(json.dumps(payload).encode(encoding=\"utf-8\"))\n    return b64_payload\n\n\ndef return_connection_info(\n    connection_file, response_addr, lower_port, upper_port, kernel_id, 
public_key\n):\n    \"\"\"Returns the connection information corresponding to this kernel.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    response_parts = response_addr.split(\":\")\n    if len(response_parts) != 2:\n        logger.error(\n            f\"Invalid format for response address '{response_addr}'. Assuming 'pull' mode...\"\n        )\n        return\n\n    response_ip = response_parts[0]\n    try:\n        response_port = int(response_parts[1])\n    except ValueError:\n        logger.error(\n            f\"Invalid port component found in response address '{response_addr}'. Assuming 'pull' mode...\"\n        )\n        return\n\n    with open(connection_file) as fp:\n        cf_json = json.load(fp)\n        fp.close()\n\n    # add process and process group ids into connection info\n    pid = os.getpid()\n    cf_json[\"pid\"] = pid\n    cf_json[\"pgid\"] = os.getpgid(pid)\n\n    # prepare socket address for handling signals\n    comm_sock = prepare_comm_socket(lower_port, upper_port)\n    cf_json[\"comm_port\"] = comm_sock.getsockname()[1]\n    cf_json[\"kernel_id\"] = kernel_id\n\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.connect((response_ip, response_port))\n        json_content = json.dumps(cf_json).encode(encoding=\"utf-8\")\n        logger.debug(f\"JSON Payload '{json_content}\")\n        payload = _encrypt(json_content, public_key)\n        logger.debug(f\"Encrypted Payload '{payload}\")\n        s.send(payload)\n\n    return comm_sock\n\n\ndef prepare_comm_socket(lower_port, upper_port):\n    \"\"\"Prepares the socket to which the server will send signal and shutdown requests.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    sock = _select_socket(lower_port, upper_port)\n    logger.info(\n        f\"Signal socket bound to host: {sock.getsockname()[0]}, port: {sock.getsockname()[1]}\"\n    )\n    sock.listen(1)\n  
  sock.settimeout(5)\n    return sock\n\n\ndef _select_ports(count, lower_port, upper_port):\n    \"\"\"Select and return n random ports that are available and adhere to the given port range, if applicable.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    ports = []\n    sockets = []\n    for _ in range(count):\n        sock = _select_socket(lower_port, upper_port)\n        ports.append(sock.getsockname()[1])\n        sockets.append(sock)\n    for sock in sockets:\n        sock.close()\n    return ports\n\n\ndef _select_socket(lower_port, upper_port):\n    \"\"\"Create and return a socket whose port is available and adheres to the given port range, if applicable.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    found_port = False\n    retries = 0\n    while not found_port:\n        try:\n            sock.bind((\"0.0.0.0\", _get_candidate_port(lower_port, upper_port)))  # noqa\n            found_port = True\n        except Exception:\n            retries = retries + 1\n            if retries > max_port_range_retries:\n                msg = (\n                    f\"Failed to locate port within range {lower_port}..{upper_port} \"\n                    f\"after {max_port_range_retries} retries!\"\n                )\n                raise RuntimeError(msg) from None\n    return sock\n\n\ndef _get_candidate_port(lower_port, upper_port):\n    \"\"\"Returns a port within the given range.  
If the range is zero, the zero is returned.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    range_size = upper_port - lower_port\n    if range_size == 0:\n        return 0\n    return random.randint(lower_port, upper_port)\n\n\ndef get_server_request(sock):\n    \"\"\"Gets a request from the server and returns the corresponding dictionary.\n\n    This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    conn = None\n    data = \"\"\n    request_info = None\n    try:\n        conn, addr = sock.accept()\n        while True:\n            buffer = conn.recv(1024).decode(\"utf-8\")\n            if not buffer:  # send is complete\n                request_info = json.loads(data)\n                break\n            data = data + buffer  # append what we received until we get no more...\n    except Exception as e:\n        if type(e) is not socket.timeout:\n            raise e\n    finally:\n        if conn:\n            conn.close()\n\n    return request_info\n\n\ndef cancel_spark_jobs(sig, frame):\n    \"\"\"Cancel spark jobs.\"\"\"\n    if __spark_context is None:\n        return\n    try:\n        __spark_context.cancelAllJobs()\n    except Exception as e:\n        if e.__class__.__name__ == \"Py4JError\":\n            try:\n                __spark_context.cancelAllJobs()\n            except Exception as ex:\n                print(\n                    f\"Error occurred while re-attempting Spark job cancellation when interrupting the kernel: {ex}\"\n                )\n        else:\n            print(\n                f\"Error occurred while attempting Spark job cancellation when interrupting the kernel: {e}\"\n            )\n\n\ndef server_listener(sock, parent_pid, cluster_type):\n    \"\"\"Waits for requests from the server and processes each when received.  
Currently,\n    these will be one of a sending a signal to the corresponding kernel process (signum) or\n    stopping the listener and exiting the kernel (shutdown).\n\n     This code also exists in the R kernel-launcher's server_listener.py script.\n    \"\"\"\n    shutdown = False\n    while not shutdown:\n        request = get_server_request(sock)\n        if request:\n            signum = -1  # prevent logging poll requests since that occurs every 3 seconds\n            if request.get(\"signum\") is not None:\n                signum = int(request.get(\"signum\"))\n                os.kill(parent_pid, signum)\n                if signum == 2 and cluster_type == \"spark\":\n                    os.kill(parent_pid, signal.SIGUSR2)\n            if request.get(\"shutdown\") is not None:\n                shutdown = bool(request.get(\"shutdown\"))\n            if signum != 0:\n                logger.info(f\"server_listener got request: {request}\")\n\n\ndef import_item(name):\n    \"\"\"Import and return ``bar`` given the string ``foo.bar``.\n    Calling ``bar = import_item(\"foo.bar\")`` is the functional equivalent of\n    executing the code ``from foo import bar``.\n    Parameters\n    ----------\n    name : string\n      The fully qualified name of the module/package being imported.\n    Returns\n    -------\n    mod : module object\n       The module that was imported.\n    \"\"\"\n\n    parts = name.rsplit(\".\", 1)\n    if len(parts) == 2:\n        # called with 'foo.bar....'\n        package, obj = parts\n        module = __import__(package, fromlist=[obj])\n        try:\n            pak = getattr(module, obj)\n        except AttributeError:\n            raise ImportError(\"No module named %s\" % obj) from None\n        return pak\n    else:\n        # called with un-dotted string\n        return __import__(parts[0])\n\n\ndef start_ipython(\n    namespace, cluster_type=\"spark\", kernel_class_name=DEFAULT_KERNEL_CLASS_NAME, **kwargs\n):\n    \"\"\"Start the 
ipython kernel.\"\"\"\n    from ipykernel.kernelapp import IPKernelApp\n\n    # Capture the kernel class before removing 'import_item' from the namespace\n    kernel_class = import_item(kernel_class_name)\n\n    # create an initial list of variables to clear\n    # we do this without deleting to preserve the locals so that\n    # initialize_namespace isn't affected by this mutation\n    to_delete = [k for k in namespace if not k.startswith(\"__\")]\n\n    # initialize the namespace with the proper variables\n    initialize_namespace(namespace, cluster_type=cluster_type)\n\n    # delete the extraneous variables\n    for k in to_delete:\n        del namespace[k]\n\n    # Start the kernel.\n    app = IPKernelApp.instance(kernel_class=kernel_class, user_ns=namespace, **kwargs)\n    app.initialize([])\n    app.start()\n\n    # cleanup\n    conn_file = kwargs[\"connection_file\"]\n    try:\n        import os  # re-import os since it's removed during namespace manipulation during startup\n\n        os.remove(conn_file)\n    except Exception as e:\n        print(f\"Could not delete connection file '{conn_file}' at exit due to error: {e}\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        \"--response-address\",\n        dest=\"response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file\",\n    )\n    parser.add_argument(\n        \"--kernel-id\",\n        dest=\"kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel.\",\n    )\n    parser.add_argument(\n        \"--public-key\",\n        dest=\"public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information\",\n    )\n    parser.add_argument(\n        \"--port-range\",\n        dest=\"port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        
help=\"Port range to impose for kernel ports\",\n    )\n    parser.add_argument(\n        \"--spark-context-initialization-mode\",\n        dest=\"init_mode\",\n        nargs=\"?\",\n        help=\"the initialization mode of the spark context: lazy, eager or none\",\n    )\n    parser.add_argument(\n        \"--cluster-type\",\n        dest=\"cluster_type\",\n        nargs=\"?\",\n        help=\"the kind of cluster to initialize: spark, dask, or none\",\n    )\n    parser.add_argument(\n        \"--kernel-class-name\",\n        dest=\"kernel_class_name\",\n        nargs=\"?\",\n        default=DEFAULT_KERNEL_CLASS_NAME,\n        help=\"Indicates the name of the kernel class to use.  Must be a subclass of 'ipykernel.kernelbase.Kernel'.\",\n    )\n    # The following arguments are deprecated and will be used only if their mirroring arguments have no value.\n    # This means that the default values for --spark-context-initialization-mode (none) and --cluster-type (spark)\n    # will need to come from the mirrored args' default until deprecated items have been removed.\n    parser.add_argument(\n        \"connection_file\", nargs=\"?\", help=\"Connection file to write connection info (deprecated)\"\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.response-address\",\n        dest=\"rpp_response_address\",\n        nargs=\"?\",\n        metavar=\"<ip>:<port>\",\n        help=\"Connection address (<ip>:<port>) for returning connection file (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.kernel-id\",\n        dest=\"rpp_kernel_id\",\n        nargs=\"?\",\n        help=\"Indicates the id associated with the launched kernel. 
(deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.public-key\",\n        dest=\"rpp_public_key\",\n        nargs=\"?\",\n        help=\"Public key used to encrypt connection information (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.port-range\",\n        dest=\"rpp_port_range\",\n        nargs=\"?\",\n        metavar=\"<lowerPort>..<upperPort>\",\n        help=\"Port range to impose for kernel ports (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.spark-context-initialization-mode\",\n        dest=\"rpp_init_mode\",\n        nargs=\"?\",\n        default=\"none\",\n        help=\"the initialization mode of the spark context: lazy, eager or none (deprecated)\",\n    )\n    parser.add_argument(\n        \"--RemoteProcessProxy.cluster-type\",\n        dest=\"rpp_cluster_type\",\n        nargs=\"?\",\n        default=\"spark\",\n        help=\"the kind of cluster to initialize: spark, dask, or none (deprecated)\",\n    )\n\n    arguments = vars(parser.parse_args())\n    connection_file = arguments[\"connection_file\"]\n    response_addr = arguments[\"response_address\"] or arguments[\"rpp_response_address\"]\n    kernel_id = arguments[\"kernel_id\"] or arguments[\"rpp_kernel_id\"]\n    public_key = arguments[\"public_key\"] or arguments[\"rpp_public_key\"]\n    lower_port, upper_port = _validate_port_range(\n        arguments[\"port_range\"] or arguments[\"rpp_port_range\"]\n    )\n    spark_init_mode = arguments[\"init_mode\"] or arguments[\"rpp_init_mode\"]\n    cluster_type = arguments[\"cluster_type\"] or arguments[\"rpp_cluster_type\"]\n    kernel_class_name = arguments[\"kernel_class_name\"]\n    ip = \"0.0.0.0\"  # noqa\n\n    if connection_file is None and kernel_id is None:\n        msg = \"At least one of the parameters: 'connection_file' or '--kernel-id' must be provided!\"\n        raise RuntimeError(msg)\n\n    if kernel_id is None:\n        msg = \"Parameter 
'--kernel-id' must be provided!\"\n        raise RuntimeError(msg)\n\n    if public_key is None:\n        msg = \"Parameter '--public-key' must be provided!\"\n        raise RuntimeError(msg)\n\n    # Initialize the kernel namespace for the given cluster type\n    if cluster_type == \"spark\" and spark_init_mode == \"none\":\n        cluster_type = \"none\"\n\n    # If the connection file doesn't exist, then create it.\n    if (connection_file and not os.path.isfile(connection_file)) or kernel_id is not None:\n        key = str(uuid.uuid4()).encode()  # convert to bytes\n        connection_file = determine_connection_file(connection_file, kernel_id)\n\n        ports = _select_ports(5, lower_port, upper_port)\n\n        write_connection_file(\n            fname=connection_file,\n            ip=ip,\n            key=key,\n            shell_port=ports[0],\n            iopub_port=ports[1],\n            stdin_port=ports[2],\n            hb_port=ports[3],\n            control_port=ports[4],\n        )\n        if response_addr:\n            comm_socket = return_connection_info(\n                connection_file, response_addr, lower_port, upper_port, kernel_id, public_key\n            )\n            if comm_socket:  # socket in use, start server listener process\n                server_listener_process = Process(\n                    target=server_listener,\n                    args=(\n                        comm_socket,\n                        os.getpid(),\n                        cluster_type,\n                    ),\n                )\n                server_listener_process.start()\n\n    if cluster_type == \"spark\":\n        signal.signal(signal.SIGUSR2, cancel_spark_jobs)\n\n    # launch the IPython kernel instance\n    start_ipython(\n        locals(),\n        cluster_type=cluster_type,\n        connection_file=connection_file,\n        ip=ip,\n        kernel_class_name=kernel_class_name,\n    )\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/build.sbt",
    "content": "/*\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\nname := \"toree-launcher\"\n\nversion := sys.props.getOrElse(\"version\", default = \"1.0\").replaceAll(\"dev[0-9]\", \"SNAPSHOT\")\n\nscalaVersion := \"2.12.12\"\n\nresolvers += \"Typesafe Repo\" at \"https://repo.typesafe.com/typesafe/releases/\"\n/* resolvers += \"Sonatype Repository\" at \"https://oss.sonatype.org/content/repositories/releases/\" */\nresolvers += \"Sonatype Maven Central Mirror\" at \"https://maven-central.storage-download.googleapis.com/maven2/\"\n\nlibraryDependencies += \"com.typesafe.play\" %% \"play-json\" % \"2.7.4\" // Apache v2\nlibraryDependencies += \"org.apache.toree\" % \"toree-assembly\" % \"0.5.0-incubating\"\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/build.properties",
    "content": "#\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n#\nsbt.version = 1.3.12\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/plugins.sbt",
    "content": "/*\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\nlogLevel := Level.Warn\n\n/*\n * Following plugins have a dependency on sbt v0.13\n */\naddSbtPlugin(\"com.eed3si9n\" % \"sbt-assembly\" % \"0.14.5\")\naddSbtPlugin(\"org.scalastyle\" %% \"scalastyle-sbt-plugin\" % \"1.0.0\")\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/scalastyle-config.xml",
    "content": "<!--\n  Copyright (c) Jupyter Development Team.\n  Distributed under the terms of the Modified BSD License.\n-->\n\n<!--\n\nIf you wish to turn off checking for a section of code, you can put a comment in the source\nbefore and after the section, with the following syntax:\n\n  // scalastyle:off\n  ...  // stuff that breaks the styles\n  // scalastyle:on\n\nYou can also disable only one rule, by specifying its rule id, as specified in:\n  https://www.scalastyle.org/rules-0.7.0.html\n\n  // scalastyle:off no.finalize\n  override def finalize(): Unit = ...\n  // scalastyle:on no.finalize\n\nThis file is divided into 3 sections:\n (1) rules that we enforce.\n (2) rules that we would like to enforce, but haven't cleaned up the codebase to turn on yet\n     (or we need to make the scalastyle rule more configurable).\n (3) rules that we don't want to enforce.\n-->\n\n<scalastyle>\n <name>Scalastyle standard configuration</name>\n\n <!-- ================================================================================ -->\n <!--                               rules we enforce                                   -->\n <!-- ================================================================================ -->\n\n <check level=\"error\" class=\"org.scalastyle.file.FileTabChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.file.HeaderMatchesChecker\" enabled=\"true\">\n  <parameters>\n   <parameter name=\"header\"><![CDATA[/*\n * Copyright (c) Jupyter Development Team.\n */]]></parameter>\n  </parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.SpacesAfterPlusChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.SpacesBeforePlusChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.file.WhitespaceEndOfLineChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.file.FileLineLengthChecker\" 
enabled=\"true\">\n  <parameters>\n   <parameter name=\"maxLineLength\"><![CDATA[100]]></parameter>\n   <parameter name=\"tabSize\"><![CDATA[2]]></parameter>\n   <parameter name=\"ignoreImports\">true</parameter>\n  </parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.ClassNamesChecker\" enabled=\"true\">\n  <parameters><parameter name=\"regex\"><![CDATA[[A-Z][A-Za-z]*]]></parameter></parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.ObjectNamesChecker\" enabled=\"true\">\n  <parameters><parameter name=\"regex\"><![CDATA[[A-Z][A-Za-z]*]]></parameter></parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.PackageObjectNamesChecker\" enabled=\"true\">\n  <parameters><parameter name=\"regex\"><![CDATA[^[a-z][A-Za-z]*$]]></parameter></parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.ParameterNumberChecker\" enabled=\"true\">\n  <parameters><parameter name=\"maxParameters\"><![CDATA[10]]></parameter></parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.NoFinalizeChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.CovariantEqualsChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.StructuralTypeChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.UppercaseLChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.IfBraceChecker\" enabled=\"true\">\n  <parameters>\n   <parameter name=\"singleLineAllowed\"><![CDATA[true]]></parameter>\n   <parameter name=\"doubleLineAllowed\"><![CDATA[true]]></parameter>\n  </parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.PublicMethodsHaveTypeChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.file.NewLineAtEofChecker\" 
enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.NonASCIICharacterChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.SpaceAfterCommentStartChecker\" enabled=\"true\"></check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.EnsureSingleSpaceBeforeTokenChecker\" enabled=\"true\">\n  <parameters>\n   <parameter name=\"tokens\">ARROW, EQUALS, ELSE, TRY, CATCH, FINALLY, LARROW, RARROW</parameter>\n  </parameters>\n </check>\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.EnsureSingleSpaceAfterTokenChecker\" enabled=\"true\">\n  <parameters>\n   <parameter name=\"tokens\">ARROW, EQUALS, COMMA, COLON, IF, ELSE, DO, WHILE, FOR, MATCH, TRY, CATCH, FINALLY, LARROW, RARROW</parameter>\n  </parameters>\n </check>\n\n <!-- ??? usually shouldn't be checked into the code base. -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NotImplementedErrorUsage\" enabled=\"true\"></check>\n\n <!-- All printlns need to be wrapped in '// scalastyle:off/on println' -->\n <check customId=\"println\" level=\"error\" class=\"org.scalastyle.scalariform.TokenChecker\" enabled=\"true\">\n  <parameters><parameter name=\"regex\">^println$</parameter></parameters>\n  <customMessage><![CDATA[Are you sure you want to println? If yes, wrap the code block with\n      // scalastyle:off println\n      println(...)\n      // scalastyle:on println]]></customMessage>\n </check>\n\n <check customId=\"classforname\" level=\"error\" class=\"org.scalastyle.file.RegexChecker\" enabled=\"true\">\n  <parameters><parameter name=\"regex\">Class\\.forName</parameter></parameters>\n  <customMessage><![CDATA[\n      Are you sure that you want to use Class.forName? 
In most cases, you should use Utils.classForName instead.\n      If you must use Class.forName, wrap the code block with\n      // scalastyle:off classforname\n      Class.forName(...)\n      // scalastyle:on classforname\n    ]]></customMessage>\n </check>\n\n <!-- ================================================================================ -->\n <!--       rules we'd like to enforce, but haven't cleaned up the codebase yet        -->\n <!-- ================================================================================ -->\n\n <!-- We cannot turn the following two on, because it'd fail a lot of string interpolation use cases. -->\n <!-- Ideally the following two rules should be configurable to rule out string interpolation. -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NoWhitespaceBeforeLeftBracketChecker\" enabled=\"false\"></check>\n <check level=\"error\" class=\"org.scalastyle.scalariform.NoWhitespaceAfterLeftBracketChecker\" enabled=\"false\"></check>\n\n <!-- This breaks symbolic method names so we don't turn it on. -->\n <!-- Maybe we should update it to allow basic symbolic names, and then we are good to go. 
-->\n <check level=\"error\" class=\"org.scalastyle.scalariform.MethodNamesChecker\" enabled=\"false\">\n  <parameters>\n   <parameter name=\"regex\"><![CDATA[^[a-z][A-Za-z0-9]*$]]></parameter>\n  </parameters>\n </check>\n\n <!-- Should turn this on, but we have a few places that need to be fixed first -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.EqualsHashCodeChecker\" enabled=\"false\"></check>\n\n <!-- ================================================================================ -->\n <!--                               rules we don't want                                -->\n <!-- ================================================================================ -->\n\n <check level=\"error\" class=\"org.scalastyle.scalariform.IllegalImportsChecker\" enabled=\"false\">\n  <parameters><parameter name=\"illegalImports\"><![CDATA[sun._,java.awt._]]></parameter></parameters>\n </check>\n\n <!-- We want the opposite of this: NewLineAtEofChecker -->\n <check level=\"error\" class=\"org.scalastyle.file.NoNewLineAtEofChecker\" enabled=\"false\"></check>\n\n <!-- This one complains about all kinds of random things. Disable. -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.SimplifyBooleanExpressionChecker\" enabled=\"false\"></check>\n\n <!-- We use return quite a bit for control flows and guards -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.ReturnChecker\" enabled=\"false\"></check>\n\n <!-- We use null a lot in low level code and to interface with 3rd party code -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NullChecker\" enabled=\"false\"></check>\n\n <!-- Doesn't seem super big deal here ... -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NoCloneChecker\" enabled=\"false\"></check>\n\n <!-- Doesn't seem super big deal here ... 
-->\n <check level=\"error\" class=\"org.scalastyle.file.FileLengthChecker\" enabled=\"false\">\n  <parameters><parameter name=\"maxFileLength\">800</parameter></parameters>\n </check>\n\n <!-- Doesn't seem super big deal here ... -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NumberOfTypesChecker\" enabled=\"false\">\n  <parameters><parameter name=\"maxTypes\">30</parameter></parameters>\n </check>\n\n <!-- Doesn't seem super big deal here ... -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.CyclomaticComplexityChecker\" enabled=\"false\">\n  <parameters><parameter name=\"maximum\">10</parameter></parameters>\n </check>\n\n <!-- Doesn't seem super big deal here ... -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.MethodLengthChecker\" enabled=\"false\">\n  <parameters><parameter name=\"maxLength\">50</parameter></parameters>\n </check>\n\n <!-- Not exactly feasible to enforce this right now. -->\n <!-- It is also infrequent that somebody introduces a new class with a lot of methods. -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.NumberOfMethodsInTypeChecker\" enabled=\"false\">\n  <parameters><parameter name=\"maxMethods\"><![CDATA[30]]></parameter></parameters>\n </check>\n\n <!-- Doesn't seem super big deal here, and we have a lot of magic numbers ... -->\n <check level=\"error\" class=\"org.scalastyle.scalariform.MagicNumberChecker\" enabled=\"false\">\n  <parameters><parameter name=\"ignore\">-1,0,1,2,3</parameter></parameters>\n </check>\n\n</scalastyle>\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/KernelProfile.scala",
    "content": "/**\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\npackage launcher\n\n\nimport java.util.UUID.randomUUID\nimport play.api.libs.json._\nimport scala.util.Random\nimport launcher.utils.SocketUtils\n\n\ncase class KernelProfile(hb_port : Int,\n                         control_port : Int,\n                         iopub_port : Int,\n                         stdin_port : Int,\n                         shell_port : Int,\n                         key : String,\n                         kernel_name : String,\n                         signature_scheme : String,\n                         transport : String,\n                         ip : String)\n\nobject KernelProfile {\n\n  def newKey() : String = randomUUID.toString\n\n  def createJsonProfile(portLowerBound: Int = -1,\n                        portUpperBound: Int = -1) : String = {\n\n    implicit val writes = Json.writes[KernelProfile]\n\n    val newKernelProfile = new KernelProfile(\n      hb_port = SocketUtils.findPort(portLowerBound, portUpperBound),\n      control_port = SocketUtils.findPort(portLowerBound, portUpperBound),\n      iopub_port = SocketUtils.findPort(portLowerBound, portUpperBound),\n      stdin_port = SocketUtils.findPort(portLowerBound, portUpperBound),\n      shell_port = SocketUtils.findPort(portLowerBound, portUpperBound),\n      key = newKey(),\n      kernel_name = \"Apache Toree Scala\", transport = \"tcp\", ip = \"0.0.0.0\",\n      signature_scheme = \"hmac-sha256\"\n    )\n\n    Json.prettyPrint(Json.toJson(newKernelProfile))\n  }\n}\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/ToreeLauncher.scala",
    "content": "/**\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\npackage launcher\n\nimport java.io.{BufferedWriter, File, FileWriter, PrintStream}\nimport java.nio.file.{Files, Paths}\nimport java.net.{InetAddress, ServerSocket, Socket}\n\nimport org.apache.toree.Main\nimport play.api.libs.json._\nimport java.lang.management.ManagementFactory\n\nimport scala.io.BufferedSource\nimport scala.collection.mutable.ArrayBuffer\n\nimport sun.misc.Signal\n\nimport launcher.utils.{SecurityUtils, SocketUtils}\n\nimport org.apache.toree.utils.LogLike\n\n\nobject ToreeLauncher extends LogLike {\n\n  val minPortRangeSize = sys.env.getOrElse(\"MIN_PORT_RANGE_SIZE\", sys.env.getOrElse(\"EG_MIN_PORT_RANGE_SIZE\", \"1000\")).toInt\n  val kernelTempDir : String = \"jupyter-kernel\"\n  var profilePath : String = _\n  var kernelId : String = _\n  var portLowerBound : Int = -1\n  var portUpperBound : Int = -1\n  var responseAddress : String = _\n  var publicKey : String = _\n  var alternateSigint : String = _\n  var initMode : String = \"lazy\"\n  var toreeArgs = ArrayBuffer[String]()\n\n  private def pathExists(filePath : String) : Boolean =\n    if (filePath == null) false\n    else Files.exists(Paths.get(filePath))\n\n  private def writeToFile(outputPath : String, content : String): Unit = {\n    val file = new File(outputPath)\n    if(!pathExists(file.getParentFile.toString)) {\n      file.getParentFile.mkdirs // mkdir if not exists\n    }\n    val bw = new BufferedWriter(new FileWriter(file))\n    try{\n      bw.write(content)\n    } finally {\n      bw.close()\n    }\n  }\n\n  private def initPortRange(portRange: String): Unit = {\n      val ports = portRange.split(\"\\\\.\\\\.\")\n\n      this.portLowerBound = ports(0).toInt\n      this.portUpperBound = ports(1).toInt\n\n      logger.info(\"Port Range: lower bound ( %s ) / upper bound ( %s )\"\n        .format(this.portLowerBound, this.portUpperBound))\n\n      
if (this.portLowerBound != this.portUpperBound) {  // Range of zero disables port restrictions\n         if (this.portLowerBound < 0 || this.portUpperBound < 0 ||\n            (this.portUpperBound - this.portLowerBound < minPortRangeSize)) {\n            logger.error(\"Invalid port range, use --port-range <LowerBound>..<UpperBound>, \" +\n              s\"range must be >= MIN_PORT_RANGE_SIZE ($minPortRangeSize)\")\n            sys.exit(-1)\n         }\n      }\n  }\n\n  private def initArguments(args: Array[String]): Unit = {\n\n    logger.info(\"Toree launcher arguments (initial):\")\n    args.foreach(logger.info(_))\n    logger.info(\"---------------------------\")\n\n    // Walk the arguments, collecting launcher options along the way and buildup a\n    // new toree arguments list.  There's got to be a better way to do this.\n    var i = 0\n    while ( i < args.length ) {\n      var arg: String = args(i)\n      arg match {\n\n        // Profile is a straight pass-thru to toree\n        case \"--profile\" =>\n          i += 1\n          profilePath = args(i).trim\n          toreeArgs += arg\n          toreeArgs += profilePath\n\n        // Alternate sigint is a straight pass-thru to toree\n        case \"--alternate-sigint\" =>\n          i += 1\n          alternateSigint = args(i).trim\n          toreeArgs += arg\n          toreeArgs += alternateSigint\n\n        // Initialization mode requires massaging for toree\n        case \"--spark-context-initialization-mode\" | \"--RemoteProcessProxy.spark-context-initialization-mode\" =>\n          i += 1\n          initMode = args(i).trim\n          initMode match {\n            case \"none\" =>\n              toreeArgs += \"--nosparkcontext\"\n            case _ =>\n              toreeArgs += \"--spark-context-initialization-mode\"\n              toreeArgs += initMode\n          }\n\n        // Port range doesn't apply to toree, consume here\n        case \"--port-range\" | \"--RemoteProcessProxy.port-range\" =>\n      
    i += 1\n          initPortRange(args(i).trim)\n\n        // Response address doesn't apply to toree, consume here\n        case \"--response-address\" | \"--RemoteProcessProxy.response-address\" =>\n          i += 1\n          responseAddress = args(i).trim\n\n        // kernel id doesn't apply to toree, consume here\n        case \"--kernel-id\" | \"--RemoteProcessProxy.kernel-id\" =>\n          i += 1\n          kernelId = args(i).trim\n\n        // Public key doesn't apply to toree, consume here\n        case \"--public-key\" | \"--RemoteProcessProxy.public-key\" =>\n          i += 1\n          publicKey = args(i).trim\n\n        // All other arguments should pass-thru to toree\n        case _ => toreeArgs += args(i).trim\n      }\n      i += 1\n    }\n  }\n\n  // Borrowed from toree to avoid dependency\n  private def deleteDirRecur(file: File): Unit = {\n    // delete directory recursively\n    if (file != null){\n      if (file.isDirectory){\n        file.listFiles.foreach(deleteDirRecur)\n      }\n      if (file.exists){\n        file.delete\n      }\n    }\n  }\n\n  private def determineConnectionFile(connectionFile: String, kernelId: String): String = {\n    // We know the connection file does not exist, so create a temporary directory\n    // and derive the filename from kernelId, if not null.\n    // If kernelId is null, then use the filename in the connectionFile.\n\n    val tmpPath = Files.createTempDirectory(kernelTempDir)\n    // tmpPath.toFile.deleteOnExit() doesn't appear to work, use system hook\n    sys.addShutdownHook{\n      deleteDirRecur(tmpPath.toFile)\n    }\n    val fileName = if (kernelId != null) \"kernel-\" + kernelId + \".json\"\n      else Paths.get(connectionFile).getFileName.toString\n    val newPath = Paths.get(tmpPath.toString, fileName)\n    val newConnectionFile = newPath.toString\n    // Locate --profile and replace next element with new name.  
If it doesn't exist, add both.\n    val profileIndex = toreeArgs.indexOf(\"--profile\")\n    if (profileIndex >= 0) {\n      toreeArgs(profileIndex + 1) = newConnectionFile\n    } else {\n        toreeArgs += \"--profile\"\n        toreeArgs += newConnectionFile\n    }\n\n    newConnectionFile\n  }\n\n  private def getPID : String = {\n    // Return the current process ID. If not an integer string, server will ignore.\n    ManagementFactory.getRuntimeMXBean.getName.split('@')(0)\n  }\n\n  private def initProfile(args : Array[String]): ServerSocket = {\n\n    var commSocket : ServerSocket = null\n\n    initArguments(args)\n\n    if (profilePath == null && kernelId == null){\n      logger.error(\"At least one of '--profile' or '--kernel-id' \" +\n        \"must be provided - exiting!\")\n      sys.exit(-1)\n    }\n\n    if (kernelId == null) {\n      logger.error(\"Parameter '--kernel-id' must be provided - exiting!\")\n      sys.exit(-1)\n    }\n\n    if (publicKey == null) {\n      logger.error(\"Parameter '--public-key' must be provided - exiting!\")\n      sys.exit(-1)\n    }\n\n    if (!pathExists(profilePath)) {\n      profilePath = determineConnectionFile(profilePath, kernelId)\n\n      logger.info(\"The profile %s doesn't exist, now creating it...\".format(profilePath))\n\n      val content = KernelProfile.createJsonProfile(this.portLowerBound, this.portUpperBound)\n      writeToFile(profilePath, content)\n\n      if (pathExists(profilePath)) {\n        logger.info(\"%s saved\".format(profilePath))\n      } else {\n        logger.error(\"Failed to create: %s\".format(profilePath))\n        sys.exit(-1)\n      }\n\n      var connectionJson = Json.parse(content)\n\n      // Now need to also return the PID info in connection JSON\n      connectionJson = connectionJson.as[JsObject] ++ Json.obj(\"pid\" -> getPID)\n\n      // Add kernelId\n      connectionJson = connectionJson.as[JsObject] ++ Json.obj(\"kernel_id\" -> kernelId)\n\n      // Server wants to establish 
socket communication. Create socket and\n      // convey port number back to the server.\n      commSocket = SocketUtils.findSocket(this.portLowerBound, this.portUpperBound)\n      connectionJson = connectionJson.as[JsObject] ++ Json.obj(\"comm_port\" -> commSocket.getLocalPort)\n      val jsonContent = Json.toJson(connectionJson).toString()\n\n      if (responseAddress != null){\n        logger.info(\"JSON Payload: '%s'\".format(jsonContent))\n        val payload = SecurityUtils.encrypt(publicKey, jsonContent)\n        logger.info(\"Encrypted Payload: '%s'\".format(payload))\n        SocketUtils.writeToSocket(responseAddress, payload)\n      }\n    }\n    commSocket\n  }\n\n  private def getServerRequest(commSocket : ServerSocket): String = {\n    val s = commSocket.accept()\n    val data = new BufferedSource(s.getInputStream).getLines.mkString\n    s.close()\n    data\n  }\n\n  private def getReconciledSignalName(sigNum: Int): String = {\n    // To raise the signal, we must map the signal number back to the appropriate\n    // name as follows:  Take the common case and assume interrupt and check if an\n    // alternate interrupt signal has been given. If sigNum = 9, use \"TERM\", else\n    // if no alternate has been provided use \"INT\".  Note that use of SIGINT won't\n    // get received because the JVM won't propagate to background threads, but it's\n    // the best we can do.  We'll still issue a warning in the log.\n\n    require(sigNum > 0, \"sigNum must be greater than zero\")\n\n    if (sigNum == 9) \"TERM\"\n    else {\n      if (alternateSigint == null) {\n        logger.warn(\"--alternate-sigint is not defined and signum %d has been \" +\n                 \"requested.  Using SIGINT, which probably won't get received due to JVM \" +\n                 \"preventing interrupts on background processes.  
\" +\n                 \"Define --alternate-sigint using __TOREE_OPTS__.\"\n                   .format(sigNum))\n        \"INT\"\n      }\n      else alternateSigint\n    }\n  }\n\n  private def serverListener(commSocket : ServerSocket): Unit = {\n    var stop = false\n    while (!stop) {\n      val requestData = getServerRequest(commSocket)\n\n      // Handle each of the requests.  Note that we do not make an assumption that these are\n      // mutually exclusive - although that will probably be the case for now.  Over time,\n      // this should probably get refactored into a) better scala and b) token/classes for\n      // each request.\n\n      val requestJson = Json.parse(requestData).as[JsObject].value\n\n      // Signal the kernel...\n      if ( requestJson.contains(\"signum\")) {\n        val sigNum = requestJson(\"signum\").asInstanceOf[JsNumber].value.toInt\n        if ( sigNum > 0 ) {\n          // If sigNum anything but 0 (for poll), use Signal.raise(signal) to signal the kernel.\n          val sigName = getReconciledSignalName(sigNum)\n          val sigToRaise = new Signal(sigName)\n          logger.info(\"Server listener raising signal: '%s' (%d) for signum: %d\".\n                   format(sigToRaise.getName, sigToRaise.getNumber, sigNum))\n          Signal.raise(sigToRaise)\n        }\n      }\n      // Stop the listener...\n      if ( requestJson.contains(\"shutdown\")) {\n        val shutdown = requestJson(\"shutdown\").asInstanceOf[JsNumber].value.toInt\n        if ( shutdown == 1 ) {\n          // The server has been instructed to shutdown the kernel, so let's stop\n          // the listener so that it doesn't interfere with poll() calls.\n          logger.info(\"Stopping server listener.\")\n          stop = true\n        }\n      }\n    }\n  }\n\n  def main(args: Array[String]) {\n    val commSocket = initProfile(args)\n\n    // if commSocket is not null, start a thread to listen on socket\n    if ( commSocket != null ){\n      val 
serverListenerThread = new Thread {\n        override def run() {\n          serverListener(commSocket)\n        }\n      }\n      logger.info(\"Starting server listener...\")\n      serverListenerThread.start()\n    }\n\n    logger.info(\"Toree kernel arguments (final):\")\n    toreeArgs.foreach(logger.info(_))\n    logger.info(\"---------------------------\")\n    Main.main(toreeArgs.toArray)\n  }\n}\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SecurityUtils.scala",
    "content": "/**\n  * Copyright (c) Jupyter Development Team.\n  * Distributed under the terms of the Modified BSD License.\n  */\n\npackage launcher.utils\nimport scala.util.Random\nimport java.nio.charset.StandardCharsets\nimport java.security.Key\nimport java.security.KeyFactory\nimport java.security.PublicKey\nimport java.security.spec.X509EncodedKeySpec\nimport java.util.Base64\nimport javax.crypto.Cipher\nimport javax.crypto.spec.SecretKeySpec\nimport play.api.libs.json._\nimport org.apache.toree.utils.LogLike\n\n\ncase class Payload(key : String, conn_info : String, version : Int = 1)\n\nobject Payload {\n\n  def createJson(key: String, conn_info: String) : String = {\n    implicit val writes = Json.writes[Payload]\n    val newPayload = new Payload(key = key, conn_info = conn_info)\n    Json.prettyPrint(Json.toJson(newPayload))\n  }\n}\n\n\nobject SecurityUtils extends LogLike {\n\n  def encrypt(publicKey: String, jsonContent: String): String = {\n    // Generate an AES key and encrypt the connection information...\n    logger.info(\"publicKey: %s\".format(publicKey))\n    val random: Random = new Random()\n    val preKey: Array[Byte] = new Array[Byte](16)\n    random.nextBytes(preKey)\n    logger.info(\"aes_key: '%s'\".format(preKey))\n    val aesKey: Key = new SecretKeySpec(preKey, \"AES\")\n    val aesCipher: Cipher = Cipher.getInstance(\"AES\")\n    aesCipher.init(Cipher.ENCRYPT_MODE, aesKey)\n    val connInfo = Base64.getEncoder.encodeToString(aesCipher.doFinal(jsonContent.getBytes(StandardCharsets.UTF_8)))\n\n    // Encrypt the AES key using the public key...\n    val encodedPK: Array[Byte] = publicKey.getBytes(StandardCharsets.UTF_8)\n    val b64Key = Base64.getDecoder.decode(encodedPK)\n    val keySpec: X509EncodedKeySpec = new X509EncodedKeySpec(b64Key)\n    val keyFactory: KeyFactory = KeyFactory.getInstance(\"RSA\")\n    val rsaKey: PublicKey = keyFactory.generatePublic(keySpec)\n\n    val rsaCipher: Cipher = Cipher.getInstance(\"RSA\")\n    
rsaCipher.init(Cipher.ENCRYPT_MODE, rsaKey)\n    val key = Base64.getEncoder.encodeToString(rsaCipher.doFinal(aesKey.getEncoded()))\n    Base64.getEncoder.encodeToString(Payload.createJson(key, connInfo).getBytes(StandardCharsets.UTF_8))\n  }\n}\n"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SocketUtils.scala",
    "content": "/**\n  * Copyright (c) Jupyter Development Team.\n  * Distributed under the terms of the Modified BSD License.\n  */\n\npackage launcher.utils\n\nimport java.io.PrintStream\nimport java.net.{InetAddress, ServerSocket, Socket}\n\nimport org.apache.toree.utils.LogLike\n\nimport scala.util.Random\n\n\nobject SocketUtils extends LogLike {\n\n  val random: Random = new Random (System.currentTimeMillis)\n\n  def writeToSocket(socketAddress : String, content : String): Unit = {\n    val ipPort = socketAddress.split(\":\")\n    if (ipPort.length == 2) {\n      logger.info(\"Sending connection info to gateway at %s\\n%s\".format(socketAddress, content)) // scalastyle:off\n      val ip = ipPort(0)\n      val port = ipPort(1).toInt\n      val s = new Socket(InetAddress.getByName(ip), port)\n      val out = new PrintStream(s.getOutputStream)\n      try {\n        out.append(content)\n        out.flush()\n      } finally {\n        s.close()\n      }\n    } else {\n      logger.error(\"Invalid format for response address '%s'!\".format(socketAddress)) // scalastyle:off\n    }\n  }\n\n  def findPort(portLowerBound: Int, portUpperBound: Int): Int = {\n\n    val socket = findSocket(portLowerBound, portUpperBound)\n    val port = socket.getLocalPort\n    logger.info(\"port %s is available\".format(port)) // scalastyle:off\n\n    // now Close the socket/port\n    socket.close()\n\n    logger.info(\"Port %s closed...\".format(port)) // scalastyle:off\n\n    port\n  }\n\n  def findSocket(portLowerBound: Int, portUpperBound: Int): ServerSocket = {\n\n    var foundAvailable: Boolean = false\n    var socket: ServerSocket = null\n\n    while (foundAvailable == false) {\n\n      val candidatePort = getCandidatePort(portLowerBound, portUpperBound)\n\n      // try candidatePort - only display 'Trying...' 
if in range\n      if ( candidatePort > 0 )\n        logger.info(\"Trying port %s ...\".format(candidatePort)) // scalastyle:off\n\n      try {\n        socket = new ServerSocket(candidatePort)\n        // return the socket to be used\n        foundAvailable = true\n      } catch {\n        case _ : Throwable => logger.info(\"port %s is in use\".format(candidatePort)) // scalastyle:off\n        socket = null\n      }\n    }\n\n    socket\n  }\n\n  private def getCandidatePort(portLowerBound: Int, portUpperBound: Int): Int = {\n\n    val portRange = portUpperBound - portLowerBound\n    if ( portRange <= 0 )\n        return 0\n\n    val port = portLowerBound + random.nextInt(portRange)\n\n    port\n  }\n}\n"
  },
  {
    "path": "etc/kernel-resources/ir/kernel.js",
    "content": "const cmd_key = /Mac/.test(navigator.platform) ? \"Cmd\" : \"Ctrl\";\n\nconst edit_actions = [\n  {\n    name: \"R Assign\",\n    shortcut: \"Alt--\",\n    icon: \"fa-long-arrow-left\",\n    help: \"R: Inserts the left-assign operator (<-)\",\n    handler(cm) {\n      cm.replaceSelection(\" <- \");\n    },\n  },\n  {\n    name: \"R Pipe\",\n    shortcut: `Shift-${cmd_key}-M`,\n    icon: \"fa-angle-right\",\n    help: \"R: Inserts the magrittr pipe operator (%>%)\",\n    handler(cm) {\n      cm.replaceSelection(\" %>% \");\n    },\n  },\n  {\n    name: \"R Help\",\n    shortcut: \"F1\",\n    icon: \"fa-book\",\n    help: \"R: Shows the manpage for the item under the cursor\",\n    handler(cm, cell) {\n      const { anchor, head } = cm.findWordAt(cm.getCursor());\n      const word = cm.getRange(anchor, head);\n\n      const callbacks = cell.get_callbacks();\n      const options = {\n        silent: false,\n        store_history: false,\n        stop_on_error: true,\n      };\n      cell.last_msg_id = cell.notebook.kernel.execute(\n        `help(\\`${word}\\`)`,\n        callbacks,\n        options,\n      );\n    },\n  },\n];\n\nconst prefix = \"irkernel\";\n\nfunction add_edit_shortcut(notebook, actions, keyboard_manager, edit_action) {\n  const { name, shortcut, icon, help, handler } = edit_action;\n\n  const action = {\n    icon,\n    help,\n    help_index: \"zz\",\n    handler: () => {\n      const cell = notebook.get_selected_cell();\n      handler(cell.code_mirror, cell);\n    },\n  };\n\n  const full_name = actions.register(action, name, prefix);\n\n  Jupyter.keyboard_manager.edit_shortcuts.add_shortcut(shortcut, full_name);\n}\n\nfunction render_math(pager, html) {\n  if (!html) return;\n  const $container = pager.pager_element.find(\"#pager-container\");\n  $container\n    .find('p[style=\"text-align: center;\"]')\n    .map((i, e) => (e.outerHTML = `\\\\[${e.querySelector(\"i\").innerHTML}\\\\]`));\n  $container.find(\"i\").map((i, e) => 
(e.outerHTML = `\\\\(${e.innerHTML}\\\\)`));\n  MathJax.Hub.Queue([\"Typeset\", MathJax.Hub, $container[0]]);\n}\n\ndefine([\"base/js/namespace\"], ({\n  notebook,\n  actions,\n  keyboard_manager,\n  pager,\n}) => ({\n  onload() {\n    edit_actions.forEach((a) =>\n      add_edit_shortcut(notebook, actions, keyboard_manager, a),\n    );\n\n    pager.events.on(\n      \"open_with_text.Pager\",\n      (event, { data: { \"text/html\": html } }) => render_math(pager, html),\n    );\n  },\n}));\n"
  },
  {
    "path": "etc/kernelspecs/R_docker/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"R on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-r:VERSION\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/R_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/R_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"R on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-r:VERSION\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/R_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/dask_python_yarn_remote/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n    IMPERSONATION_OPTS=\"--user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n    USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n    IMPERSONATION_OPTS=\"\"\n    USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IPython kernel for Dask ${USER_CLAUSE}\"\necho\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\nset -x\neval exec \\\n     \"${DASK_YARN_EXE}\" submit \\\n     \"${DASK_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/dask_python_yarn_remote/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Dask - Python (YARN Remote Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"DASK_YARN_EXE\": \"/opt/conda/bin/dask-yarn\",\n    \"DASK_OPTS\": \"--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --environment python:///opt/conda/bin/python --temporary-security-credentials --deploy-mode remote\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/dask_python_yarn_remote/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.cluster-type\",\n    \"dask\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_distributed/kernel.json",
    "content": "{\n  \"display_name\": \"Python 3 (distributed)\",\n  \"language\": \"python\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_distributed/scripts/launch_ipykernel.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"none\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_docker/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_tf_docker/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker with Tensorflow\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-tf-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_tf_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_tf_gpu_docker/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker with Tensorflow with GPUs\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-tf-gpu-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_tf_gpu_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes with Tensorflow with GPUs\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-tf-gpu-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_tf_gpu_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/python_tf_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes with Tensorflow\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-tf-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/python_tf_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/scala_docker/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Scala on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-scala:VERSION\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/scala_docker/scripts/launch_docker.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/scala_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Scala on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-scala:VERSION\"\n      }\n    }\n  },\n  \"env\": {},\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/scala_kubernetes/scripts/launch_kubernetes.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_conductor_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IRkernel for Spark Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\n# Add server_listener.py to files for spark-opts\nADDITIONAL_OPTS=\"--files ${PROG_HOME}/scripts/server_listener.py\"\n\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${ADDITIONAL_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_IRkernel.R\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_conductor_cluster/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark R (Spark Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_OPTS\": \"--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"--customAppName ${KERNEL_ID}\"\n  },\n  \"argv\": [\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"eager\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_kubernetes/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n#        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n#        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IRkernel for Spark in Kubernetes mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nif [ -z \"${KERNEL_ID}\" ]; then\n  echo \"KERNEL_ID must be set for discovery and lifecycle management!\"\n  exit 1\nfi\n\nKERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}\nPROG_HOME=${KERNEL_LAUNCHERS_DIR}/R\n\nEG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp}\nSCRIPTS_HOME=\"$(cd \"`dirname \"$0\"`\"/../scripts; pwd)\"\npod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID}\nspark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID}\npython ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out}\nadditional_spark_opts=`cat ${spark_opts_out}`\nSPARK_OPTS=\"${SPARK_OPTS} ${additional_spark_opts}\"\nrm -f ${spark_opts_out}\n\nset -x\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"local://${PROG_HOME}/scripts/launch_IRkernel.R\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark - R (Kubernetes Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-spark-r:VERSION\",\n        \"executor_image_name\": \"elyra/kernel-spark-r:VERSION\"\n      }\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/opt/spark\",\n    \"SPARK_OPTS\": \"--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"HTTP2_DISABLE\": \"true\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_R_kubernetes/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_yarn_client/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IRkernel for Spark in Yarn Client mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\nset -x\neval exec \"${IMPERSONATION_OPTS}\" \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_IRkernel.R\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_yarn_client/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark - R (YARN Client Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_R_yarn_client/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_yarn_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IRkernel for Spark in Yarn Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\n# Add server_listener.py to files for spark-opts\nADDITIONAL_OPTS=\"--files ${PROG_HOME}/scripts/server_listener.py\"\n\nset -x\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${ADDITIONAL_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_IRkernel.R\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_R_yarn_cluster/kernel.json",
    "content": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark - R (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"eager\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_conductor_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IPython kernel for Spark Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nif [ -z \"${KERNEL_IG_UUID}\" ]; then\n  PROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\nelse\n  PROG_HOME=\"${SPARK_HOME}\"\nfi\n\neval exec \"${IMPERSONATION_OPTS}\" \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_conductor_cluster/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Spark Python (Spark Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_OPTS\": \"--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"eager\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_kubernetes/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n#        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n#        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IPython kernel for Spark in Kubernetes mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nif [ -z \"${KERNEL_ID}\" ]; then\n  echo \"KERNEL_ID must be set for discovery and lifecycle management!\"\n  exit 1\nfi\n\nKERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}\nPROG_HOME=${KERNEL_LAUNCHERS_DIR}/python\n\nEG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp}\nSCRIPTS_HOME=\"$(cd \"`dirname \"$0\"`\"/../scripts; pwd)\"\npod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID}\nspark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID}\npython ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out}\nadditional_spark_opts=`cat ${spark_opts_out}`\nSPARK_OPTS=\"${SPARK_OPTS} ${additional_spark_opts}\"\nrm -f ${spark_opts_out}\n\nset -x\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"local://${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (Kubernetes Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-spark-py:VERSION\",\n        \"executor_image_name\": \"elyra/kernel-spark-py:VERSION\"\n      }\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/opt/spark\",\n    \"SPARK_OPTS\": \"--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"HTTP2_DISABLE\": \"true\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_kubernetes/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_operator/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Spark Operator (Python)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.spark_operator.SparkOperatorProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-spark-py:VERSION\",\n        \"executor_image_name\": \"elyra/kernel-spark-py:VERSION\"\n      }\n    }\n  },\n  \"argv\": [\n    \"python\",\n    \"/usr/local/share/jupyter/kernels/spark_python_operator/scripts/launch_custom_resource.py\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_yarn_client/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IPython kernel for Spark in Yarn Client mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\nset -x\neval exec \"${IMPERSONATION_OPTS}\" \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_yarn_client/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Client Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting IPython kernel for Spark in Yarn Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\n\nset -x\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     \"${PROG_HOME}/scripts/launch_ipykernel.py\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_python_yarn_cluster/kernel.json",
    "content": "{\n  \"language\": \"python\",\n  \"display_name\": \"Spark - Python (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    },\n    \"debugger\": true\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"PYSPARK_PYTHON\": \"/opt/conda/bin/python\",\n    \"PYTHONPATH\": \"${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip\",\n    \"SPARK_OPTS\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"LAUNCH_OPTS\": \"\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_conductor_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting Scala kernel for Spark Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\nKERNEL_ASSEMBLY=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-assembly-*.jar;)`\nTOREE_ASSEMBLY=\"${PROG_HOME}/lib/${KERNEL_ASSEMBLY}\"\nif [ ! -f ${TOREE_ASSEMBLY} ]; then\n    echo \"Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\n# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during\n# install, but also during runtime. The runtime options take precedence over the install options.\nif [ \"${SPARK_OPTS}\" = \"\" ]; then\n   SPARK_OPTS=${__TOREE_SPARK_OPTS__}\nfi\n\nif [ \"${TOREE_OPTS}\" = \"\" ]; then\n   TOREE_OPTS=${__TOREE_OPTS__}\nfi\n\n# Toree launcher jar path, plus required lib jars (toree-assembly)\nJARS=\"${TOREE_ASSEMBLY}\"\n# Toree launcher app path\nLAUNCHER_JAR=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-launcher*.jar;)`\nLAUNCHER_APP=\"${PROG_HOME}/lib/${LAUNCHER_JAR}\"\nif [ ! -f ${LAUNCHER_APP} ]; then\n    echo \"Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     --jars \"${JARS}\" \\\n     --class launcher.ToreeLauncher \\\n     \"${LAUNCHER_APP}\" \\\n     \"${TOREE_OPTS}\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_conductor_cluster/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Spark Scala (Spark Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_OPTS\": \"--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"__TOREE_OPTS__\": \"--alternate-sigint USR2 --spark-context-initialization-mode eager\",\n    \"LAUNCH_OPTS\": \"\",\n    \"DEFAULT_INTERPRETER\": \"Scala\"\n  },\n  \"argv\": [\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_kubernetes/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n#        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n#        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting Toree kernel for Spark in Kubernetes mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nif [ -z \"${KERNEL_ID}\" ]; then\n  echo \"KERNEL_ID must be set for discovery and lifecycle management!\"\n  exit 1\nfi\n\nKERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}\nPROG_HOME=${KERNEL_LAUNCHERS_DIR}/scala\nKERNEL_ASSEMBLY=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-assembly-*.jar;)`\nTOREE_ASSEMBLY=\"${PROG_HOME}/lib/${KERNEL_ASSEMBLY}\"\nif [ ! -f ${TOREE_ASSEMBLY} ]; then\n    echo \"Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\n# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during\n# install, but also during runtime. The runtime options take precedence over the install options.\nif [ \"${SPARK_OPTS}\" = \"\" ]; then\n   SPARK_OPTS=${__TOREE_SPARK_OPTS__}\nfi\n\nif [ \"${TOREE_OPTS}\" = \"\" ]; then\n   TOREE_OPTS=${__TOREE_OPTS__}\nfi\n\n# Toree launcher jar path, plus required lib jars (toree-assembly)\nJARS=\"local://${TOREE_ASSEMBLY}\"\n# Toree launcher app path\nLAUNCHER_JAR=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-launcher*.jar;)`\nLAUNCHER_APP=\"${PROG_HOME}/lib/${LAUNCHER_JAR}\"\nif [ ! -f ${LAUNCHER_APP} ]; then\n    echo \"Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing.  
Exiting...\"\n    exit 1\nfi\n\nEG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp}\nSCRIPTS_HOME=\"$(cd \"`dirname \"$0\"`\"/../scripts; pwd)\"\npod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID}\nspark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID}\npython ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out}\nadditional_spark_opts=`cat ${spark_opts_out}`\nSPARK_OPTS=\"${SPARK_OPTS} ${additional_spark_opts}\"\nrm -f ${spark_opts_out}\n\nset -x\neval exec \"${IMPERSONATION_OPTS}\" \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     --jars \"${JARS}\" \\\n     --class launcher.ToreeLauncher \\\n     \"local://${LAUNCHER_APP}\" \\\n     \"${TOREE_OPTS}\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_kubernetes/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Spark - Scala (Kubernetes Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy\",\n      \"config\": {\n        \"image_name\": \"elyra/kernel-scala:VERSION\",\n        \"executor_image_name\": \"elyra/kernel-scala:VERSION\"\n      }\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/opt/spark\",\n    \"__TOREE_SPARK_OPTS__\": \"--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --driver-memory 2G --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"__TOREE_OPTS__\": \"--alternate-sigint USR2\",\n    \"HTTP2_DISABLE\": \"true\",\n    \"LAUNCH_OPTS\": \"\",\n    \"DEFAULT_INTERPRETER\": \"Scala\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_scala_kubernetes/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    
\"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"eager\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_yarn_client/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting Scala kernel for Spark in Yarn Client mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\nKERNEL_ASSEMBLY=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-assembly-*.jar;)`\nTOREE_ASSEMBLY=\"${PROG_HOME}/lib/${KERNEL_ASSEMBLY}\"\nif [ ! -f ${TOREE_ASSEMBLY} ]; then\n    echo \"Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\n# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during\n# install, but also during runtime. The runtime options take precedence over the install options.\nif [ \"${SPARK_OPTS}\" = \"\" ]; then\n   SPARK_OPTS=${__TOREE_SPARK_OPTS__}\nfi\n\nif [ \"${TOREE_OPTS}\" = \"\" ]; then\n   TOREE_OPTS=${__TOREE_OPTS__}\nfi\n\n# Toree launcher jar path, plus required lib jars (toree-assembly)\nJARS=\"${TOREE_ASSEMBLY}\"\n# Toree launcher app path\nLAUNCHER_JAR=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-launcher*.jar;)`\nLAUNCHER_APP=\"${PROG_HOME}/lib/${LAUNCHER_JAR}\"\nif [ ! -f ${LAUNCHER_APP} ]; then\n    echo \"Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\nset -x\neval exec \"${IMPERSONATION_OPTS}\" \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     --jars \"${JARS}\" \\\n     --class launcher.ToreeLauncher \\\n     \"${LAUNCHER_APP}\" \\\n     \"${TOREE_OPTS}\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_yarn_client/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Spark - Scala (YARN Client Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"__TOREE_SPARK_OPTS__\": \"--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"__TOREE_OPTS__\": \"--alternate-sigint USR2\",\n    \"LAUNCH_OPTS\": \"\",\n    \"DEFAULT_INTERPRETER\": \"Scala\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_yarn_cluster/bin/run.sh",
    "content": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}\"\n        USER_CLAUSE=\"as user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nelse\n        IMPERSONATION_OPTS=\"\"\n        USER_CLAUSE=\"on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}\"\nfi\n\necho\necho \"Starting Scala kernel for Spark in Yarn Cluster mode ${USER_CLAUSE}\"\necho\n\nif [ -z \"${SPARK_HOME}\" ]; then\n  echo \"SPARK_HOME must be set to the location of a Spark distribution!\"\n  exit 1\nfi\n\nPROG_HOME=\"$(cd \"`dirname \"$0\"`\"/..; pwd)\"\nKERNEL_ASSEMBLY=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-assembly-*.jar;)`\nTOREE_ASSEMBLY=\"${PROG_HOME}/lib/${KERNEL_ASSEMBLY}\"\nif [ ! -f ${TOREE_ASSEMBLY} ]; then\n    echo \"Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\n# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during\n# install, but also during runtime. The runtime options take precedence over the install options.\nif [ \"${SPARK_OPTS}\" = \"\" ]; then\n   SPARK_OPTS=${__TOREE_SPARK_OPTS__}\nfi\n\nif [ \"${TOREE_OPTS}\" = \"\" ]; then\n   TOREE_OPTS=${__TOREE_OPTS__}\nfi\n\n# Toree launcher jar path, plus required lib jars (toree-assembly)\nJARS=\"${TOREE_ASSEMBLY}\"\n# Toree launcher app path\nLAUNCHER_JAR=`(cd \"${PROG_HOME}/lib\"; ls -1 toree-launcher*.jar;)`\nLAUNCHER_APP=\"${PROG_HOME}/lib/${LAUNCHER_JAR}\"\nif [ ! -f ${LAUNCHER_APP} ]; then\n    echo \"Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing.  Exiting...\"\n    exit 1\nfi\n\nset -x\neval exec \\\n     \"${SPARK_HOME}/bin/spark-submit\" \\\n     \"${SPARK_OPTS}\" \\\n     \"${IMPERSONATION_OPTS}\" \\\n     --jars \"${JARS}\" \\\n     --class launcher.ToreeLauncher \\\n     \"${LAUNCHER_APP}\" \\\n     \"${TOREE_OPTS}\" \\\n     \"${LAUNCH_OPTS}\" \\\n     \"$@\"\nset +x\n"
  },
  {
    "path": "etc/kernelspecs/spark_scala_yarn_cluster/kernel.json",
    "content": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Spark - Scala (YARN Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy\"\n    }\n  },\n  \"env\": {\n    \"SPARK_HOME\": \"/usr/hdp/current/spark2-client\",\n    \"__TOREE_SPARK_OPTS__\": \"--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}\",\n    \"__TOREE_OPTS__\": \"--alternate-sigint USR2\",\n    \"LAUNCH_OPTS\": \"\",\n    \"DEFAULT_INTERPRETER\": \"Scala\"\n  },\n  \"argv\": [\n    \"/usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/bin/run.sh\",\n    \"--RemoteProcessProxy.kernel-id\",\n    \"{kernel_id}\",\n    \"--RemoteProcessProxy.response-address\",\n    \"{response_address}\",\n    \"--RemoteProcessProxy.public-key\",\n    \"{public_key}\",\n    \"--RemoteProcessProxy.port-range\",\n    \"{port_range}\",\n    \"--RemoteProcessProxy.spark-context-initialization-mode\",\n    \"lazy\"\n  ]\n}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/Chart.yaml",
    "content": "apiVersion: v2\nname: enterprise-gateway\ndescription: A helm chart to deploy Jupyter Enterprise Gateway\n# This is the chart version. This version number should be incremented each time you make changes\n# to the chart and its templates, including the app version.\n# Versions are expected to follow Semantic Versioning (https://semver.org/)\nversion: 3.3.0-dev0\n\n# This is the version number of the application being deployed. This version number should be\n# incremented each time you make changes to the application. Versions are not expected to\n# follow Semantic Versioning. They should reflect the version the application is using.\nappVersion: 3.3.0.dev0\n\nicon: https://avatars1.githubusercontent.com/u/7388996?s=200&v=4\nhome: https://jupyter.org\n\n# A chart can be either an 'application' or a 'library' chart.\n#\n# Application charts are a collection of templates that can be packaged into versioned archives\n# to be deployed.\n#\n# Library charts provide useful utilities or functions for the chart developer. They're included as\n# a dependency of application charts to inject those utilities and functions into the rendering\n# pipeline. Library charts do not define any templates and therefore cannot be deployed.\ntype: application\n\nsources:\n  - https://github.com/jupyter-server/enterprise_gateway\nkubeVersion: '>=1.18.0-0'\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml",
    "content": "{{- if .Values.kip.enabled }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: kernel-image-puller\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\n  labels:\n    gateway-selector: enterprise-gateway\n    app: enterprise-gateway\n    component: kernel-image-puller\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\nspec:\n  selector:\n    matchLabels:\n      name: kernel-image-puller\n  template:\n    metadata:\n      labels:\n        name: kernel-image-puller\n        app: enterprise-gateway\n        component: kernel-image-puller\n        chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n        release: {{ .Release.Name }}\n        heritage: {{ .Release.Service }}\n      {{- range $key, $val := .Values.global.commonLabels }}\n        {{ $key }}: \"{{ $val }}\"\n      {{- end }}\n    spec:\n      serviceAccountName: {{ .Values.kip.serviceAccountName }}\n      containers:\n      - name: kernel-image-puller\n        image: {{ .Values.kip.image }}\n        imagePullPolicy: {{ .Values.kip.imagePullPolicy }}\n        env:\n        - name: KIP_LOG_LEVEL\n          value: {{ .Values.logLevel }}\n        - name: KIP_GATEWAY_HOST\n          value: \"{{ .Values.kip.server.protocol }}://{{ .Values.kip.serviceName }}.{{ .Release.Namespace }}:{{ .Values.kip.server.port }}\"\n        - name: KIP_VALIDATE_CERT\n          value: {{ .Values.kip.server.validate_cert | quote }}\n        - name: KIP_INTERVAL\n          value: !!str {{ .Values.kip.interval }}\n        - name: KIP_PULL_POLICY\n          value: {{ .Values.kip.pullPolicy }}\n        - name: KIP_CRI_ENDPOINT\n          value: \"unix://{{ .Values.kip.criSocket }}\"\n        {{- if .Values.kip.defaultContainerRegistry }}\n        - name: 
KIP_DEFAULT_CONTAINER_REGISTRY\n          value: {{ .Values.kip.defaultContainerRegistry }}\n        {{- end }}\n        # Optional authorization token passed in all requests (should match EG_AUTH_TOKEN)\n        {{- if .Values.authToken }}\n        - name: KIP_AUTH_TOKEN\n          value: {{ .Values.authToken }}\n        {{- end }}\n        # fetcher to fetch image names, defaults to KernelSpecsFetcher\n        {{- if .Values.kip.fetcher }}\n        - name: KIP_IMAGE_FETCHER\n          value: \"{{ .Values.kip.fetcher }}\"\n        {{- end }}\n        # if CombinedImagesFetcher is used KIP_INTERNAL_FETCHERS defines the fetchers that get used internally\n        {{- if .Values.kip.internalFetcher }}\n        - name: KIP_INTERNAL_FETCHERS\n          value: \"{{ .Values.kip.internalFetcher }}\"\n        {{- end }}\n        # if StaticListFetcher is used KIP_IMAGES defines the list of images pullers will fetch\n        {{- if .Values.kip.images}}\n        - name: KIP_IMAGES\n          value: \"{{ .Values.kip.images }}\"\n        {{- end }}\n        {{- if .Values.kip.resources }}\n        resources:\n          {{- toYaml .Values.kip.resources | nindent 10 }}\n        {{- end }}\n\n        volumeMounts:\n        - name: cri-socket\n          mountPath: !!str {{ .Values.kip.criSocket }}  # see env KIP_CRI_ENDPOINT\n          readOnly: true\n      volumes:\n      - name: cri-socket\n        hostPath:\n          path: {{ .Values.kip.criSocket }}\n\n  {{- if .Values.kip.tolerations }}\n      tolerations:\n        {{- toYaml .Values.kip.tolerations | nindent 8 }}\n  {{- end }}\n  {{- if .Values.kip.nodeSelector }}\n      nodeSelector:\n        {{- toYaml .Values.kip.nodeSelector | nindent 8 }}\n  {{- end }}\n  {{- if .Values.kip.affinity }}\n      affinity:\n        {{- toYaml .Values.kip.affinity | nindent 8 }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml",
    "content": "{{- if .Values.deployment.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: enterprise-gateway\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\n  labels:\n    gateway-selector: enterprise-gateway\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\nspec:\n  replicas: {{ .Values.deployment.replicas }}\n  selector:\n    matchLabels:\n      gateway-selector: enterprise-gateway\n  template:\n    metadata:\n      labels:\n        gateway-selector: enterprise-gateway\n        app: enterprise-gateway\n        component: enterprise-gateway\n      {{- range $key, $val := .Values.global.commonLabels }}\n        {{ $key }}: \"{{ $val }}\"\n      {{- end }}\n    spec:\n      # Created by this chart.\n      serviceAccountName: {{ .Values.deployment.serviceAccountName }}\n      terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }}\n  {{- if .Values.kernelspecs.image }}\n      initContainers:\n      - name: kernelspecs\n        image: {{ .Values.kernelspecs.image }}\n        imagePullPolicy: {{ .Values.kernelspecs.imagePullPolicy }}\n        args: [\"cp\", \"-r\", \"/kernels\", \"/usr/local/share/jupyter\"]\n        volumeMounts:\n        - name: image-kernelspecs\n          mountPath: \"/usr/local/share/jupyter/kernels\"\n  {{- end }}\n      containers:\n      - name: enterprise-gateway\n        image: {{ .Values.image }}\n        imagePullPolicy: {{ .Values.imagePullPolicy }}\n        env:\n        - name: EG_PORT\n          {{ with index .Values.service.ports 0 }}\n          value: !!str {{ .port }}\n          {{- end }}\n        - name: EG_RESPONSE_PORT\n          {{ with index .Values.service.ports 1 }}\n          value: !!str {{ 
.port }}\n          {{- end }}\n        - name: EG_NAMESPACE\n          value: {{ .Release.Namespace }}\n        - name: EG_KERNEL_CLUSTER_ROLE\n          value: {{ .Values.kernel.clusterRole }}\n        - name: EG_SHARED_NAMESPACE\n          value: {{ if .Values.kernel.shareGatewayNamespace }}\"True\"{{ else }}\"False\"{{ end }}\n        - name: EG_MIRROR_WORKING_DIRS\n          value: {{ if .Values.mirrorWorkingDirs }}\"True\"{{ else }}\"False\"{{ end }}\n        - name: EG_CULL_IDLE_TIMEOUT\n          value: !!str {{ .Values.kernel.cullIdleTimeout }}\n        - name: EG_CULL_CONNECTED\n          value: {{ if .Values.kernel.cullConnected }}\"True\"{{ else }}\"False\"{{ end }}\n        - name: EG_LOG_LEVEL\n          value: {{ .Values.logLevel }}\n        - name: EG_KERNEL_LAUNCH_TIMEOUT\n          value: !!str {{ .Values.kernel.launchTimeout }}\n        - name: EG_KERNEL_INFO_TIMEOUT\n          value: !!str {{ .Values.kernel.infoTimeout }}\n        - name: EG_ALLOWED_KERNELS\n          value: {{ toJson .Values.kernel.allowedKernels | squote }}\n        - name: EG_DEFAULT_KERNEL_NAME\n          value: {{ .Values.kernel.defaultKernelName }}\n        - name: EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME\n          value: {{ .Values.kernel.defaultServiceAccountName }}\n        # Optional authorization token passed in all requests\n        {{- if .Values.authToken }}\n        - name: EG_AUTH_TOKEN\n          value: {{ .Values.authToken }}\n        {{- end }}\n        {{- if .Values.deployment.extraEnv }}\n        {{- range $key, $val := .Values.deployment.extraEnv }}\n        - name: {{ $key }}\n          value: |-\n{{ $val | indent 12 }}\n        {{- end }}\n        {{- end }}\n        ports:\n        {{ with index .Values.service.ports 0 }}\n        - containerPort: {{ .port }}\n        {{- end }}\n        {{ with index .Values.service.ports 1 }}\n        - containerPort: {{ .port }}\n        {{- end }}\n        {{- if .Values.deployment.resources }}\n        resources:\n  
        {{- toYaml .Values.deployment.resources | nindent 10 }}\n        {{- end }}\n\n  {{- if .Values.nfs.enabled }}\n        volumeMounts:\n        - name: nfs-kernelspecs\n          mountPath: \"/usr/local/share/jupyter/kernels\"\n      volumes:\n      - name: nfs-kernelspecs\n        nfs:\n          server: {{ .Values.nfs.internalServerIPAddress }}\n          path: \"/usr/local/share/jupyter/kernels\"\n  {{- else if .Values.kernelspecsPvc.enabled }}\n        volumeMounts:\n        - name: pvc-kernelspecs\n          mountPath: \"/usr/local/share/jupyter/kernels\"\n      volumes:\n      - name: pvc-kernelspecs\n        persistentVolumeClaim:\n          claimName: {{ .Values.kernelspecsPvc.name }}\n  {{- else if .Values.kernelspecs.image }}\n        volumeMounts:\n        - name: image-kernelspecs\n          mountPath: \"/usr/local/share/jupyter/kernels\"\n      volumes:\n      - name: image-kernelspecs\n        emptyDir:\n          medium: Memory\n  {{- end }}\n\n  {{- if .Values.deployment.tolerations }}\n      tolerations:\n      {{- toYaml .Values.deployment.tolerations | nindent 8 }}\n  {{- end }}\n  {{- if .Values.deployment.nodeSelector }}\n      nodeSelector:\n      {{- toYaml .Values.deployment.nodeSelector | nindent 8 }}\n  {{- end }}\n  {{- if .Values.deployment.affinity }}\n      affinity:\n      {{- toYaml .Values.deployment.affinity | nindent 8 }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrole.yaml",
    "content": "{{- if and  (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: enterprise-gateway-controller\n  labels:\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"pods\", \"namespaces\", \"services\", \"configmaps\", \"secrets\", \"persistentvolumes\", \"persistentvolumeclaims\"]\n    verbs: [\"get\", \"watch\", \"list\", \"create\", \"delete\"]\n  - apiGroups: [\"rbac.authorization.k8s.io\"]\n    resources: [\"rolebindings\"]\n    verbs: [\"get\", \"list\", \"create\", \"delete\"]\n  - apiGroups: [\"sparkoperator.k8s.io\"]\n    resources: [\"sparkapplications\", \"sparkapplications/status\", \"scheduledsparkapplications\", \"scheduledsparkapplications/status\"]\n    verbs: [\"get\", \"watch\", \"list\", \"create\", \"delete\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  # Referenced by EG_KERNEL_CLUSTER_ROLE in the Deployment\n  name: kernel-controller\n  labels:\n    app: enterprise-gateway\n    component: kernel\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"pods\"]\n    verbs: [\"get\", \"watch\", \"list\", \"create\", \"delete\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"list\", \"create\"]\n  - apiGroups: [\"\"]\n    resources: [\"services\", \"persistentvolumeclaims\"]\n    verbs: [\"list\"]\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrolebinding.yaml",
    "content": "{{- if and  (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: enterprise-gateway-controller\n  labels:\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\nsubjects:\n  - kind: ServiceAccount\n    name: {{ .Values.deployment.serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: enterprise-gateway-controller\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/eg-serviceaccount.yaml",
    "content": "{{- if and  (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}\n---\napiVersion: v1\nkind: ServiceAccount\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n  {{- $parent := . -}}\n  {{- range .Values.global.imagePullSecrets }}\n  - name: {{ . }}\n  {{- end }}\n{{- end }}\nmetadata:\n  name: {{ .Values.deployment.serviceAccountName }}\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\n  labels:\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n{{- if .Values.deployment.annotations }}\n  annotations:\n  {{- range $key, $val := .Values.deployment.annotations }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/imagepullSecret.yaml",
    "content": "{{- if and (.Values.imagePullSecretsCreate.enabled) (.Values.imagePullSecretsCreate.secrets) -}}\n---\n{{- $root := .Values }}\n{{- range .Values.imagePullSecretsCreate.secrets }}\napiVersion: v1\ndata:\n  .dockerconfigjson: {{ .data }}\nkind: Secret\nmetadata:\n  name: {{ .name }}\n{{- if ($root.imagePullSecretsCreate.annotations) -}}\n  {{- with $root.imagePullSecretsCreate.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\n{{- end }}\n\ntype: kubernetes.io/dockerconfigjson\n{{- end }}\n---\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml",
    "content": "{{ if .Values.ingress.enabled }}\n{{- $parent := . -}}\n{{- if semverCompare \">=1.19-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1\n{{- else if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\n  name: enterprise-gateway-ingress\n{{- if .Values.ingress.annotations }}\n  annotations:\n{{ toYaml .Values.ingress.annotations | indent 4}}\n{{- end }}\nspec:\n{{ if .Values.ingress.ingressClassName }}\n  ingressClassName: {{ .Values.ingress.ingressClassName }}\n{{ end }}\n  rules:\n    - host: {{ .Values.ingress.hostName }}\n      http:\n        paths:\n          - path: {{ .Values.ingress.path }}\n            {{- if semverCompare \">=1.19-0\" $.Capabilities.KubeVersion.GitVersion }}\n            pathType: {{ .Values.ingress.pathType }}\n            {{- end }}\n            backend:\n            {{- if semverCompare \">=1.19-0\" $.Capabilities.KubeVersion.GitVersion }}\n              service:\n                name: \"enterprise-gateway\"\n                port:\n                {{ with index .Values.service.ports 0 }}\n                  number: {{ .port }}\n                {{ end }}\n              {{- else }}\n              serviceName: \"enterprise-gateway\"\n              {{ with index .Values.service.ports 0 }}\n              servicePort: {{ .port }}\n              {{- end }}\n            {{- end }}\n  {{- if .Values.ingress.tls }}\n  tls:\n    {{- range .Values.ingress.tls }}\n    - hosts:\n      {{- range .hosts }}\n        - {{ . }}\n      {{- end }}\n      secretName: {{ .secretName }}\n    {{- end }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/kip-clusterrole.yaml",
    "content": "{{- if and  (.Values.kip.serviceAccountName) (.Values.global.rbac) }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: kip-controller\n  labels:\n    app: enterprise-gateway\n    component: kernel-image-puller\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"pods\"]\n    verbs: [\"get\", \"watch\", \"list\", \"create\", \"delete\"]\n  {{- if .Values.kip.podSecurityPolicy.create }}\n  - apiGroups:\n      - policy\n    resources:\n      - podsecuritypolicies\n    resourceNames:\n      - \"kip-psp\"\n    verbs:\n      - use\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/kip-clusterrolebinding.yaml",
    "content": "{{- if and  (.Values.kip.serviceAccountName) (.Values.global.rbac) }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: enterprise-gateway-kip\n  labels:\n    app: enterprise-gateway\n    component: kernel-image-puller\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n\nsubjects:\n  - kind: ServiceAccount\n    name: {{ .Values.kip.serviceAccountName }}\n    namespace: {{ .Release.Namespace }}\nroleRef:\n  kind: ClusterRole\n  name: kip-controller\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/kip-serviceaccount.yaml",
    "content": "{{- if and  (.Values.kip.serviceAccountName) (.Values.global.rbac) }}\n\napiVersion: v1\nkind: ServiceAccount\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n  {{- $parent := . -}}\n  {{- range .Values.global.imagePullSecrets }}\n  - name: {{ . }}\n  {{- end }}\n{{- end }}\nmetadata:\n  name: {{ .Values.kip.serviceAccountName }}\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\n  labels:\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n{{- if .Values.kip.annotations }}\n  annotations:\n  {{- range $key, $val := .Values.kip.annotations }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/psp.yaml",
    "content": "{{- if and (.Values.kip.podSecurityPolicy.create) (.Values.global.rbac) }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: \"kip-psp\"\n{{- if .Values.kip.podSecurityPolicy.annotations }}\n  annotations:\n    {{- toYaml .Values.kip.podSecurityPolicy.annotations | nindent 4 }}\n{{- end }}\nspec:\n  privileged: false\n  # Required to prevent escalations to root.\n  allowPrivilegeEscalation: false\n  # This is redundant with non-root + disallow privilege escalation,\n  # but we can provide it for defense in depth.\n  requiredDropCapabilities:\n    - ALL\n  hostNetwork: false\n  hostIPC: false\n  hostPID: false\n  runAsUser:\n    # TODO: Require the container to run without root privileges.\n    rule: 'RunAsAny'\n  seLinux:\n    # This policy assumes the nodes are using AppArmor rather than SELinux.\n    rule: 'RunAsAny'\n  supplementalGroups:\n    rule: 'MustRunAs'\n    ranges:\n      # Forbid adding the root group.\n      - min: 1\n        max: 65535\n  fsGroup:\n    rule: 'MustRunAs'\n    ranges:\n      # Forbid adding the root group.\n      - min: 1\n        max: 65535\n  readOnlyRootFilesystem: false\n  allowedHostPaths:\n    - pathPrefix: /var/run\n      readOnly: true # only allow read-only mounts\n  volumes:\n    - '*'\n    # - 'secret'\n    # - 'hostPath'\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/templates/service.yaml",
    "content": "{{- if and .Values.deployment.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: enterprise-gateway\n    component: enterprise-gateway\n    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n    release: {{ .Release.Name }}\n    heritage: {{ .Release.Service }}\n  {{- range $key, $val := .Values.global.commonLabels }}\n    {{ $key }}: \"{{ $val }}\"\n  {{- end }}\n\n  name: enterprise-gateway\n  namespace: {{ .Values.namespace | default .Release.Namespace }}\nspec:\n  ports:\n    {{- range $key, $val := .Values.service.ports }}\n    -\n      {{- range $pkey, $pval := $val }}\n      {{ $pkey}}: {{ $pval }}\n      {{- end }}\n    {{- end }}\n  selector:\n    gateway-selector: enterprise-gateway\n  sessionAffinity: ClientIP\n  type: {{ .Values.service.type }}\n  {{- if .Values.service.externalIPs.k8sMasterPublicIP }}\n  externalIPs:\n  - {{ .Values.service.externalIPs.k8sMasterPublicIP }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "etc/kubernetes/helm/enterprise-gateway/values.yaml",
    "content": "global:\n  # Create RBAC resources\n  rbac: true\n  # ImagePullSecrets for a ServiceAccount, list of secrets in the same namespace\n  # to use for pulling any images in pods that reference this ServiceAccount.\n  # Must be set for any cluster configured with private docker registry.\n  imagePullSecrets: []\n    # - private-registry-key\n  commonLabels: {}\n    # app.kubernetes.io/name: [your app name]\n\n# You can optionally create imagePull Secrets\nimagePullSecretsCreate:\n  enabled: false\n  annotations: {}\n    # this annotation allows keeping the secret even if the helm release is deleted\n    # \"helm.sh/resource-policy\": \"keep\"\n  secrets: []\n    # - name: \"private\"\n    # # base64 encoded value\n    #   --set imagePullSecretsCreate.secrets[0].data=\"UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K\"\n    # - name: \"private2\"\n    #   --set imagePullSecretsCreate.secrets[1].data=\"UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K\"\n\n# Enterprise Gateway image name and tag to use.\nimage: elyra/enterprise-gateway:dev\n# Enterprise Gateway image pull policy.\nimagePullPolicy: IfNotPresent\n# K8s Enterprise Gateway Service\nservice:\n  type: \"NodePort\"\n  externalIPs:\n    # Master public IP on which to expose EG.\n    k8sMasterPublicIP: ''\n  ports:\n    # The primary port on which Enterprise Gateway is servicing requests.\n    - name: \"http\"\n      port: 8888\n      targetPort: 8888\n      # nodePort: 32652 # optional nodePort\n    # The port on which Enterprise Gateway will receive kernel connection info responses.\n    - name: \"http-response\"\n      port: 8877\n      targetPort: 8877\n      # nodePort: 30481 # optional nodePort\n\ndeployment:\n  enabled: true\n  serviceAccountName: 'enterprise-gateway-sa'\n  terminationGracePeriodSeconds: 30\n  annotations: {}\n  resources: {}\n  # resources:\n  #   limits:\n  #     cpu: 2\n  #     memory: 10Gi\n  #   requests:\n  #     cpu: 1\n  #     memory: 2Gi\n  # Update to deploy multiple replicas of EG.\n  replicas: 1\n  
tolerations: []\n  affinity: {}\n  nodeSelector: {}\n  extraEnv: {\n    # SOME_ENV_VAR_WITH_SIMPLE_VALUE: \"example\"\n    # SOME_ENV_VAR_WITH_LONG_VALUE: >\n    #   'this example',\n    #   'will not preserve',\n    #   'line breaks',\n    # SOME_ENV_VAR_WITH_MULTILINE_VALUE: |\n    #   this example\n    #   will preserve\n    #   line breaks\n    EG_INHERITED_ENVS: \"PATH\"\n  }\n\n# Log output level.\nlogLevel: DEBUG\n# Whether to mirror working directories.\nmirrorWorkingDirs: false\n# Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token)\nauthToken:\n\nkernel:\n  # Kernel cluster role created by this chart.\n  clusterRole: kernel-controller\n  # Will start kernels in the same namespace as EG if True.\n  shareGatewayNamespace: false\n  # Timeout for kernel launching in seconds.\n  launchTimeout: 60\n  # Timeout for an idle kernel before it's culled in seconds. Default is 1 hour.\n  cullIdleTimeout: 3600\n  # Whether to cull idle kernels with connecting clients\n  cullConnected: false\n  # List of kernel names that are available for use. 
To allow additional kernelspecs without\n  # requiring redeployment (and assuming kernelspecs are mounted or otherwise accessible\n  # outside the pod), comment out (or remove) the entries, leaving only `allowedKernels:`.\n  allowedKernels:\n    - r_kubernetes\n    - python_kubernetes\n    - python_tf_kubernetes\n    - python_tf_gpu_kubernetes\n    - scala_kubernetes\n    - spark_r_kubernetes\n    - spark_python_kubernetes\n    - spark_scala_kubernetes\n    - spark_python_operator\n  # Default kernel name should be something from the allowedKernels\n  defaultKernelName: python_kubernetes\n  # Service account name to use for kernel pods when no service account is specified.\n  # This service account should exist in the namespace where kernel pods are launched.\n  defaultServiceAccountName: default\n\nkernelspecs:\n  # Optional custom data image containing kernelspecs to use.\n  image:\n  # Kernelspecs image pull policy.\n  imagePullPolicy: Always\n\nnfs:\n  enabled: false\n  # IP address of NFS server. Required if enabled.\n  internalServerIPAddress:\n\nkernelspecsPvc:\n  enabled: false\n  # PVC name. Required if you want to mount kernelspecs without NFS. The PVC should be created in the same namespace before EG is deployed.\n  name:\n\ningress:\n  ingressClassName: # available since k8s 1.18. 
Depending on the ingress controller, you can use annotation as alternative\n  enabled: false\n  hostName: \"\" # Ingress resource host\n  pathType: \"Prefix\"\n  path: \"/\"\n  annotations: {}\n  # Optional TLS section\n  # tls:\n  #   - secretName: \"mysecret-name-tls\"\n  #     hosts:\n  #       - myhost.example.com\n\n\n# Kernel Image Puller (daemonset)\nkip:\n  enabled: true\n  serviceAccountName: 'kernel-image-puller-sa'\n  serviceName: enterprise-gateway\n  server: # EG server properties.\n    protocol: http\n    port: 8888\n    validate_cert: false\n  podSecurityPolicy:\n    # Note: PodSecurityPolicy is deprecated as of 1.21 and removed in 1.25.\n    # Operators deploying into k8s clusters >= 1.25 that require PSP equivalency will need to\n    # look into alternatives like Gatekeeper (https://github.com/open-policy-agent/gatekeeper).\n    # Creation of PSP in KIP is disabled by default.\n    create: false\n    annotations: {}\n  # Kernel Image Puller image name and tag to use.\n  image: elyra/kernel-image-puller:dev\n  # Kernel Image Puller image pull policy.\n  imagePullPolicy: IfNotPresent\n  # Determines whether the Kernel Image Puller will pull kernel images it has previously pulled\n  pullPolicy: IfNotPresent\n  # The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images.\n  interval: 300\n  # The container runtime interface socket, use /run/containerd/containerd.sock for containerd installations\n  criSocket: /var/run/docker.sock\n  # Prefix to use if a registry is not already specified on image name (e.g., quay.io/elyra/kernel-py:2.5.0)\n  defaultContainerRegistry: docker.io\n  fetcher: KernelSpecsFetcher\n  annotations: {}\n  tolerations: []\n  affinity: {}\n  nodeSelector: {}\n  resources: {}\n  # resources:\n  #   limits:\n  #     cpu: 1\n  #     memory: 1Gi\n  #   requests:\n  #     cpu: 1\n  #     memory: 1Gi\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"hatchling>=1.21.1\"]\nbuild-backend = \"hatchling.build\"\n\n[project]\nname = \"jupyter_enterprise_gateway\"\nversion = \"3.3.0.dev0\"\ndescription = \"A web server for spawning and communicating with remote Jupyter kernels\"\nlicense = { file = \"LICENSE.md\" }\nkeywords = [\"Interactive\",\"Interpreter\",\"Kernel\", \"Web\", \"Cloud\"]\nclassifiers = [\n  \"Intended Audience :: Developers\", \"Intended Audience :: Science/Research\",\n  \"Intended Audience :: System Administrators\",\n  \"License :: OSI Approved :: BSD License\",\n  \"Programming Language :: Python\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3 :: Only\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\"\n]\nrequires-python = \">=3.10\"\ndependencies = [\n  \"docker>=3.5.0\",\n  \"future\",\n  \"jinja2>=3.1\",\n  \"jupyter_client>=6.1.12,<7\",  # Remove cap once EG supports kernel provisioners\n  \"jupyter_core>=4.7.0\",\n  \"kubernetes>=18.20.0\",\n  \"jupyter_server>=1.7,<2.0\",  # Remove cap (increase floor) once EG suport kernel provisioners\n  \"paramiko>=2.11\",\n  \"pexpect>=4.8.0\",\n  \"pycryptodomex>=3.9.7\",\n  \"pyzmq>=20.0,<25.0\",  # Pyzmq 25 removes deprecated code that jupyter_client 6 uses, remove if v6 gets updated\n  \"requests>=2.14.2\",\n  \"tornado>=6.1\",\n  \"traitlets>=5.3.0\",\n  \"watchdog>=2.1.3\",\n  \"yarn-api-client>=1.0\"\n]\n\n[[project.authors]]\nname = \"Jupyter Development Team\"\nemail = \"jupyter@googlegroups.com\"\n\n[project.readme]\ntext = \"A lightweight, multi-tenant, scalable, and secure gateway that enables\\nJupyter Notebooks to share resources across distributed clusters such as\\nApache Spark, Kubernetes and others.\"\ncontent-type = \"text/plain\"\n\n[project.urls]\nHomepage = \"http://github.com/jupyter/enterprise_gateway\"\n\n[project.scripts]\njupyter-enterprisegateway = 
\"enterprise_gateway.enterprisegatewayapp:launch_instance\"\n\n[project.optional-dependencies]\ntest = [\n  \"coverage\",\n  \"pytest<8.1.0\",\n  \"pytest-tornasync\",\n  \"ipykernel\",\n  \"pre-commit\",\n  \"websocket-client\"\n]\nlint = [\n  \"black[jupyter]==24.2.0\",\n  \"mdformat>0.7\",\n  \"mdformat-gfm>=0.3.5\",\n  \"ruff==0.3.0\"\n]\n\n[tool.ruff.pylint]\nmax-args = 10\nmax-statements = 60\n\n[tool.hatch.build.targets.wheel]\ninclude = [\"enterprise_gateway\"]\n\n[tool.tbump.version]\ncurrent = \"3.3.0.dev0\"\nregex = '''\n  (?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)\n  ((?P<channel>a|b|rc|.dev)(?P<release>\\d+))?\n'''\n\n[tool.tbump.git]\nmessage_template = \"Bump to {new_version}\"\ntag_template = \"v{new_version}\"\n\n[[tool.tbump.file]]\nsrc = \"enterprise_gateway/_version.py\"\n\n[[tool.tbump.file]]\nsrc = \"pyproject.toml\"\n\n[[tool.tbump.file]]\nsrc = \"Makefile\"\n\n[[tool.tbump.file]]\nsrc = \"etc/kubernetes/helm/enterprise-gateway/Chart.yaml\"\nsearch = 'appVersion: {current_version}'\n\n[[tool.tbump.field]]\nname = \"channel\"\ndefault = \"\"\n\n[[tool.tbump.field]]\nname = \"release\"\ndefault = \"\"\n\n[tool.pytest.ini_options]\naddopts = \"-raXs --durations 10 --color=yes --doctest-modules\"\ntestpaths = [\n    \"enterprise_gateway/tests/\"\n]\nfilterwarnings = [\n  \"error\",\n  \"ignore:There is no current event loop:DeprecationWarning\",\n  \"ignore:Passing unrecognized arguments to super:DeprecationWarning:jupyter_client\",\n  \"ignore:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning\",\n]\n\n[tool.coverage.report]\nexclude_lines = [\n  \"pragma: no cover\",\n  \"def __repr__\",\n  \"if self.debug:\",\n  \"if settings.DEBUG\",\n  \"raise AssertionError\",\n  \"raise NotImplementedError\",\n  \"if 0:\",\n  \"if __name__ == .__main__.:\",\n  \"class .*\\bProtocol\\\\):\",\n\"@(abc\\\\.)?abstractmethod\",\n]\n\n[tool.black]\nline-length = 100\ntarget-version = [\"py37\"]\nskip-string-normalization = 
true\nextend-exclude = \"enterprise_gateway/.*ipynb\"\n\n[tool.ruff]\ntarget-version = \"py37\"\nline-length = 100\nselect = [\n  \"A\", \"B\", \"C\", \"DTZ\", \"E\", \"EM\", \"F\", \"FBT\", \"I\", \"ICN\", \"ISC\", \"N\",\n  \"PLC\", \"PLE\", \"PLW\", \"Q\", \"RUF\", \"S\", \"SIM\", \"T\", \"TID\", \"UP\",\n  \"W\", \"YTT\",\n]\nignore = [\n  # Q000 Single quotes found but double quotes preferred\n  \"Q000\",\n  # FBT001 Boolean positional arg in function definition\n  \"FBT001\", \"FBT002\", \"FBT003\",\n  # E501 Line too long (158 > 100 characters)\n  \"E501\",\n  # SIM105 Use `contextlib.suppress(...)`\n  \"SIM105\",\n  # S507 Paramiko call with policy set to automatically trust the unknown host key\n  \"S507\",\n  # S311 Standard pseudo-random generators are not suitable for cryptographic purposes\n  \"S311\",\n  # S603 `subprocess` call: check for execution of untrusted input\n  \"S603\",\n  # TID252 Relative imports from parent modules are banned\n  \"TID252\",\n  # N806 Variable `sqlContext` in function should be lowercase\n  \"N806\",\n  # PLR2004 Magic value used in comparison\n  \"PLR2004\",\n  # PLW0603 Using the global statement to update is discouraged\n  \"PLW0603\",\n  # PLW1508 Invalid type for environment variable default\n  \"PLW1508\",\n]\nunfixable = [\n  # Don't touch print statements\n  \"T201\",\n  # Don't touch noqa lines\n  \"RUF100\",\n]\n\n[tool.ruff.per-file-ignores]\n# S101 Use of `assert` detected\n# F841 local variable 'foo' is assigned to but never used\n# S105 Possible hardcoded password: `\"NeverHeardOf\"`\n# T201 `print` found\n\"enterprise_gateway/tests/*\" = [\"S101\", \"F841\", \"S105\", \"T201\"]\n\"enterprise_gateway/itests/*\" = [\"S101\", \"F841\", \"S105\", \"T201\"]\n# T201 `print` found\n\"etc/*\" = [\"T201\"]\n# C901 Function is too complex\n\"enterprise_gateway/client/gateway_client.py\" = [\"C901\"]  # `_read_responses` is too complex (12)\n\"etc/docker/kernel-image-puller/kernel_image_puller.py\" = [\"C901\"]  # 
`fetch_image_names` is too complex (13)\n\"enterprise_gateway/services/processproxies/k8s.py\" = [\"C901\"]  # `terminate_container_resources` is too complex (13)\n\"etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py\" = [\"C901\"]  # `launch_kubernetes_kernel` is too complex (32)\n\"etc/docker/kernel-image-puller/image_fetcher.py\" = [\"C901\"]  # `fetch_image_names` is too complex (11)\n\n\n[tool.interrogate]\nignore-init-module=true\nignore-private=true\nignore-semiprivate=true\nignore-property-decorators=true\nignore-nested-functions=true\nignore-nested-classes=true\nfail-under=100\nexclude = [\"docs\", \"*/tests\", \"*/itests\",\"conftest\"]\n"
  },
  {
    "path": "release.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfunction exit_with_usage {\n  cat << EOF\nrelease - Creates build distributions from a git commit hash or from HEAD.\nSYNOPSIS\nusage: release.sh [--release-prepare | --release-publish]\n\nDESCRIPTION\nPerform necessary tasks for a Jupyter Enterprise Gateway release\n\nrelease-prepare: This form creates a release tag and builds the release distribution artifacts.\n\n--release-prepare --currentVersion=\"2.0.0.dev0\" --releaseVersion=\"2.0.0\" --developmentVersion=\"2.1.0.dev1\" --previousVersion=\"2.0.0rc2\" [--tag=\"v2.0.0\"] [--gitCommitHash=\"a874b73\"]\n\nrelease-publish: Publish the release distribution artifacts to PyPI.\n\n--release-publish --tag=\"v2.0.0\"\n\nOPTIONS\n--currentVersion     - Current development version\n--releaseVersion     - Release identifier used when publishing\n--developmentVersion - Release identifier used for next development cycle\n--previousVersion    - Release identifier left in download links from previous release\n--tag                - Release Tag identifier used when taging the release, default 'v$releaseVersion'\n--gitCommitHash      - Release tag or commit to build from, default master HEAD\n--dryRun             - Dry run only, mostly used for testing.\n\nEXAMPLES\nrelease.sh --release-prepare --currentVersion=\"2.0.0.dev0\" --releaseVersion=\"2.0.0\" --developmentVersion=\"2.1.0.dev1\" --previousVersion=\"2.0.0rc2\"\nrelease.sh --release-prepare --currentVersion=\"2.0.0.dev0\" --releaseVersion=\"2.0.0\" --developmentVersion=\"2.1.0.dev1\" --previousVersion=\"2.0.0rc2\" --tag=\"v2.0.0\" --dryRun\n\nrelease.sh --release-publish --gitTag=\"v2.0.0\"\nEOF\n  exit 1\n}\n\nset -e\n\nif [ $# -eq 0 ]; then\n  exit_with_usage\nfi\n\n\n# Process each provided argument configuration\nwhile [ \"${1+defined}\" ]; do\n  IFS=\"=\" read -ra PARTS <<< \"$1\"\n  case \"${PARTS[0]}\" in\n    
--release-prepare)\n      GOAL=\"release-prepare\"\n      RELEASE_PREPARE=true\n      shift\n      ;;\n    --release-publish)\n      GOAL=\"release-publish\"\n      RELEASE_PUBLISH=true\n      shift\n      ;;\n    --release-snapshot)\n      GOAL=\"release-snapshot\"\n      RELEASE_SNAPSHOT=true\n      shift\n      ;;\n    --gitCommitHash)\n      GIT_REF=\"${PARTS[1]}\"\n      shift\n      ;;\n    --gitTag)\n      GIT_TAG=\"${PARTS[1]}\"\n      shift\n      ;;\n    --currentVersion)\n      CURRENT_VERSION=\"${PARTS[1]}\"\n      shift\n      ;;\n    --releaseVersion)\n      RELEASE_VERSION=\"${PARTS[1]}\"\n      shift\n      ;;\n    --developmentVersion)\n      DEVELOPMENT_VERSION=\"${PARTS[1]}\"\n      shift\n      ;;\n    --previousVersion)\n      PREVIOUS_VERSION=\"${PARTS[1]}\"\n      shift\n      ;;\n    --tag)\n      RELEASE_TAG=\"${PARTS[1]}\"\n      shift\n      ;;\n    --dryRun)\n      DRY_RUN=\"-DdryRun=true\"\n      shift\n      ;;\n\n    *help* | -h)\n      exit_with_usage\n     exit 0\n     ;;\n    -*)\n     echo \"Error: Unknown option: $1\" >&2\n     exit 1\n     ;;\n    *)  # No more options\n     break\n     ;;\n  esac\ndone\n\n\nif [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$RELEASE_VERSION\" ]]; then\n    echo \"ERROR: --releaseVersion must be passed as an argument to run this script\"\n    exit_with_usage\nfi\n\nif [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$DEVELOPMENT_VERSION\" ]]; then\n    echo \"ERROR: --developmentVersion must be passed as an argument to run this script\"\n    exit_with_usage\nfi\n\nif [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$PREVIOUS_VERSION\" ]]; then\n    echo \"ERROR: --previousVersion must be passed as an argument to run this script\"\n    exit_with_usage\nfi\n\nif [[ \"$RELEASE_PUBLISH\" == \"true\"  ]]; then\n    if [[ \"$GIT_REF\" && \"$GIT_TAG\" ]]; then\n        echo \"ERROR: Only one argumented permitted when publishing : --gitCommitHash or --gitTag\"\n        exit_with_usage\n    fi\n    if [[ -z 
\"$GIT_REF\" && -z \"$GIT_TAG\" ]]; then\n        echo \"ERROR: --gitCommitHash OR --gitTag must be passed as an argument to run this script\"\n        exit_with_usage\n    fi\nfi\n\nif [[ \"$RELEASE_PUBLISH\" == \"true\" && \"$DRY_RUN\" ]]; then\n    echo \"ERROR: --dryRun not supported for --release-publish\"\n    exit_with_usage\nfi\n\n# Commit ref to checkout when building\nGIT_REF=${GIT_REF:-HEAD}\nif [[ \"$RELEASE_PUBLISH\" == \"true\" && \"$GIT_TAG\" ]]; then\n    GIT_REF=\"tags/$GIT_TAG\"\nfi\n\nBASE_DIR=$(pwd)\nWORK_DIR=$(pwd)/build/release\nSOURCE_DIR=$(pwd)/build/release/enterprise_gateway\n\nif [ -z \"$RELEASE_TAG\" ]; then\n  RELEASE_TAG=\"v$RELEASE_VERSION\"\nfi\n\n\necho \"  \"\necho \"Base directory:   $BASE_DIR\"\necho \"Work directory:   $WORK_DIR\"\necho \"Source directory: $SOURCE_DIR\"\necho \"  \"\necho \"-------------------------------------------------------------\"\necho \"------- Release preparation with the following parameters ---\"\necho \"-------------------------------------------------------------\"\necho \"Executing            ==> $GOAL\"\necho \"Git reference        ==> $GIT_REF\"\necho \"Release version      ==> $RELEASE_VERSION\"\necho \"Development version  ==> $DEVELOPMENT_VERSION\"\necho \"Tag                  ==> $RELEASE_TAG\"\nif [ \"$DRY_RUN\" ]; then\n   echo \"dry run ?           
==> true\"\nfi\n\n\nset -o xtrace\n\nfunction checkout_code {\n    rm -rf $WORK_DIR\n    mkdir -p $WORK_DIR\n    cd $WORK_DIR\n    # Checkout code\n    git clone git@github.com:jupyter/enterprise_gateway.git\n    cd enterprise_gateway\n    git checkout $GIT_REF\n    git_hash=`git rev-parse --short HEAD`\n    echo \"Checked out Jupyter Enterprise Gateway git hash $git_hash\"\n}\n\nfunction update_version_to_release {\n    cd $SOURCE_DIR\n\n    # Update tbump-managed versions\n    pip install tbump\n    tbump --non-interactive --no-tag --no-push $RELEASE_VERSION\n\n    # Update Kubernetes Helm chart and values files (tbump will handle appVersion in Chart.yaml)\n    # We need to inject \"-\" prior to pre-release suffices for 'version:' since it follows strict semantic version rules.\n    # For example 3.0.0rc1 -> 3.0.0-rc1\n    k8s_version=`echo $RELEASE_VERSION | sed 's/\\([0-9]\\)\\([a-z]\\)/\\1-\\2/'`\n    sed -i .bak \"s@version: [0-9,\\.,a-z,-]*@version: $k8s_version@g\" etc/kubernetes/helm/enterprise-gateway/Chart.yaml\n\n    sed -i .bak \"s@elyra/enterprise-gateway:dev@elyra/enterprise-gateway:$RELEASE_VERSION@g\" etc/kubernetes/helm/enterprise-gateway/values.yaml\n    sed -i .bak \"s@elyra/kernel-image-puller:dev@elyra/kernel-image-puller:$RELEASE_VERSION@g\" etc/kubernetes/helm/enterprise-gateway/values.yaml\n\n    # Update Docker compose version\n    sed -i .bak \"s@elyra/enterprise-gateway:dev@elyra/enterprise-gateway:$RELEASE_VERSION@g\" etc/docker/docker-compose.yml\n    sed -i .bak \"s@elyra/kernel-image-puller:dev@elyra/kernel-image-puller:$RELEASE_VERSION@g\" etc/docker/docker-compose.yml\n\n    # Update documentation - this is a one-way change since links will not be valid in dev \"releases\".\n    find docs/source -name \"*.md\" -type f -exec sed -i .bak \"s@$PREVIOUS_VERSION@$RELEASE_VERSION@g\" {} \\;\n}\n\nfunction update_version_to_development {\n    cd $SOURCE_DIR\n\n    # Update tbump-managed versions\n    pip install tbump\n    tbump 
--non-interactive --no-tag --no-push $DEVELOPMENT_VERSION\n\n    # Update Kubernetes Helm chart and values files (tbump will handle appVersion in Chart.yaml)\n    # We need to replace \".devN\" suffix with \"-devN for 'version:' since it follows strict semantic version rules.\n    # For example 3.0.0.dev1 -> 3.0.0-dev1\n    k8s_version=`echo $DEVELOPMENT_VERSION | sed 's/\\.\\(dev\\)/-\\1/'`\n    sed -i .bak \"s@version: [0-9,\\.,a-z,-]*@version: $k8s_version@g\" etc/kubernetes/helm/enterprise-gateway/Chart.yaml\n\n    sed -i .bak \"s@elyra/enterprise-gateway:$RELEASE_VERSION@elyra/enterprise-gateway:dev@g\" etc/kubernetes/helm/enterprise-gateway/values.yaml\n    sed -i .bak \"s@elyra/kernel-image-puller:$RELEASE_VERSION@elyra/kernel-image-puller:dev@g\" etc/kubernetes/helm/enterprise-gateway/values.yaml\n\n    # Update Docker compose version\n    sed -i .bak \"s@elyra/enterprise-gateway:$RELEASE_VERSION@elyra/enterprise-gateway:dev@g\" etc/docker/docker-compose.yml\n    sed -i .bak \"s@elyra/kernel-image-puller:$RELEASE_VERSION@elyra/kernel-image-puller:dev@g\" etc/docker/docker-compose.yml\n}\n\nif [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\n    echo \"Preparing release $RELEASE_VERSION ($RELEASE_VERSION)\"\n    # Checkout code\n    checkout_code\n\n    update_version_to_release\n\n    cd $SOURCE_DIR\n    if [ -z \"$DRY_RUN\" ]; then\n        make MULTIARCH_BUILD=y clean dist release docs docker-images\n    else\n        make clean dist docs docker-images\n    fi\n    mkdir -p $WORK_DIR/$RELEASE_TAG\n    cp $SOURCE_DIR/dist/jupyter_enterprise_gateway* $WORK_DIR/$RELEASE_TAG\n\n    # Build and prepare the release\n    git commit -a -m \"Prepare release $RELEASE_VERSION\"\n    git tag $RELEASE_TAG\n\n    update_version_to_development\n\n    cd $SOURCE_DIR\n    mv dist $WORK_DIR/$RELEASE_TAG\n    mv build $WORK_DIR/$RELEASE_TAG\n    make clean dist docs\n\n    # Build next development iteraction\n    git commit -a -m\"Prepare for next development interaction 
$DEVELOPMENT_VERSION\"\n\n    if [ -z \"$DRY_RUN\" ]; then\n        git push\n        git push --tags\n    fi\n\n    cd \"$BASE_DIR\" #exit target\n\n    exit 0\nfi\n\n\nif [[ \"$RELEASE_PUBLISH\" == \"true\" ]]; then\n    echo \"Publishing release $RELEASE_VERSION\"\n    # Checkout code\n    checkout_code\n    cd $SOURCE_DIR\n    git checkout $RELEASE_TAG\n    git clean -d -f -x\n\n    make clean dist docs\n\n    cd \"$BASE_DIR\" #exit target\n\n    exit 0\nfi\n\ncd \"$BASE_DIR\" #return to base dir\nrm -rf target\necho \"ERROR: wrong execution goals\"\nexit_with_usage\n"
  },
  {
    "path": "requirements.yml",
    "content": "channels:\n  - conda-forge\n  - defaults\ndependencies:\n  - docker-py>=3.5.0\n  - future\n  - jinja2>=3.1\n  - jupyter_client>=6.1,<7 # Remove cap once EG supports kernel provisioners\n  - jupyter_core>=4.7.0\n  - jupyter_server>=1.7,<2 # Remove cap (increase floor) once EG supports kernel provisioners\n  - paramiko>=2.1.2\n  - pexpect>=4.2.0\n  - pip\n  - pre-commit\n  - pycryptodomex>=3.9.7\n  - python-kubernetes>=18.20.0\n  - pyzmq>=20.0.0,<25 # Pyzmq 25 removes deprecated code that jupyter_client 6 uses, remove if v6 gets updated\n  - requests>=2.14.2\n  - tornado>=6.1\n  - traitlets>=5.3.0\n  - watchdog>=2.1.3\n  - yarn-api-client>=1.0\n\n  # Test Requirements\n  - coverage\n  - ipykernel\n  - mock\n  - pytest<8.1.0\n  - pytest-tornasync\n  - websocket-client\n\n  # Code Style\n  - flake8\n\n  - pip:\n      - .\n      - -r docs/doc-requirements.txt\n"
  },
  {
    "path": "website/.gitignore",
    "content": "_site/\n.sass-cache/\n"
  },
  {
    "path": "website/README.md",
    "content": "# Jupyter Enterprise Gateway website\n\nA Jekyll based website describing a general overview of the Jupyter Enterprise Gateway project\n\n## Building the project\n\njekyll serve --watch\n"
  },
  {
    "path": "website/_config.yml",
    "content": "# Site settings\ntitle: Jupyter Enterprise Gateway\nemail:\ndescription: > # this means to ignore newlines until \"baseurl:\"\nbaseurl: \"/enterprise_gateway\" # the subpath of your site, e.g. /blog/\nurl: \"\" # the base hostname & protocol for your site\n\ntwitter_username:\ngithub_username:\n\n# Build settings\nmarkdown: kramdown\n"
  },
  {
    "path": "website/_data/navigation.yml",
    "content": "topnav:\n\n- title: Enterprise Gateway\n  subcategories:\n  - title: What is it?\n    url: /enterprise_gateway/#about\n  - title: Features\n    url: /enterprise_gateway/#features\n  - title: Supported Platforms\n    url: /enterprise_gateway/#platforms\n  - title: Contact\n    url: /enterprise_gateway/#contact\n\n\n- title: Documentation\n  url: https://jupyter-enterprise-gateway.readthedocs.io/en/latest/\n\n- title: GitHub\n  url: https://github.com/jupyter/enterprise_gateway\n\n- title: Privacy\n  url: /enterprise_gateway/privacy-policy\n"
  },
  {
    "path": "website/_includes/call-to-action.html",
    "content": "<section class=\"bg-primary\" id=\"about\">\n    <div class=\"container\">\n        <div class=\"row\">\n            <div class=\"col-lg-8 col-lg-offset-2 text-center\">\n                <h2 class=\"section-heading\">Jupyter Enterprise Gateway</h2>\n                <hr class=\"light\">\n                <p class=\"text-faded\">A lightweight, multi-tenant, scalable and secure gateway that enables Jupyter Notebooks to share resources across distributed clusters such as Apache Spark, Kubernetes and others.</p>\n                <a href=\"https://jupyter-enterprise-gateway.readthedocs.io/en/latest/\" class=\"btn btn-default btn-xl\">Get Started!</a>\n            </div>\n        </div>\n    </div>\n</section>\n"
  },
  {
    "path": "website/_includes/contact.html",
    "content": "<hr class=\"primary\">\n\n<section id=\"contact\">\n    <div class=\"container\">\n        <div class=\"row\">\n            <div class=\"col-lg-8 col-lg-offset-2 text-center\">\n                <h2 class=\"section-heading\">Let's Get In Touch!</h2>\n                <p>Join the Jupyter Gateway community by interacting with us on the Jupyter mailing list or via the project Github.</p>\n            </div>\n            <div class=\"col-lg-4 col-lg-offset-1 text-center\">\n                <a href=\"https://groups.google.com/forum/#!forum/jupyter\">\n                <i class=\"fa fa-envelope-o fa-3x wow bounceIn\" data-wow-delay=\".1s\"></i>\n                    <p>{{ site.email }}</p>\n                </a>\n            </div>\n            <div class=\"col-lg-4 col-lg-offset-1 text-center\">\n                <a href=\"https://github.com/jupyter-server/enterprise_gateway\">\n                <i class=\"fa fa-github fa-3x wow bounceIn\" data-wow-delay=\".1s\"></i>\n                    <p>{{ site.github_username }}</p>\n                </a>\n            </div>\n        </div>\n    </div>\n</section>\n"
  },
  {
    "path": "website/_includes/features.html",
    "content": "<section id=\"features\">\n    <div class=\"container\">\n        <div class=\"row\">\n            <div class=\"col-lg-12 text-center\">\n                <h2 class=\"section-heading\">Key capabilities offered by Jupyter Enterprise Gateway</h2>\n                <!-- <hr class=\"primary\"> -->\n            </div>\n        </div>\n    </div>\n    <div class=\"container\">\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-3 col-md-6 text-center\">\n                <div class=\"service-box\">\n                    <i class=\"fa fa-4x fa-share-alt wow bounceIn text-primary\"></i>\n                    <h3>Optimized and Distributed Resource Allocation</h3>\n                    <p class=\"text-muted\">It enables Jupyter to utilize distributed cluster resources by running kernels as Apache Spark applications in YARN cluster mode or as independent pods in a Kubernetes cluster.</p>\n                </div>\n            </div>\n            <div class=\"col-lg-3 col-md-6 text-center\">\n                <div class=\"service-box\">\n                    <i class=\"fa fa-4x fa-lock wow bounceIn text-primary\" data-wow-delay=\".1s\"></i>\n                    <h3>Enhanced Security</h3>\n                    <p class=\"text-muted\">It provides end-to-end security such as: encrypted HTTP communication between Jupyter Notebook and Gateway and secured socket communications between Gateway and remote kernels.</p>\n                </div>\n            </div>\n            <div class=\"col-lg-3 col-md-6 text-center\">\n                <div class=\"service-box\">\n                    <i class=\"fa fa-4x fa-users wow bounceIn text-primary\" data-wow-delay=\".2s\"></i>\n                    <h3>Multiuser Support with User Impersonation</h3>\n                    <p class=\"text-muted\">It enhances security and sandboxing of all kernels by enabling user impersonation leveraging Kerberos.</p>\n                </div>\n            
</div>\n            <div class=\"col-lg-3 col-md-6 text-center\">\n                <div class=\"service-box\">\n                    <i class=\"fa fa-4x fa-heart wow bounceIn text-primary\" data-wow-delay=\".3s\"></i>\n                    <h3>Open Source</h3>\n                    <p class=\"text-muted\">All of these, available as free open source software, built by a community for the community.</p>\n                </div>\n            </div>\n        </div>\n    </div>\n</section>\n"
  },
  {
    "path": "website/_includes/head.html",
    "content": "<head>\n\n    <meta charset=\"utf-8\">\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n    <meta name=\"description\" content=\"\">\n    <meta name=\"author\" content=\"\">\n\n    <title>Jupyter Enterprise Gateway</title>\n\n    <!-- Bootstrap Core CSS -->\n    <link rel=\"stylesheet\" href=\"css/bootstrap.min.css\" type=\"text/css\">\n\n    <!-- Custom Fonts -->\n    <link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>\n    <link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>\n    <link rel=\"stylesheet\" href=\"font-awesome/css/font-awesome.min.css\" type=\"text/css\">\n\n    <!-- Plugin CSS -->\n    <link rel=\"stylesheet\" href=\"css/animate.min.css\" type=\"text/css\">\n\n    <!-- Custom CSS -->\n    <link rel=\"stylesheet\" href=\"css/main.css\" type=\"text/css\">\n\n    <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->\n    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->\n    <!--[if lt IE 9]>\n        <script src=\"https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js\"></script>\n        <script src=\"https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js\"></script>\n    <![endif]-->\n\n</head>\n"
  },
  {
    "path": "website/_includes/header.html",
    "content": "<header>\n    <div class=\"header-content\">\n        <div class=\"header-content-inner\">\n            <h1>Jupyter <br/> Enterprise Gateway</h1>\n            <hr>\n            <p>The Jupyter Enterprise Gateway project is dedicated to making Jupyter Notebook stack multi-tenant, scalable, secure and ready for Enterprise scenarios such as Big Data Analytics, Machine Learning and Deep Learning model development.</p>\n            <a href=\"#about\" class=\"btn btn-primary btn-xl page-scroll\">Find Out More</a>\n        </div>\n    </div>\n</header>\n"
  },
  {
    "path": "website/_includes/nav.html",
    "content": "<nav id=\"mainNav\" class=\"navbar navbar-inverse navbar-fixed-top\">\n    <div class=\"container-fluid\">\n        <!-- Brand and toggle get grouped for better mobile display -->\n        <div class=\"navbar-header\">\n            <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-example-navbar-collapse-1\">\n                <span class=\"sr-only\">Toggle navigation</span>\n                <span class=\"icon-bar\"></span>\n                <span class=\"icon-bar\"></span>\n                <span class=\"icon-bar\"></span>\n            </button>\n            <a class=\"navbar-brand page-scroll\" href=\"/enterprise_gateway/index.html\">Jupyter Enterprise Gateway</a>\n        </div>\n\n        <!-- Collect the nav links, forms, and other content for toggling -->\n        <nav class=\"navbar-collapse collapse\" role=\"navigation\">\n          <ul class=\"nav navbar-nav navbar-right\">\n            {% for entry in site.data.navigation.topnav %}\n            <li id=\"{{ entry.title | slugify }}\">\n              {% if entry.subcategories %}\n              <a href=\"#\" data-toggle=\"dropdown\" class=\"dropdown-toggle\">{{ entry.title }}<b class=\"caret\"></b></a>\n                <ul class=\"dropdown-menu dropdown-left\">\n                {% for subitem in entry.subcategories %}\n                  {% if subitem.url contains \"http\" %}\n                    {% assign target = \"_blank\" %}{% else %}\n                    {% assign target = \"_self\" %}{% endif %}\n                  <li><a href=\"{{ subitem.url }}\" target=\"{{ target }}\">{{ subitem.title }}</a></li>\n                {% endfor %}\n                </ul>\n              {% else %}\n                {% if entry.url contains \"http\" %}\n                  {% assign target = \"_blank\" %}{% else %}\n                  {% assign target = \"_self\" %}{% endif %}\n                <a href=\"{{ entry.url }}\" target=\"{{ target }}\">{{ entry.title 
}}</a>\n              {% endif %}\n            </li>\n            {% endfor %}\n          </ul>\n        </nav><!--/.navbar-collapse -->\n        <!-- /.navbar-collapse -->\n    </div>\n    <!-- /.container-fluid -->\n</nav>\n"
  },
  {
    "path": "website/_includes/platforms.html",
    "content": "<hr class=\"primary\">\n\n<section id=\"platforms\">\n    <div class=\"container\">\n        <div class=\"row\">\n            <div class=\"col-lg-12 text-center\">\n                <h2 class=\"section-heading\">Supported Platforms</h2>\n            </div>\n        </div>\n    </div>\n    <div class=\"container\">\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-3 text-center\">\n              <img src=\"./img/spark-logo-trademark.png\" height=\"75%\" width=\"75%\">\n            </div>\n            <div class=\"col-lg-8 text-center\">\n              <h3>Apache Spark running on YARN cluster mode</h3>\n              <p class=\"text-muted\">Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as Apache Spark applications in YARN cluster mode, which enables the kernels to run on different nodes of the cluster.</p>\n              <a href=\"{{ site.baseurl }}/platform-spark.html\">Learn more...</a>\n            </div>\n        </div>\n\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-8 text-center\">\n              <h3>Kubernetes</h3>\n              <p class=\"text-muted\">Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as independent pods distributed in a Kubernetes cluster.</p>\n              <a href=\"{{ site.baseurl }}/platform-kubernetes.html\">Learn more...</a>\n            </div>\n            <div class=\"col-lg-3 text-center\">\n              <img src=\"./img/kubernetes-logo.png\" height=\"75%\" width=\"75%\">\n            </div>\n        </div>\n\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-3 text-center\">\n              <img src=\"./img/docker-swarm-logo.png\" height=\"75%\" width=\"75%\">\n            </div>\n            <div class=\"col-lg-8 text-center\">\n              <h3>Docker Swarm</h3>\n              <p class=\"text-muted\">Jupyter 
Enterprise Gateway enables Jupyter Notebook kernels to run as independent containers distributed in a Docker Swarm cluster.</p>\n            </div>\n        </div>\n\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-8 text-center\">\n              <h3>Dask</h3>\n              <p class=\"text-muted\">Jupyter Enterprise Gateway enables Jupyter Notebook kernels to be scaled using Dask in YARN cluster mode, which enables the kernels to run on different nodes of the cluster.</p>\n            </div>\n            <div class=\"col-lg-3 text-center\">\n              <img src=\"./img/dask-logo.png\" height=\"75%\" width=\"75%\">\n            </div>\n        </div>\n\n        <div class=\"row top-buffer\"/>\n\n        <div class=\"row\">\n            <div class=\"col-lg-3 text-center\">\n              <img src=\"./img/spectrum-conductor-logo.png\" height=\"75%\" width=\"75%\">\n            </div>\n            <div class=\"col-lg-8 text-center\">\n              <h3>IBM Spectrum Conductor</h3>\n              <p class=\"text-muted\">Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as Apache Spark applications in the IBM Spectrum Conductor platform.</p>\n            </div>\n        </div>\n\n\n    </div>\n\n</section>\n"
  },
  {
    "path": "website/_includes/scripts.html",
    "content": "<!-- jQuery -->\n<script src=\"js/jquery.js\"></script>\n\n<!-- Bootstrap Core JavaScript -->\n<script src=\"js/bootstrap.min.js\"></script>\n\n<!-- Plugin JavaScript -->\n<script src=\"js/jquery.easing.min.js\"></script>\n<script src=\"js/jquery.fittext.js\"></script>\n<script src=\"js/wow.min.js\"></script>\n\n<!-- Custom Theme JavaScript -->\n<script src=\"js/creative.js\"></script>\n\n<!-- Global site tag (gtag.js) - Google Analytics -->\n<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-130853690-1\"></script>\n<script>\n  window.dataLayer = window.dataLayer || [];\n  function gtag(){dataLayer.push(arguments);}\n  gtag('js', new Date());\n\n  gtag('config', 'UA-130853690-1');\n</script>\n"
  },
  {
    "path": "website/_layouts/home.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n\n{% include head.html %}\n\n<body id=\"page-top\">\n  {% include nav.html %}\n  {% include header.html %}\n  {% include call-to-action.html %}\n  {% include features.html %}\n  {% include platforms.html %}\n  {% include contact.html %}\n  {% include scripts.html %}\n</body>\n\n</html>\n"
  },
  {
    "path": "website/_layouts/page.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n\n{% include head.html %}\n\n<body id=\"page-top\">\n  {% include nav.html %}\n\n  <div style=\"padding: 50px\"></div>\n  <div class=\"container-fluid\" style=\"max-width: 800px; margin: auto\">\n    <div class=\"row\">\n      <div class=\"col-md-12\">\n        {{ content }}\n      </div>\n    </div>\n  </div>\n\n  {% include scripts.html %}\n</body>\n\n</html>\n"
  },
  {
    "path": "website/_sass/_base.scss",
    "content": "html,\nbody {\n    height: 100%;\n    width: 100%;\n}\n\nbody {\n    @include serif-font;\n}\n\nhr {\n    border-color: $theme-primary;\n    border-width: 3px;\n    //max-width: 50px;\n}\n\nhr.light {\n    border-color: white;\n}\n\na {\n    @include transition-all;\n    color: $theme-primary;\n    &:hover,\n    &:focus {\n        color: darken($theme-primary, 10%);\n    }\n}\n\nh1,\nh2,\nh3,\nh4,\nh5,\nh6 {\n    @include sans-serif-font;\n}\n\np {\n    font-size: 16px;\n    line-height: 1.5;\n    margin-bottom: 20px;\n}\n\n.bg-primary {\n    background-color: $theme-primary;\n}\n\n.bg-dark {\n    background-color: $theme-dark;\n    color: white;\n}\n\n.text-faded {\n    color: rgba(white, .7);\n}\n\nsection {\n    padding: 100px 0;\n}\n\naside {\n    padding: 50px 0;\n}\n\n.no-padding {\n    padding: 0;\n}\n\n// Navigation\n\n.navbar-default {\n    background-color: white;\n    border-color: rgba($theme-dark, .5);\n    @include sans-serif-font;\n    @include transition-all;\n    .navbar-header .navbar-brand {\n        color: $theme-primary;\n        @include sans-serif-font;\n        font-weight: 700;\n        text-transform: uppercase;\n        &:hover,\n        &:focus {\n            color: darken($theme-primary, 10%);\n        }\n    }\n    .nav {\n        > li {\n            > a,\n            > a:focus {\n                text-transform: uppercase;\n                font-weight: 700;\n                font-size: 13px;\n                color: $theme-dark;\n                &:hover {\n                    color: $theme-primary;\n                }\n            }\n            &.active {\n                > a,\n                > a:focus {\n                    color: $theme-primary !important;\n                    background-color: transparent;\n                    &:hover {\n                        background-color: transparent;\n                    }\n                }\n            }\n        }\n    }\n    @media (min-width: 768px) {\n        
background-color: transparent;\n        border-color: rgba(white, .3);\n        .navbar-header .navbar-brand {\n            color: rgba(white, .7);\n            &:hover,\n            &:focus {\n                color: white;\n            }\n        }\n        .nav > li > a,\n        .nav > li > a:focus {\n            color: rgba(white, .7);\n            &:hover {\n                color: white;\n            }\n        }\n        &.affix {\n            background-color: white;\n            border-color: rgba($theme-dark, .5);\n            .navbar-header .navbar-brand {\n                color: $theme-primary;\n                font-size: 14px;\n                &:hover,\n                &:focus {\n                    color: darken($theme-primary, 10%);\n                }\n            }\n            .nav > li > a,\n            .nav > li > a:focus {\n                color: $theme-dark;\n                &:hover {\n                    color: $theme-primary;\n                }\n            }\n        }\n    }\n}\n\n// Homepage Header\n\nheader {\n    position: relative;\n    width: 100%;\n    min-height: auto;\n    @include background-cover;\n    background-position: center;\n    background-image: url('../img/header.jpg');\n    text-align: center;\n    color: white;\n    .header-content {\n        position: relative;\n        text-align: center;\n        padding: 100px 15px 100px;\n        width: 100%;\n        .header-content-inner {\n            h1 {\n                font-weight: 700;\n                text-transform: uppercase;\n                margin-top: 0;\n                margin-bottom: 0;\n            }\n            hr {\n                margin: 30px auto;\n            }\n            p {\n                font-weight: 300;\n                color: rgba(white, .7);\n                font-size: 16px;\n                margin-bottom: 50px;\n            }\n        }\n    }\n    @media (min-width: 768px) {\n        min-height: 100%;\n        .header-content {\n            
position: absolute;\n            top: 50%;\n            -webkit-transform: translateY(-50%);\n            -ms-transform: translateY(-50%);\n            transform: translateY(-50%);\n            padding: 0 50px;\n            .header-content-inner {\n                max-width: 1000px;\n                margin-left: auto;\n                margin-right: auto;\n                p {\n                    font-size: 18px;\n                    max-width: 80%;\n                    margin-left: auto;\n                    margin-right: auto;\n                }\n            }\n        }\n    }\n}\n\n// Sections\n\n.section-heading {\n    margin-top: 0;\n}\n\n.service-box {\n    max-width: 400px;\n    margin: 50px auto 0;\n    @media (min-width: 992px) {\n        margin: 20px auto 0;\n    }\n    p {\n        margin-bottom: 0;\n    }\n    h3 {\n      height: 125px;\n      padding: 25px;\n    }\n}\n\n.top-buffer { margin-top:75px; }\n\n.portfolio-box {\n    position: relative;\n    display: block;\n    max-width: 650px;\n    margin: 0 auto;\n    .portfolio-box-caption {\n        color: white;\n        opacity: 0;\n        display: block;\n        background: rgba( $theme-primary, .9 );\n        position: absolute;\n        bottom: 0;\n        text-align: center;\n        width: 100%;\n        height: 100%;\n        @include transition-all;\n        .portfolio-box-caption-content {\n            width: 100%;\n            text-align: center;\n            position: absolute;\n            top: 50%;\n            transform: translateY(-50%);\n            .project-category,\n            .project-name {\n                @include sans-serif-font;\n                padding: 0 15px;\n            }\n            .project-category {\n                text-transform: uppercase;\n                font-weight: 600;\n                font-size: 14px;\n            }\n            .project-name {\n                font-size: 18px;\n            }\n        }\n    }\n    &:hover {\n        .portfolio-box-caption 
{\n            opacity: 1;\n        }\n    }\n    @media (min-width: 768px) {\n        .portfolio-box-caption {\n            .portfolio-box-caption-content {\n                .project-category {\n                    font-size: 16px;\n                }\n                .project-name {\n                    font-size: 22px;\n                }\n            }\n        }\n    }\n}\n\n.call-to-action {\n    h2 {\n        margin: 0 auto 20px;\n    }\n}\n\n// Bootstrap Overrides\n.text-primary {\n    color: $theme-primary;\n}\n\n.no-gutter > [class*='col-'] {\n    padding-right:0;\n    padding-left:0;\n}\n\n// Button Styles\n.btn-default {\n    @include button-variant($theme-dark, white, white);\n}\n\n.btn-primary {\n    @include button-variant(white, $theme-primary, $theme-primary);\n}\n\n.btn {\n    @include sans-serif-font;\n    border: none;\n    border-radius: 300px;\n    font-weight: 700;\n    text-transform: uppercase;\n}\n\n.btn-xl {\n    padding: 15px 30px;\n}\n\n// Contact\n#contact .fa {\n    color: $theme-dark;\n    font-size: 4em;\n}\n\n// Extras\n// -- Highlight Color Customization\n::-moz-selection {\n    color: white;\n    text-shadow: none;\n    background: $theme-dark;\n}\n\n::selection {\n    color: white;\n    text-shadow: none;\n    background: $theme-dark;\n}\n\nimg::selection {\n    color: white;\n    background: transparent;\n}\n\nimg::-moz-selection {\n    color: white;\n    background: transparent;\n}\n\nbody {\n    -webkit-tap-highlight-color: $theme-dark;\n}\n"
  },
  {
    "path": "website/_sass/_mixins.scss",
    "content": "@mixin transition-all()\n{\n    -webkit-transition: all 0.35s;\n    -moz-transition: all 0.35s;\n    transition: all 0.35s;\n}\n\n@mixin background-cover()\n{\n    -webkit-background-size: cover;\n    -moz-background-size: cover;\n    background-size: cover;\n    -o-background-size: cover;\n}\n\n@mixin button-variant($color, $background, $border)\n{\n\tcolor: $color;\n\tbackground-color: $background;\n\tborder-color: $border;\n\t@include transition-all;\n\n\t&:hover,\n\t&:focus,\n\t&.focus,\n\t&:active,\n\t&.active,\n\t.open > .dropdown-toggle & {\n\t\tcolor: $color;\n\t\tbackground-color: darken($background, 5%);\n    border-color: darken($border, 7%);\n\t}\n\t&:active,\n\t&.active,\n\t.open > .dropdown-toggle & {\n\t\tbackground-image: none;\n\t}\n\t&.disabled,\n\t&[disabled],\n\tfieldset[disabled] & {\n\t\t&,\n\t\t&:hover,\n\t\t&:focus,\n\t\t&.focus,\n\t\t&:active,\n\t\t&.active {\n\t\t\tbackground-color: $background;\n\t\t\tborder-color: $border;\n\t\t}\n\t}\n\n\t.badge {\n\t\tcolor: $background;\n\t\tbackground-color: $color;\n\t}\n}\n\n@mixin sans-serif-font()\n{\n\tfont-family: 'Open Sans', 'Helvetica Neue', Arial, sans-serif;\n}\n\n@mixin serif-font()\n{\n\tfont-family: 'Merriweather', 'Helvetica Neue', Arial, sans-serif;\n}\n"
  },
  {
    "path": "website/css/bootstrap.css",
    "content": "/*!\n * Bootstrap v3.3.2 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n/*! normalize.css v3.0.2 | MIT License | git.io/normalize */\nhtml {\n  font-family: sans-serif;\n  -webkit-text-size-adjust: 100%;\n      -ms-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background-color: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: 1px dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  margin: .67em 0;\n  font-size: 2em;\n}\nmark {\n  color: #000;\n  background: #ff0;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  position: relative;\n  font-size: 75%;\n  line-height: 0;\n  vertical-align: baseline;\n}\nsup {\n  top: -.5em;\n}\nsub {\n  bottom: -.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  height: 0;\n  -webkit-box-sizing: content-box;\n     -moz-box-sizing: content-box;\n          box-sizing: content-box;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  margin: 0;\n  font: inherit;\n  color: inherit;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: 
default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  padding: 0;\n  border: 0;\n}\ninput {\n  line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: content-box;\n     -moz-box-sizing: content-box;\n          box-sizing: content-box;\n  -webkit-appearance: textfield;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  padding: .35em .625em .75em;\n  margin: 0 2px;\n  border: 1px solid #c0c0c0;\n}\nlegend {\n  padding: 0;\n  border: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-spacing: 0;\n  border-collapse: collapse;\n}\ntd,\nth {\n  padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    -webkit-box-shadow: none !important;\n            box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  select {\n    background: #fff !important;\n  }\n  .navbar {\n    display: none;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n@font-face {\n  font-family: 'Glyphicons Halflings';\n\n  src: url('../fonts/glyphicons-halflings-regular.eot');\n  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  
font-family: 'Glyphicons Halflings';\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1;\n\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\2a\";\n}\n.glyphicon-plus:before {\n  content: \"\\2b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: \"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  content: \"\\e020\";\n}\n.glyphicon-home:before {\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: 
\"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: 
\"\\e063\";\n}\n.glyphicon-tint:before {\n  content: \"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: \"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: \"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: 
\"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: \"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: 
\"\\e134\";\n}\n.glyphicon-globe:before {\n  content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: \"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: 
\"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before {\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.glyphicon-cd:before {\n  content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n  content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n  content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n  content: 
\"\\e204\";\n}\n.glyphicon-copy:before {\n  content: \"\\e205\";\n}\n.glyphicon-paste:before {\n  content: \"\\e206\";\n}\n.glyphicon-alert:before {\n  content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n  content: \"\\e210\";\n}\n.glyphicon-king:before {\n  content: \"\\e211\";\n}\n.glyphicon-queen:before {\n  content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n  content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n  content: \"\\e214\";\n}\n.glyphicon-knight:before {\n  content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n  content: \"\\e216\";\n}\n.glyphicon-tent:before {\n  content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n  content: \"\\e218\";\n}\n.glyphicon-bed:before {\n  content: \"\\e219\";\n}\n.glyphicon-apple:before {\n  content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n  content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n  content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n  content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n  content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n  content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n  content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n  content: \"\\e227\";\n}\n.glyphicon-yen:before {\n  content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n  content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n  content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n  content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n  content: \"\\e232\";\n}\n.glyphicon-education:before {\n  content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n  content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n  content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n  content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n  content: \"\\e237\";\n}\n.glyphicon-oil:before {\n  content: \"\\e238\";\n}\n.glyphicon-grain:before {\n  content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n  content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n  content: 
\"\\e241\";\n}\n.glyphicon-text-color:before {\n  content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n  content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n  content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n  content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n  content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n  content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n  content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n  content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n  content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n  content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n  content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n  content: \"\\e253\";\n}\n.glyphicon-console:before {\n  content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n  content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n  content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n  content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n  content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n  content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n  content: \"\\e260\";\n}\n* {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\nhtml {\n  font-size: 10px;\n\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #333;\n  background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #337ab7;\n  text-decoration: none;\n}\na:hover,\na:focus {\n  color: #23527c;\n  text-decoration: underline;\n}\na:focus {\n  outline: thin dotted;\n  outline: 5px auto 
-webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n  padding: 4px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all .2s ease-in-out;\n       -o-transition: all .2s ease-in-out;\n          transition: all .2s ease-in-out;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n  position: static;\n  width: auto;\n  height: auto;\n  margin: 0;\n  overflow: visible;\n  clip: auto;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: normal;\n  line-height: 1;\n  color: #777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 
small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\nmark,\n.mark {\n  padding: .2em;\n  background-color: #fcf8e3;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover {\n  color: #286090;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover {\n  background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover {\n  
background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  margin-left: -5px;\n  list-style: none;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.42857143;\n}\ndt {\n  font-weight: bold;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    overflow: hidden;\n    clear: left;\n    text-align: right;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n  border-bottom: 1px dotted #777;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.42857143;\n  color: #777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid #eee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: '';\n}\n.blockquote-reverse 
footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: '\\00A0 \\2014';\n}\naddress {\n  margin-bottom: 20px;\n  font-style: normal;\n  line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  background-color: #333;\n  border-radius: 3px;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n}\nkbd kbd {\n  padding: 0;\n  font-size: 100%;\n  font-weight: bold;\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.42857143;\n  color: #333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, 
.col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666667%;\n}\n.col-xs-10 {\n  width: 83.33333333%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666667%;\n}\n.col-xs-7 {\n  width: 58.33333333%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666667%;\n}\n.col-xs-4 {\n  width: 33.33333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.66666667%;\n}\n.col-xs-1 {\n  width: 8.33333333%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666667%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333%;\n}\n.col-xs-pull-9 {\n  right: 75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666667%;\n}\n.col-xs-pull-7 {\n  right: 58.33333333%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.66666667%;\n}\n.col-xs-pull-1 {\n  right: 8.33333333%;\n}\n.col-xs-pull-0 {\n  right: auto;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666667%;\n}\n.col-xs-push-10 {\n  left: 83.33333333%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 66.66666667%;\n}\n.col-xs-push-7 {\n  left: 58.33333333%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n  left: 
41.66666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.66666667%;\n}\n.col-xs-push-1 {\n  left: 8.33333333%;\n}\n.col-xs-push-0 {\n  left: auto;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666667%;\n  }\n  .col-sm-10 {\n    width: 83.33333333%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666667%;\n  }\n  .col-sm-7 {\n    width: 58.33333333%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.66666667%;\n  }\n  .col-sm-1 {\n    width: 8.33333333%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-sm-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-sm-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-sm-pull-4 {\n    right: 
33.33333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-sm-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-sm-pull-0 {\n    right: auto;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666667%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666667%;\n  }\n  .col-sm-push-7 {\n    left: 58.33333333%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.66666667%;\n  }\n  .col-sm-push-1 {\n    left: 8.33333333%;\n  }\n  .col-sm-push-0 {\n    left: auto;\n  }\n  .col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-sm-offset-6 {\n    margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-sm-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666667%;\n  }\n  .col-md-10 {\n    width: 83.33333333%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666667%;\n  }\n  .col-md-7 {\n    width: 58.33333333%;\n  }\n  
.col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.66666667%;\n  }\n  .col-md-1 {\n    width: 8.33333333%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-md-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-md-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-md-pull-0 {\n    right: auto;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666667%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666667%;\n  }\n  .col-md-push-7 {\n    left: 58.33333333%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.66666667%;\n  }\n  .col-md-push-1 {\n    left: 8.33333333%;\n  }\n  .col-md-push-0 {\n    left: auto;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  
.col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    
left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0;\n  }\n}\ntable {\n  background-color: transparent;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > 
th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > 
th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > 
td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: .01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  
.table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: bold;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n       
-o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n  background-color: #eee;\n  opacity: 1;\n}\ntextarea.form-control {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"],\n  input[type=\"time\"],\n  input[type=\"datetime-local\"],\n  input[type=\"month\"] {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n  min-height: 20px;\n  padding-left: 20px;\n  
margin-bottom: 0;\n  font-weight: normal;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: normal;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.form-control-static {\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.form-group-sm .form-control {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.form-group-sm .form-control,\nselect[multiple].form-group-sm .form-control {\n  height: 
auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.form-group-lg .form-control {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.form-group-lg .form-control,\nselect[multiple].form-group-lg .form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n         
 box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: 
#a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n  }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback 
.form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 14.333333px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n  }\n}\n.btn {\n  display: inline-block;\n  padding: 6px 12px;\n  margin-bottom: 0;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1.42857143;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  -ms-touch-action: manipulation;\n      touch-action: manipulation;\n  cursor: pointer;\n  -webkit-user-select: none;\n     -moz-user-select: none;\n      -ms-user-select: none;\n          user-select: none;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  pointer-events: none;\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  -webkit-box-shadow: none;\n          box-shadow: none;\n  opacity: .65;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus,\n.btn-default.focus,\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  background-image: none;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] 
.btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:hover,\n.btn-primary:focus,\n.btn-primary.focus,\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  background-image: none;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: 
#4cae4c;\n}\n.btn-success:hover,\n.btn-success:focus,\n.btn-success.focus,\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  background-image: none;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:hover,\n.btn-info:focus,\n.btn-info.focus,\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  background-image: none;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] 
.btn-info.active {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:hover,\n.btn-warning:focus,\n.btn-warning.focus,\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  background-image: none;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:hover,\n.btn-danger:focus,\n.btn-danger.focus,\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  background-image: none;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: normal;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity .15s linear;\n       -o-transition: opacity .15s linear;\n          transition: opacity .15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n  visibility: hidden;\n}\n.collapse.in {\n  
display: block;\n  visibility: visible;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-timing-function: ease;\n       -o-transition-timing-function: ease;\n          transition-timing-function: ease;\n  -webkit-transition-duration: .35s;\n       -o-transition-duration: .35s;\n          transition-duration: .35s;\n  -webkit-transition-property: height, visibility;\n       -o-transition-property: height, visibility;\n          transition-property: height, visibility;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px solid;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n          box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: normal;\n  line-height: 1.42857143;\n  color: #333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu 
> .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: #777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px solid;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 
2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup 
.btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > 
.btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: 
table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1;\n  color: #555;\n  text-align: center;\n  background-color: #eee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: 
-1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.nav > li.disabled > a {\n  color: #777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eee #eee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 
768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > 
a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n  visibility: hidden;\n}\n.tab-content > .active {\n  display: block;\n  visibility: visible;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  -webkit-overflow-scrolling: touch;\n  border-top: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n    visibility: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: 
-15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-top: 8px;\n  margin-right: 15px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: 
transparent;\n    border: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-top: 8px;\n  margin-right: -15px;\n  margin-bottom: 8px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio 
label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  
color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-link {\n  color: 
#777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  
.navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: 
-1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  color: #23527c;\n  background-color: #eee;\n  border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 2;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  
border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: 
#d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  background-color: #777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding: 30px 15px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding: 48px 0;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border .2s ease-in-out;\n       -o-transition: border .2s ease-in-out;\n          transition: border .2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: 
auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@-o-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  
border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n          box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n}\n.progress-bar {\n  float: left;\n  width: 0;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n  -webkit-transition: width .6s ease;\n       -o-transition: width .6s ease;\n          transition: width .6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  -webkit-background-size: 40px 40px;\n          background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n       -o-animation: progress-bar-stripes 2s linear infinite;\n          animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 
255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 
75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\na.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\na.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #eee;\n}\n.list-group-item.disabled 
.list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: #c7ddef;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\na.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\na.list-group-item-success.active:hover,\na.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading {\n  color: 
inherit;\n}\na.list-group-item-info:hover,\na.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\na.list-group-item-info.active:hover,\na.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\na.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\na.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 
0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > 
tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > 
.table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > 
.table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > 
td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: 
#333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  
color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, .15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: .2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\nbutton.close {\n  -webkit-appearance: none;\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  display: none;\n  overflow: hidden;\n  
-webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transition: -webkit-transform .3s ease-out;\n       -o-transition:      -o-transform .3s ease-out;\n          transition:         transform .3s ease-out;\n  -webkit-transform: translate(0, -25%);\n      -ms-transform: translate(0, -25%);\n       -o-transform: translate(0, -25%);\n          transform: translate(0, -25%);\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n      -ms-transform: translate(0, 0);\n       -o-transform: translate(0, 0);\n          transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  outline: 0;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n          box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n}\n.modal-backdrop {\n  position: absolute;\n  top: 0;\n  right: 0;\n  left: 0;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.modal-header {\n  min-height: 16.42857143px;\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n 
 width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n            box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 12px;\n  font-weight: normal;\n  line-height: 1.4;\n  visibility: visible;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: .9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  text-decoration: none;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: 
#000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1.42857143;\n  text-align: left;\n  white-space: normal;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n          box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999;\n  border-top-color: rgba(0, 0, 0, .25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  
content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999;\n  border-right-color: rgba(0, 0, 0, .25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999;\n  border-bottom-color: rgba(0, 0, 0, .25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: -11px;\n  border-right-width: 0;\n  border-left-color: #999;\n  border-left-color: rgba(0, 0, 0, .25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: .6s ease-in-out left;\n       -o-transition: .6s ease-in-out left;\n          transition: .6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  .carousel-inner > .item {\n    -webkit-transition: -webkit-transform .6s ease-in-out;\n         -o-transition:      -o-transform .6s ease-in-out;\n            transition:         transform .6s ease-in-out;\n\n    -webkit-backface-visibility: hidden;\n            backface-visibility: hidden;\n    -webkit-perspective: 1000;\n            perspective: 1000;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    left: 0;\n    -webkit-transform: 
translate3d(100%, 0, 0);\n            transform: translate3d(100%, 0, 0);\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    left: 0;\n    -webkit-transform: translate3d(-100%, 0, 0);\n            transform: translate3d(-100%, 0, 0);\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    left: 0;\n    -webkit-transform: translate3d(0, 0, 0);\n            transform: translate3d(0, 0, 0);\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  background-image:      -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001)));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, 
rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  background-image:      -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5)));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  filter: alpha(opacity=90);\n  outline: 0;\n  opacity: .9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  margin-top: -10px;\n  font-family: serif;\n  line-height: 1;\n}\n.carousel-control .icon-prev:before {\n  content: '\\2039';\n}\n.carousel-control .icon-next:before {\n  content: '\\203a';\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  
background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -15px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -15px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -15px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > 
.btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n  visibility: hidden !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  
}\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block {\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n 
 display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */\n"
  },
  {
    "path": "website/css/main.scss",
    "content": "---\n---\n\n@charset \"utf-8\";\n\n// Example Defaults\n// $base-font-family: Helvetica, Arial, sans-serif;\n// $base-font-size:   16px;\n// $small-font-size:  $base-font-size * 0.875;\n// $base-line-height: 1.5;\n\n// $spacing-unit:     30px;\n\n// $text-color:       #111;\n// $background-color: #fdfdfd;\n// $brand-color:      #2a7ae2;\n\n// $grey-color:       #828282;\n// $grey-color-light: lighten($grey-color, 40%);\n// $grey-color-dark:  darken($grey-color, 25%);\n\n// // Width of the content area\n// $content-width:    800px;\n\n// $on-palm:          600px;\n// $on-laptop:        800px;\n\n// Using media queries with like this:\n// @include media-query($on-palm) {\n//     .wrapper {\n//         padding-right: $spacing-unit / 2;\n//         padding-left: $spacing-unit / 2;\n//     }\n// }\n\n// variables.less\n$theme-primary: #F05F40;\n$theme-dark: #222;\n\n// Import partials from `sass_dir` (defaults to `_sass`)\n@import\n        \"mixins\",\n        \"base\"\n;\n"
  },
  {
    "path": "website/font-awesome/css/font-awesome.css",
    "content": "/*!\n *  Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome\n *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */\n/* FONT PATH\n * -------------------------- */\n@font-face {\n  font-family: 'FontAwesome';\n  src: url('../fonts/fontawesome-webfont.eot?v=4.3.0');\n  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg');\n  font-weight: normal;\n  font-style: normal;\n}\n.fa {\n  display: inline-block;\n  font: normal normal normal 14px/1 FontAwesome;\n  font-size: inherit;\n  text-rendering: auto;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  transform: translate(0, 0);\n}\n/* makes the font 33% larger relative to the icon container */\n.fa-lg {\n  font-size: 1.33333333em;\n  line-height: 0.75em;\n  vertical-align: -15%;\n}\n.fa-2x {\n  font-size: 2em;\n}\n.fa-3x {\n  font-size: 3em;\n}\n.fa-4x {\n  font-size: 4em;\n}\n.fa-5x {\n  font-size: 5em;\n}\n.fa-fw {\n  width: 1.28571429em;\n  text-align: center;\n}\n.fa-ul {\n  padding-left: 0;\n  margin-left: 2.14285714em;\n  list-style-type: none;\n}\n.fa-ul > li {\n  position: relative;\n}\n.fa-li {\n  position: absolute;\n  left: -2.14285714em;\n  width: 2.14285714em;\n  top: 0.14285714em;\n  text-align: center;\n}\n.fa-li.fa-lg {\n  left: -1.85714286em;\n}\n.fa-border {\n  padding: .2em .25em .15em;\n  border: solid 0.08em #eeeeee;\n  border-radius: .1em;\n}\n.pull-right {\n  float: right;\n}\n.pull-left {\n  float: left;\n}\n.fa.pull-left {\n  margin-right: .3em;\n}\n.fa.pull-right {\n  margin-left: .3em;\n}\n.fa-spin {\n  -webkit-animation: fa-spin 2s infinite linear;\n  animation: fa-spin 2s 
infinite linear;\n}\n.fa-pulse {\n  -webkit-animation: fa-spin 1s infinite steps(8);\n  animation: fa-spin 1s infinite steps(8);\n}\n@-webkit-keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n    transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n    transform: rotate(359deg);\n  }\n}\n@keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n    transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n    transform: rotate(359deg);\n  }\n}\n.fa-rotate-90 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);\n  -webkit-transform: rotate(90deg);\n  -ms-transform: rotate(90deg);\n  transform: rotate(90deg);\n}\n.fa-rotate-180 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);\n  -webkit-transform: rotate(180deg);\n  -ms-transform: rotate(180deg);\n  transform: rotate(180deg);\n}\n.fa-rotate-270 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);\n  -webkit-transform: rotate(270deg);\n  -ms-transform: rotate(270deg);\n  transform: rotate(270deg);\n}\n.fa-flip-horizontal {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);\n  -webkit-transform: scale(-1, 1);\n  -ms-transform: scale(-1, 1);\n  transform: scale(-1, 1);\n}\n.fa-flip-vertical {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);\n  -webkit-transform: scale(1, -1);\n  -ms-transform: scale(1, -1);\n  transform: scale(1, -1);\n}\n:root .fa-rotate-90,\n:root .fa-rotate-180,\n:root .fa-rotate-270,\n:root .fa-flip-horizontal,\n:root .fa-flip-vertical {\n  filter: none;\n}\n.fa-stack {\n  position: relative;\n  display: inline-block;\n  width: 2em;\n  height: 2em;\n  line-height: 2em;\n  vertical-align: middle;\n}\n.fa-stack-1x,\n.fa-stack-2x {\n  position: absolute;\n  left: 0;\n  width: 100%;\n  text-align: center;\n}\n.fa-stack-1x {\n  line-height: inherit;\n}\n.fa-stack-2x {\n  font-size: 2em;\n}\n.fa-inverse {\n  color: 
#ffffff;\n}\n/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen\n   readers do not read off random characters that represent icons */\n.fa-glass:before {\n  content: \"\\f000\";\n}\n.fa-music:before {\n  content: \"\\f001\";\n}\n.fa-search:before {\n  content: \"\\f002\";\n}\n.fa-envelope-o:before {\n  content: \"\\f003\";\n}\n.fa-heart:before {\n  content: \"\\f004\";\n}\n.fa-star:before {\n  content: \"\\f005\";\n}\n.fa-star-o:before {\n  content: \"\\f006\";\n}\n.fa-user:before {\n  content: \"\\f007\";\n}\n.fa-film:before {\n  content: \"\\f008\";\n}\n.fa-th-large:before {\n  content: \"\\f009\";\n}\n.fa-th:before {\n  content: \"\\f00a\";\n}\n.fa-th-list:before {\n  content: \"\\f00b\";\n}\n.fa-check:before {\n  content: \"\\f00c\";\n}\n.fa-remove:before,\n.fa-close:before,\n.fa-times:before {\n  content: \"\\f00d\";\n}\n.fa-search-plus:before {\n  content: \"\\f00e\";\n}\n.fa-search-minus:before {\n  content: \"\\f010\";\n}\n.fa-power-off:before {\n  content: \"\\f011\";\n}\n.fa-signal:before {\n  content: \"\\f012\";\n}\n.fa-gear:before,\n.fa-cog:before {\n  content: \"\\f013\";\n}\n.fa-trash-o:before {\n  content: \"\\f014\";\n}\n.fa-home:before {\n  content: \"\\f015\";\n}\n.fa-file-o:before {\n  content: \"\\f016\";\n}\n.fa-clock-o:before {\n  content: \"\\f017\";\n}\n.fa-road:before {\n  content: \"\\f018\";\n}\n.fa-download:before {\n  content: \"\\f019\";\n}\n.fa-arrow-circle-o-down:before {\n  content: \"\\f01a\";\n}\n.fa-arrow-circle-o-up:before {\n  content: \"\\f01b\";\n}\n.fa-inbox:before {\n  content: \"\\f01c\";\n}\n.fa-play-circle-o:before {\n  content: \"\\f01d\";\n}\n.fa-rotate-right:before,\n.fa-repeat:before {\n  content: \"\\f01e\";\n}\n.fa-refresh:before {\n  content: \"\\f021\";\n}\n.fa-list-alt:before {\n  content: \"\\f022\";\n}\n.fa-lock:before {\n  content: \"\\f023\";\n}\n.fa-flag:before {\n  content: \"\\f024\";\n}\n.fa-headphones:before {\n  content: \"\\f025\";\n}\n.fa-volume-off:before {\n  content: 
\"\\f026\";\n}\n.fa-volume-down:before {\n  content: \"\\f027\";\n}\n.fa-volume-up:before {\n  content: \"\\f028\";\n}\n.fa-qrcode:before {\n  content: \"\\f029\";\n}\n.fa-barcode:before {\n  content: \"\\f02a\";\n}\n.fa-tag:before {\n  content: \"\\f02b\";\n}\n.fa-tags:before {\n  content: \"\\f02c\";\n}\n.fa-book:before {\n  content: \"\\f02d\";\n}\n.fa-bookmark:before {\n  content: \"\\f02e\";\n}\n.fa-print:before {\n  content: \"\\f02f\";\n}\n.fa-camera:before {\n  content: \"\\f030\";\n}\n.fa-font:before {\n  content: \"\\f031\";\n}\n.fa-bold:before {\n  content: \"\\f032\";\n}\n.fa-italic:before {\n  content: \"\\f033\";\n}\n.fa-text-height:before {\n  content: \"\\f034\";\n}\n.fa-text-width:before {\n  content: \"\\f035\";\n}\n.fa-align-left:before {\n  content: \"\\f036\";\n}\n.fa-align-center:before {\n  content: \"\\f037\";\n}\n.fa-align-right:before {\n  content: \"\\f038\";\n}\n.fa-align-justify:before {\n  content: \"\\f039\";\n}\n.fa-list:before {\n  content: \"\\f03a\";\n}\n.fa-dedent:before,\n.fa-outdent:before {\n  content: \"\\f03b\";\n}\n.fa-indent:before {\n  content: \"\\f03c\";\n}\n.fa-video-camera:before {\n  content: \"\\f03d\";\n}\n.fa-photo:before,\n.fa-image:before,\n.fa-picture-o:before {\n  content: \"\\f03e\";\n}\n.fa-pencil:before {\n  content: \"\\f040\";\n}\n.fa-map-marker:before {\n  content: \"\\f041\";\n}\n.fa-adjust:before {\n  content: \"\\f042\";\n}\n.fa-tint:before {\n  content: \"\\f043\";\n}\n.fa-edit:before,\n.fa-pencil-square-o:before {\n  content: \"\\f044\";\n}\n.fa-share-square-o:before {\n  content: \"\\f045\";\n}\n.fa-check-square-o:before {\n  content: \"\\f046\";\n}\n.fa-arrows:before {\n  content: \"\\f047\";\n}\n.fa-step-backward:before {\n  content: \"\\f048\";\n}\n.fa-fast-backward:before {\n  content: \"\\f049\";\n}\n.fa-backward:before {\n  content: \"\\f04a\";\n}\n.fa-play:before {\n  content: \"\\f04b\";\n}\n.fa-pause:before {\n  content: \"\\f04c\";\n}\n.fa-stop:before {\n  content: 
\"\\f04d\";\n}\n.fa-forward:before {\n  content: \"\\f04e\";\n}\n.fa-fast-forward:before {\n  content: \"\\f050\";\n}\n.fa-step-forward:before {\n  content: \"\\f051\";\n}\n.fa-eject:before {\n  content: \"\\f052\";\n}\n.fa-chevron-left:before {\n  content: \"\\f053\";\n}\n.fa-chevron-right:before {\n  content: \"\\f054\";\n}\n.fa-plus-circle:before {\n  content: \"\\f055\";\n}\n.fa-minus-circle:before {\n  content: \"\\f056\";\n}\n.fa-times-circle:before {\n  content: \"\\f057\";\n}\n.fa-check-circle:before {\n  content: \"\\f058\";\n}\n.fa-question-circle:before {\n  content: \"\\f059\";\n}\n.fa-info-circle:before {\n  content: \"\\f05a\";\n}\n.fa-crosshairs:before {\n  content: \"\\f05b\";\n}\n.fa-times-circle-o:before {\n  content: \"\\f05c\";\n}\n.fa-check-circle-o:before {\n  content: \"\\f05d\";\n}\n.fa-ban:before {\n  content: \"\\f05e\";\n}\n.fa-arrow-left:before {\n  content: \"\\f060\";\n}\n.fa-arrow-right:before {\n  content: \"\\f061\";\n}\n.fa-arrow-up:before {\n  content: \"\\f062\";\n}\n.fa-arrow-down:before {\n  content: \"\\f063\";\n}\n.fa-mail-forward:before,\n.fa-share:before {\n  content: \"\\f064\";\n}\n.fa-expand:before {\n  content: \"\\f065\";\n}\n.fa-compress:before {\n  content: \"\\f066\";\n}\n.fa-plus:before {\n  content: \"\\f067\";\n}\n.fa-minus:before {\n  content: \"\\f068\";\n}\n.fa-asterisk:before {\n  content: \"\\f069\";\n}\n.fa-exclamation-circle:before {\n  content: \"\\f06a\";\n}\n.fa-gift:before {\n  content: \"\\f06b\";\n}\n.fa-leaf:before {\n  content: \"\\f06c\";\n}\n.fa-fire:before {\n  content: \"\\f06d\";\n}\n.fa-eye:before {\n  content: \"\\f06e\";\n}\n.fa-eye-slash:before {\n  content: \"\\f070\";\n}\n.fa-warning:before,\n.fa-exclamation-triangle:before {\n  content: \"\\f071\";\n}\n.fa-plane:before {\n  content: \"\\f072\";\n}\n.fa-calendar:before {\n  content: \"\\f073\";\n}\n.fa-random:before {\n  content: \"\\f074\";\n}\n.fa-comment:before {\n  content: \"\\f075\";\n}\n.fa-magnet:before {\n  content: 
\"\\f076\";\n}\n.fa-chevron-up:before {\n  content: \"\\f077\";\n}\n.fa-chevron-down:before {\n  content: \"\\f078\";\n}\n.fa-retweet:before {\n  content: \"\\f079\";\n}\n.fa-shopping-cart:before {\n  content: \"\\f07a\";\n}\n.fa-folder:before {\n  content: \"\\f07b\";\n}\n.fa-folder-open:before {\n  content: \"\\f07c\";\n}\n.fa-arrows-v:before {\n  content: \"\\f07d\";\n}\n.fa-arrows-h:before {\n  content: \"\\f07e\";\n}\n.fa-bar-chart-o:before,\n.fa-bar-chart:before {\n  content: \"\\f080\";\n}\n.fa-twitter-square:before {\n  content: \"\\f081\";\n}\n.fa-facebook-square:before {\n  content: \"\\f082\";\n}\n.fa-camera-retro:before {\n  content: \"\\f083\";\n}\n.fa-key:before {\n  content: \"\\f084\";\n}\n.fa-gears:before,\n.fa-cogs:before {\n  content: \"\\f085\";\n}\n.fa-comments:before {\n  content: \"\\f086\";\n}\n.fa-thumbs-o-up:before {\n  content: \"\\f087\";\n}\n.fa-thumbs-o-down:before {\n  content: \"\\f088\";\n}\n.fa-star-half:before {\n  content: \"\\f089\";\n}\n.fa-heart-o:before {\n  content: \"\\f08a\";\n}\n.fa-sign-out:before {\n  content: \"\\f08b\";\n}\n.fa-linkedin-square:before {\n  content: \"\\f08c\";\n}\n.fa-thumb-tack:before {\n  content: \"\\f08d\";\n}\n.fa-external-link:before {\n  content: \"\\f08e\";\n}\n.fa-sign-in:before {\n  content: \"\\f090\";\n}\n.fa-trophy:before {\n  content: \"\\f091\";\n}\n.fa-github-square:before {\n  content: \"\\f092\";\n}\n.fa-upload:before {\n  content: \"\\f093\";\n}\n.fa-lemon-o:before {\n  content: \"\\f094\";\n}\n.fa-phone:before {\n  content: \"\\f095\";\n}\n.fa-square-o:before {\n  content: \"\\f096\";\n}\n.fa-bookmark-o:before {\n  content: \"\\f097\";\n}\n.fa-phone-square:before {\n  content: \"\\f098\";\n}\n.fa-twitter:before {\n  content: \"\\f099\";\n}\n.fa-facebook-f:before,\n.fa-facebook:before {\n  content: \"\\f09a\";\n}\n.fa-github:before {\n  content: \"\\f09b\";\n}\n.fa-unlock:before {\n  content: \"\\f09c\";\n}\n.fa-credit-card:before {\n  content: \"\\f09d\";\n}\n.fa-rss:before {\n  
content: \"\\f09e\";\n}\n.fa-hdd-o:before {\n  content: \"\\f0a0\";\n}\n.fa-bullhorn:before {\n  content: \"\\f0a1\";\n}\n.fa-bell:before {\n  content: \"\\f0f3\";\n}\n.fa-certificate:before {\n  content: \"\\f0a3\";\n}\n.fa-hand-o-right:before {\n  content: \"\\f0a4\";\n}\n.fa-hand-o-left:before {\n  content: \"\\f0a5\";\n}\n.fa-hand-o-up:before {\n  content: \"\\f0a6\";\n}\n.fa-hand-o-down:before {\n  content: \"\\f0a7\";\n}\n.fa-arrow-circle-left:before {\n  content: \"\\f0a8\";\n}\n.fa-arrow-circle-right:before {\n  content: \"\\f0a9\";\n}\n.fa-arrow-circle-up:before {\n  content: \"\\f0aa\";\n}\n.fa-arrow-circle-down:before {\n  content: \"\\f0ab\";\n}\n.fa-globe:before {\n  content: \"\\f0ac\";\n}\n.fa-wrench:before {\n  content: \"\\f0ad\";\n}\n.fa-tasks:before {\n  content: \"\\f0ae\";\n}\n.fa-filter:before {\n  content: \"\\f0b0\";\n}\n.fa-briefcase:before {\n  content: \"\\f0b1\";\n}\n.fa-arrows-alt:before {\n  content: \"\\f0b2\";\n}\n.fa-group:before,\n.fa-users:before {\n  content: \"\\f0c0\";\n}\n.fa-chain:before,\n.fa-link:before {\n  content: \"\\f0c1\";\n}\n.fa-cloud:before {\n  content: \"\\f0c2\";\n}\n.fa-flask:before {\n  content: \"\\f0c3\";\n}\n.fa-cut:before,\n.fa-scissors:before {\n  content: \"\\f0c4\";\n}\n.fa-copy:before,\n.fa-files-o:before {\n  content: \"\\f0c5\";\n}\n.fa-paperclip:before {\n  content: \"\\f0c6\";\n}\n.fa-save:before,\n.fa-floppy-o:before {\n  content: \"\\f0c7\";\n}\n.fa-square:before {\n  content: \"\\f0c8\";\n}\n.fa-navicon:before,\n.fa-reorder:before,\n.fa-bars:before {\n  content: \"\\f0c9\";\n}\n.fa-list-ul:before {\n  content: \"\\f0ca\";\n}\n.fa-list-ol:before {\n  content: \"\\f0cb\";\n}\n.fa-strikethrough:before {\n  content: \"\\f0cc\";\n}\n.fa-underline:before {\n  content: \"\\f0cd\";\n}\n.fa-table:before {\n  content: \"\\f0ce\";\n}\n.fa-magic:before {\n  content: \"\\f0d0\";\n}\n.fa-truck:before {\n  content: \"\\f0d1\";\n}\n.fa-pinterest:before {\n  content: \"\\f0d2\";\n}\n.fa-pinterest-square:before 
{\n  content: \"\\f0d3\";\n}\n.fa-google-plus-square:before {\n  content: \"\\f0d4\";\n}\n.fa-google-plus:before {\n  content: \"\\f0d5\";\n}\n.fa-money:before {\n  content: \"\\f0d6\";\n}\n.fa-caret-down:before {\n  content: \"\\f0d7\";\n}\n.fa-caret-up:before {\n  content: \"\\f0d8\";\n}\n.fa-caret-left:before {\n  content: \"\\f0d9\";\n}\n.fa-caret-right:before {\n  content: \"\\f0da\";\n}\n.fa-columns:before {\n  content: \"\\f0db\";\n}\n.fa-unsorted:before,\n.fa-sort:before {\n  content: \"\\f0dc\";\n}\n.fa-sort-down:before,\n.fa-sort-desc:before {\n  content: \"\\f0dd\";\n}\n.fa-sort-up:before,\n.fa-sort-asc:before {\n  content: \"\\f0de\";\n}\n.fa-envelope:before {\n  content: \"\\f0e0\";\n}\n.fa-linkedin:before {\n  content: \"\\f0e1\";\n}\n.fa-rotate-left:before,\n.fa-undo:before {\n  content: \"\\f0e2\";\n}\n.fa-legal:before,\n.fa-gavel:before {\n  content: \"\\f0e3\";\n}\n.fa-dashboard:before,\n.fa-tachometer:before {\n  content: \"\\f0e4\";\n}\n.fa-comment-o:before {\n  content: \"\\f0e5\";\n}\n.fa-comments-o:before {\n  content: \"\\f0e6\";\n}\n.fa-flash:before,\n.fa-bolt:before {\n  content: \"\\f0e7\";\n}\n.fa-sitemap:before {\n  content: \"\\f0e8\";\n}\n.fa-umbrella:before {\n  content: \"\\f0e9\";\n}\n.fa-paste:before,\n.fa-clipboard:before {\n  content: \"\\f0ea\";\n}\n.fa-lightbulb-o:before {\n  content: \"\\f0eb\";\n}\n.fa-exchange:before {\n  content: \"\\f0ec\";\n}\n.fa-cloud-download:before {\n  content: \"\\f0ed\";\n}\n.fa-cloud-upload:before {\n  content: \"\\f0ee\";\n}\n.fa-user-md:before {\n  content: \"\\f0f0\";\n}\n.fa-stethoscope:before {\n  content: \"\\f0f1\";\n}\n.fa-suitcase:before {\n  content: \"\\f0f2\";\n}\n.fa-bell-o:before {\n  content: \"\\f0a2\";\n}\n.fa-coffee:before {\n  content: \"\\f0f4\";\n}\n.fa-cutlery:before {\n  content: \"\\f0f5\";\n}\n.fa-file-text-o:before {\n  content: \"\\f0f6\";\n}\n.fa-building-o:before {\n  content: \"\\f0f7\";\n}\n.fa-hospital-o:before {\n  content: \"\\f0f8\";\n}\n.fa-ambulance:before {\n 
 content: \"\\f0f9\";\n}\n.fa-medkit:before {\n  content: \"\\f0fa\";\n}\n.fa-fighter-jet:before {\n  content: \"\\f0fb\";\n}\n.fa-beer:before {\n  content: \"\\f0fc\";\n}\n.fa-h-square:before {\n  content: \"\\f0fd\";\n}\n.fa-plus-square:before {\n  content: \"\\f0fe\";\n}\n.fa-angle-double-left:before {\n  content: \"\\f100\";\n}\n.fa-angle-double-right:before {\n  content: \"\\f101\";\n}\n.fa-angle-double-up:before {\n  content: \"\\f102\";\n}\n.fa-angle-double-down:before {\n  content: \"\\f103\";\n}\n.fa-angle-left:before {\n  content: \"\\f104\";\n}\n.fa-angle-right:before {\n  content: \"\\f105\";\n}\n.fa-angle-up:before {\n  content: \"\\f106\";\n}\n.fa-angle-down:before {\n  content: \"\\f107\";\n}\n.fa-desktop:before {\n  content: \"\\f108\";\n}\n.fa-laptop:before {\n  content: \"\\f109\";\n}\n.fa-tablet:before {\n  content: \"\\f10a\";\n}\n.fa-mobile-phone:before,\n.fa-mobile:before {\n  content: \"\\f10b\";\n}\n.fa-circle-o:before {\n  content: \"\\f10c\";\n}\n.fa-quote-left:before {\n  content: \"\\f10d\";\n}\n.fa-quote-right:before {\n  content: \"\\f10e\";\n}\n.fa-spinner:before {\n  content: \"\\f110\";\n}\n.fa-circle:before {\n  content: \"\\f111\";\n}\n.fa-mail-reply:before,\n.fa-reply:before {\n  content: \"\\f112\";\n}\n.fa-github-alt:before {\n  content: \"\\f113\";\n}\n.fa-folder-o:before {\n  content: \"\\f114\";\n}\n.fa-folder-open-o:before {\n  content: \"\\f115\";\n}\n.fa-smile-o:before {\n  content: \"\\f118\";\n}\n.fa-frown-o:before {\n  content: \"\\f119\";\n}\n.fa-meh-o:before {\n  content: \"\\f11a\";\n}\n.fa-gamepad:before {\n  content: \"\\f11b\";\n}\n.fa-keyboard-o:before {\n  content: \"\\f11c\";\n}\n.fa-flag-o:before {\n  content: \"\\f11d\";\n}\n.fa-flag-checkered:before {\n  content: \"\\f11e\";\n}\n.fa-terminal:before {\n  content: \"\\f120\";\n}\n.fa-code:before {\n  content: \"\\f121\";\n}\n.fa-mail-reply-all:before,\n.fa-reply-all:before {\n  content: 
\"\\f122\";\n}\n.fa-star-half-empty:before,\n.fa-star-half-full:before,\n.fa-star-half-o:before {\n  content: \"\\f123\";\n}\n.fa-location-arrow:before {\n  content: \"\\f124\";\n}\n.fa-crop:before {\n  content: \"\\f125\";\n}\n.fa-code-fork:before {\n  content: \"\\f126\";\n}\n.fa-unlink:before,\n.fa-chain-broken:before {\n  content: \"\\f127\";\n}\n.fa-question:before {\n  content: \"\\f128\";\n}\n.fa-info:before {\n  content: \"\\f129\";\n}\n.fa-exclamation:before {\n  content: \"\\f12a\";\n}\n.fa-superscript:before {\n  content: \"\\f12b\";\n}\n.fa-subscript:before {\n  content: \"\\f12c\";\n}\n.fa-eraser:before {\n  content: \"\\f12d\";\n}\n.fa-puzzle-piece:before {\n  content: \"\\f12e\";\n}\n.fa-microphone:before {\n  content: \"\\f130\";\n}\n.fa-microphone-slash:before {\n  content: \"\\f131\";\n}\n.fa-shield:before {\n  content: \"\\f132\";\n}\n.fa-calendar-o:before {\n  content: \"\\f133\";\n}\n.fa-fire-extinguisher:before {\n  content: \"\\f134\";\n}\n.fa-rocket:before {\n  content: \"\\f135\";\n}\n.fa-maxcdn:before {\n  content: \"\\f136\";\n}\n.fa-chevron-circle-left:before {\n  content: \"\\f137\";\n}\n.fa-chevron-circle-right:before {\n  content: \"\\f138\";\n}\n.fa-chevron-circle-up:before {\n  content: \"\\f139\";\n}\n.fa-chevron-circle-down:before {\n  content: \"\\f13a\";\n}\n.fa-html5:before {\n  content: \"\\f13b\";\n}\n.fa-css3:before {\n  content: \"\\f13c\";\n}\n.fa-anchor:before {\n  content: \"\\f13d\";\n}\n.fa-unlock-alt:before {\n  content: \"\\f13e\";\n}\n.fa-bullseye:before {\n  content: \"\\f140\";\n}\n.fa-ellipsis-h:before {\n  content: \"\\f141\";\n}\n.fa-ellipsis-v:before {\n  content: \"\\f142\";\n}\n.fa-rss-square:before {\n  content: \"\\f143\";\n}\n.fa-play-circle:before {\n  content: \"\\f144\";\n}\n.fa-ticket:before {\n  content: \"\\f145\";\n}\n.fa-minus-square:before {\n  content: \"\\f146\";\n}\n.fa-minus-square-o:before {\n  content: \"\\f147\";\n}\n.fa-level-up:before {\n  content: \"\\f148\";\n}\n.fa-level-down:before 
{\n  content: \"\\f149\";\n}\n.fa-check-square:before {\n  content: \"\\f14a\";\n}\n.fa-pencil-square:before {\n  content: \"\\f14b\";\n}\n.fa-external-link-square:before {\n  content: \"\\f14c\";\n}\n.fa-share-square:before {\n  content: \"\\f14d\";\n}\n.fa-compass:before {\n  content: \"\\f14e\";\n}\n.fa-toggle-down:before,\n.fa-caret-square-o-down:before {\n  content: \"\\f150\";\n}\n.fa-toggle-up:before,\n.fa-caret-square-o-up:before {\n  content: \"\\f151\";\n}\n.fa-toggle-right:before,\n.fa-caret-square-o-right:before {\n  content: \"\\f152\";\n}\n.fa-euro:before,\n.fa-eur:before {\n  content: \"\\f153\";\n}\n.fa-gbp:before {\n  content: \"\\f154\";\n}\n.fa-dollar:before,\n.fa-usd:before {\n  content: \"\\f155\";\n}\n.fa-rupee:before,\n.fa-inr:before {\n  content: \"\\f156\";\n}\n.fa-cny:before,\n.fa-rmb:before,\n.fa-yen:before,\n.fa-jpy:before {\n  content: \"\\f157\";\n}\n.fa-ruble:before,\n.fa-rouble:before,\n.fa-rub:before {\n  content: \"\\f158\";\n}\n.fa-won:before,\n.fa-krw:before {\n  content: \"\\f159\";\n}\n.fa-bitcoin:before,\n.fa-btc:before {\n  content: \"\\f15a\";\n}\n.fa-file:before {\n  content: \"\\f15b\";\n}\n.fa-file-text:before {\n  content: \"\\f15c\";\n}\n.fa-sort-alpha-asc:before {\n  content: \"\\f15d\";\n}\n.fa-sort-alpha-desc:before {\n  content: \"\\f15e\";\n}\n.fa-sort-amount-asc:before {\n  content: \"\\f160\";\n}\n.fa-sort-amount-desc:before {\n  content: \"\\f161\";\n}\n.fa-sort-numeric-asc:before {\n  content: \"\\f162\";\n}\n.fa-sort-numeric-desc:before {\n  content: \"\\f163\";\n}\n.fa-thumbs-up:before {\n  content: \"\\f164\";\n}\n.fa-thumbs-down:before {\n  content: \"\\f165\";\n}\n.fa-youtube-square:before {\n  content: \"\\f166\";\n}\n.fa-youtube:before {\n  content: \"\\f167\";\n}\n.fa-xing:before {\n  content: \"\\f168\";\n}\n.fa-xing-square:before {\n  content: \"\\f169\";\n}\n.fa-youtube-play:before {\n  content: \"\\f16a\";\n}\n.fa-dropbox:before {\n  content: \"\\f16b\";\n}\n.fa-stack-overflow:before {\n  content: 
\"\\f16c\";\n}\n.fa-instagram:before {\n  content: \"\\f16d\";\n}\n.fa-flickr:before {\n  content: \"\\f16e\";\n}\n.fa-adn:before {\n  content: \"\\f170\";\n}\n.fa-bitbucket:before {\n  content: \"\\f171\";\n}\n.fa-bitbucket-square:before {\n  content: \"\\f172\";\n}\n.fa-tumblr:before {\n  content: \"\\f173\";\n}\n.fa-tumblr-square:before {\n  content: \"\\f174\";\n}\n.fa-long-arrow-down:before {\n  content: \"\\f175\";\n}\n.fa-long-arrow-up:before {\n  content: \"\\f176\";\n}\n.fa-long-arrow-left:before {\n  content: \"\\f177\";\n}\n.fa-long-arrow-right:before {\n  content: \"\\f178\";\n}\n.fa-apple:before {\n  content: \"\\f179\";\n}\n.fa-windows:before {\n  content: \"\\f17a\";\n}\n.fa-android:before {\n  content: \"\\f17b\";\n}\n.fa-linux:before {\n  content: \"\\f17c\";\n}\n.fa-dribbble:before {\n  content: \"\\f17d\";\n}\n.fa-skype:before {\n  content: \"\\f17e\";\n}\n.fa-foursquare:before {\n  content: \"\\f180\";\n}\n.fa-trello:before {\n  content: \"\\f181\";\n}\n.fa-female:before {\n  content: \"\\f182\";\n}\n.fa-male:before {\n  content: \"\\f183\";\n}\n.fa-gittip:before,\n.fa-gratipay:before {\n  content: \"\\f184\";\n}\n.fa-sun-o:before {\n  content: \"\\f185\";\n}\n.fa-moon-o:before {\n  content: \"\\f186\";\n}\n.fa-archive:before {\n  content: \"\\f187\";\n}\n.fa-bug:before {\n  content: \"\\f188\";\n}\n.fa-vk:before {\n  content: \"\\f189\";\n}\n.fa-weibo:before {\n  content: \"\\f18a\";\n}\n.fa-renren:before {\n  content: \"\\f18b\";\n}\n.fa-pagelines:before {\n  content: \"\\f18c\";\n}\n.fa-stack-exchange:before {\n  content: \"\\f18d\";\n}\n.fa-arrow-circle-o-right:before {\n  content: \"\\f18e\";\n}\n.fa-arrow-circle-o-left:before {\n  content: \"\\f190\";\n}\n.fa-toggle-left:before,\n.fa-caret-square-o-left:before {\n  content: \"\\f191\";\n}\n.fa-dot-circle-o:before {\n  content: \"\\f192\";\n}\n.fa-wheelchair:before {\n  content: \"\\f193\";\n}\n.fa-vimeo-square:before {\n  content: \"\\f194\";\n}\n.fa-turkish-lira:before,\n.fa-try:before 
{\n  content: \"\\f195\";\n}\n.fa-plus-square-o:before {\n  content: \"\\f196\";\n}\n.fa-space-shuttle:before {\n  content: \"\\f197\";\n}\n.fa-slack:before {\n  content: \"\\f198\";\n}\n.fa-envelope-square:before {\n  content: \"\\f199\";\n}\n.fa-wordpress:before {\n  content: \"\\f19a\";\n}\n.fa-openid:before {\n  content: \"\\f19b\";\n}\n.fa-institution:before,\n.fa-bank:before,\n.fa-university:before {\n  content: \"\\f19c\";\n}\n.fa-mortar-board:before,\n.fa-graduation-cap:before {\n  content: \"\\f19d\";\n}\n.fa-yahoo:before {\n  content: \"\\f19e\";\n}\n.fa-google:before {\n  content: \"\\f1a0\";\n}\n.fa-reddit:before {\n  content: \"\\f1a1\";\n}\n.fa-reddit-square:before {\n  content: \"\\f1a2\";\n}\n.fa-stumbleupon-circle:before {\n  content: \"\\f1a3\";\n}\n.fa-stumbleupon:before {\n  content: \"\\f1a4\";\n}\n.fa-delicious:before {\n  content: \"\\f1a5\";\n}\n.fa-digg:before {\n  content: \"\\f1a6\";\n}\n.fa-pied-piper:before {\n  content: \"\\f1a7\";\n}\n.fa-pied-piper-alt:before {\n  content: \"\\f1a8\";\n}\n.fa-drupal:before {\n  content: \"\\f1a9\";\n}\n.fa-joomla:before {\n  content: \"\\f1aa\";\n}\n.fa-language:before {\n  content: \"\\f1ab\";\n}\n.fa-fax:before {\n  content: \"\\f1ac\";\n}\n.fa-building:before {\n  content: \"\\f1ad\";\n}\n.fa-child:before {\n  content: \"\\f1ae\";\n}\n.fa-paw:before {\n  content: \"\\f1b0\";\n}\n.fa-spoon:before {\n  content: \"\\f1b1\";\n}\n.fa-cube:before {\n  content: \"\\f1b2\";\n}\n.fa-cubes:before {\n  content: \"\\f1b3\";\n}\n.fa-behance:before {\n  content: \"\\f1b4\";\n}\n.fa-behance-square:before {\n  content: \"\\f1b5\";\n}\n.fa-steam:before {\n  content: \"\\f1b6\";\n}\n.fa-steam-square:before {\n  content: \"\\f1b7\";\n}\n.fa-recycle:before {\n  content: \"\\f1b8\";\n}\n.fa-automobile:before,\n.fa-car:before {\n  content: \"\\f1b9\";\n}\n.fa-cab:before,\n.fa-taxi:before {\n  content: \"\\f1ba\";\n}\n.fa-tree:before {\n  content: \"\\f1bb\";\n}\n.fa-spotify:before {\n  content: 
\"\\f1bc\";\n}\n.fa-deviantart:before {\n  content: \"\\f1bd\";\n}\n.fa-soundcloud:before {\n  content: \"\\f1be\";\n}\n.fa-database:before {\n  content: \"\\f1c0\";\n}\n.fa-file-pdf-o:before {\n  content: \"\\f1c1\";\n}\n.fa-file-word-o:before {\n  content: \"\\f1c2\";\n}\n.fa-file-excel-o:before {\n  content: \"\\f1c3\";\n}\n.fa-file-powerpoint-o:before {\n  content: \"\\f1c4\";\n}\n.fa-file-photo-o:before,\n.fa-file-picture-o:before,\n.fa-file-image-o:before {\n  content: \"\\f1c5\";\n}\n.fa-file-zip-o:before,\n.fa-file-archive-o:before {\n  content: \"\\f1c6\";\n}\n.fa-file-sound-o:before,\n.fa-file-audio-o:before {\n  content: \"\\f1c7\";\n}\n.fa-file-movie-o:before,\n.fa-file-video-o:before {\n  content: \"\\f1c8\";\n}\n.fa-file-code-o:before {\n  content: \"\\f1c9\";\n}\n.fa-vine:before {\n  content: \"\\f1ca\";\n}\n.fa-codepen:before {\n  content: \"\\f1cb\";\n}\n.fa-jsfiddle:before {\n  content: \"\\f1cc\";\n}\n.fa-life-bouy:before,\n.fa-life-buoy:before,\n.fa-life-saver:before,\n.fa-support:before,\n.fa-life-ring:before {\n  content: \"\\f1cd\";\n}\n.fa-circle-o-notch:before {\n  content: \"\\f1ce\";\n}\n.fa-ra:before,\n.fa-rebel:before {\n  content: \"\\f1d0\";\n}\n.fa-ge:before,\n.fa-empire:before {\n  content: \"\\f1d1\";\n}\n.fa-git-square:before {\n  content: \"\\f1d2\";\n}\n.fa-git:before {\n  content: \"\\f1d3\";\n}\n.fa-hacker-news:before {\n  content: \"\\f1d4\";\n}\n.fa-tencent-weibo:before {\n  content: \"\\f1d5\";\n}\n.fa-qq:before {\n  content: \"\\f1d6\";\n}\n.fa-wechat:before,\n.fa-weixin:before {\n  content: \"\\f1d7\";\n}\n.fa-send:before,\n.fa-paper-plane:before {\n  content: \"\\f1d8\";\n}\n.fa-send-o:before,\n.fa-paper-plane-o:before {\n  content: \"\\f1d9\";\n}\n.fa-history:before {\n  content: \"\\f1da\";\n}\n.fa-genderless:before,\n.fa-circle-thin:before {\n  content: \"\\f1db\";\n}\n.fa-header:before {\n  content: \"\\f1dc\";\n}\n.fa-paragraph:before {\n  content: \"\\f1dd\";\n}\n.fa-sliders:before {\n  content: 
\"\\f1de\";\n}\n.fa-share-alt:before {\n  content: \"\\f1e0\";\n}\n.fa-share-alt-square:before {\n  content: \"\\f1e1\";\n}\n.fa-bomb:before {\n  content: \"\\f1e2\";\n}\n.fa-soccer-ball-o:before,\n.fa-futbol-o:before {\n  content: \"\\f1e3\";\n}\n.fa-tty:before {\n  content: \"\\f1e4\";\n}\n.fa-binoculars:before {\n  content: \"\\f1e5\";\n}\n.fa-plug:before {\n  content: \"\\f1e6\";\n}\n.fa-slideshare:before {\n  content: \"\\f1e7\";\n}\n.fa-twitch:before {\n  content: \"\\f1e8\";\n}\n.fa-yelp:before {\n  content: \"\\f1e9\";\n}\n.fa-newspaper-o:before {\n  content: \"\\f1ea\";\n}\n.fa-wifi:before {\n  content: \"\\f1eb\";\n}\n.fa-calculator:before {\n  content: \"\\f1ec\";\n}\n.fa-paypal:before {\n  content: \"\\f1ed\";\n}\n.fa-google-wallet:before {\n  content: \"\\f1ee\";\n}\n.fa-cc-visa:before {\n  content: \"\\f1f0\";\n}\n.fa-cc-mastercard:before {\n  content: \"\\f1f1\";\n}\n.fa-cc-discover:before {\n  content: \"\\f1f2\";\n}\n.fa-cc-amex:before {\n  content: \"\\f1f3\";\n}\n.fa-cc-paypal:before {\n  content: \"\\f1f4\";\n}\n.fa-cc-stripe:before {\n  content: \"\\f1f5\";\n}\n.fa-bell-slash:before {\n  content: \"\\f1f6\";\n}\n.fa-bell-slash-o:before {\n  content: \"\\f1f7\";\n}\n.fa-trash:before {\n  content: \"\\f1f8\";\n}\n.fa-copyright:before {\n  content: \"\\f1f9\";\n}\n.fa-at:before {\n  content: \"\\f1fa\";\n}\n.fa-eyedropper:before {\n  content: \"\\f1fb\";\n}\n.fa-paint-brush:before {\n  content: \"\\f1fc\";\n}\n.fa-birthday-cake:before {\n  content: \"\\f1fd\";\n}\n.fa-area-chart:before {\n  content: \"\\f1fe\";\n}\n.fa-pie-chart:before {\n  content: \"\\f200\";\n}\n.fa-line-chart:before {\n  content: \"\\f201\";\n}\n.fa-lastfm:before {\n  content: \"\\f202\";\n}\n.fa-lastfm-square:before {\n  content: \"\\f203\";\n}\n.fa-toggle-off:before {\n  content: \"\\f204\";\n}\n.fa-toggle-on:before {\n  content: \"\\f205\";\n}\n.fa-bicycle:before {\n  content: \"\\f206\";\n}\n.fa-bus:before {\n  content: \"\\f207\";\n}\n.fa-ioxhost:before {\n  content: 
\"\\f208\";\n}\n.fa-angellist:before {\n  content: \"\\f209\";\n}\n.fa-cc:before {\n  content: \"\\f20a\";\n}\n.fa-shekel:before,\n.fa-sheqel:before,\n.fa-ils:before {\n  content: \"\\f20b\";\n}\n.fa-meanpath:before {\n  content: \"\\f20c\";\n}\n.fa-buysellads:before {\n  content: \"\\f20d\";\n}\n.fa-connectdevelop:before {\n  content: \"\\f20e\";\n}\n.fa-dashcube:before {\n  content: \"\\f210\";\n}\n.fa-forumbee:before {\n  content: \"\\f211\";\n}\n.fa-leanpub:before {\n  content: \"\\f212\";\n}\n.fa-sellsy:before {\n  content: \"\\f213\";\n}\n.fa-shirtsinbulk:before {\n  content: \"\\f214\";\n}\n.fa-simplybuilt:before {\n  content: \"\\f215\";\n}\n.fa-skyatlas:before {\n  content: \"\\f216\";\n}\n.fa-cart-plus:before {\n  content: \"\\f217\";\n}\n.fa-cart-arrow-down:before {\n  content: \"\\f218\";\n}\n.fa-diamond:before {\n  content: \"\\f219\";\n}\n.fa-ship:before {\n  content: \"\\f21a\";\n}\n.fa-user-secret:before {\n  content: \"\\f21b\";\n}\n.fa-motorcycle:before {\n  content: \"\\f21c\";\n}\n.fa-street-view:before {\n  content: \"\\f21d\";\n}\n.fa-heartbeat:before {\n  content: \"\\f21e\";\n}\n.fa-venus:before {\n  content: \"\\f221\";\n}\n.fa-mars:before {\n  content: \"\\f222\";\n}\n.fa-mercury:before {\n  content: \"\\f223\";\n}\n.fa-transgender:before {\n  content: \"\\f224\";\n}\n.fa-transgender-alt:before {\n  content: \"\\f225\";\n}\n.fa-venus-double:before {\n  content: \"\\f226\";\n}\n.fa-mars-double:before {\n  content: \"\\f227\";\n}\n.fa-venus-mars:before {\n  content: \"\\f228\";\n}\n.fa-mars-stroke:before {\n  content: \"\\f229\";\n}\n.fa-mars-stroke-v:before {\n  content: \"\\f22a\";\n}\n.fa-mars-stroke-h:before {\n  content: \"\\f22b\";\n}\n.fa-neuter:before {\n  content: \"\\f22c\";\n}\n.fa-facebook-official:before {\n  content: \"\\f230\";\n}\n.fa-pinterest-p:before {\n  content: \"\\f231\";\n}\n.fa-whatsapp:before {\n  content: \"\\f232\";\n}\n.fa-server:before {\n  content: \"\\f233\";\n}\n.fa-user-plus:before {\n  content: 
\"\\f234\";\n}\n.fa-user-times:before {\n  content: \"\\f235\";\n}\n.fa-hotel:before,\n.fa-bed:before {\n  content: \"\\f236\";\n}\n.fa-viacoin:before {\n  content: \"\\f237\";\n}\n.fa-train:before {\n  content: \"\\f238\";\n}\n.fa-subway:before {\n  content: \"\\f239\";\n}\n.fa-medium:before {\n  content: \"\\f23a\";\n}\n"
  },
  {
    "path": "website/font-awesome/less/animated.less",
    "content": "// Animated Icons\n// --------------------------\n\n.@{fa-css-prefix}-spin {\n  -webkit-animation: fa-spin 2s infinite linear;\n          animation: fa-spin 2s infinite linear;\n}\n\n.@{fa-css-prefix}-pulse {\n  -webkit-animation: fa-spin 1s infinite steps(8);\n          animation: fa-spin 1s infinite steps(8);\n}\n\n@-webkit-keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n            transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n            transform: rotate(359deg);\n  }\n}\n\n@keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n            transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n            transform: rotate(359deg);\n  }\n}\n"
  },
  {
    "path": "website/font-awesome/less/bordered-pulled.less",
    "content": "// Bordered & Pulled\n// -------------------------\n\n.@{fa-css-prefix}-border {\n  padding: .2em .25em .15em;\n  border: solid .08em @fa-border-color;\n  border-radius: .1em;\n}\n\n.pull-right { float: right; }\n.pull-left { float: left; }\n\n.@{fa-css-prefix} {\n  &.pull-left { margin-right: .3em; }\n  &.pull-right { margin-left: .3em; }\n}\n"
  },
  {
    "path": "website/font-awesome/less/core.less",
    "content": "// Base Class Definition\n// -------------------------\n\n.@{fa-css-prefix} {\n  display: inline-block;\n  font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration\n  font-size: inherit; // can't have font-size inherit on line above, so need to override\n  text-rendering: auto; // optimizelegibility throws things off #1094\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  transform: translate(0, 0); // ensures no half-pixel rendering in firefox\n\n}\n"
  },
  {
    "path": "website/font-awesome/less/fixed-width.less",
    "content": "// Fixed Width Icons\n// -------------------------\n.@{fa-css-prefix}-fw {\n  width: (18em / 14);\n  text-align: center;\n}\n"
  },
  {
    "path": "website/font-awesome/less/font-awesome.less",
    "content": "/*!\n *  Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome\n *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */\n\n@import \"variables.less\";\n@import \"mixins.less\";\n@import \"path.less\";\n@import \"core.less\";\n@import \"larger.less\";\n@import \"fixed-width.less\";\n@import \"list.less\";\n@import \"bordered-pulled.less\";\n@import \"animated.less\";\n@import \"rotated-flipped.less\";\n@import \"stacked.less\";\n@import \"icons.less\";\n"
  },
  {
    "path": "website/font-awesome/less/icons.less",
    "content": "/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen\n   readers do not read off random characters that represent icons */\n\n.@{fa-css-prefix}-glass:before { content: @fa-var-glass; }\n.@{fa-css-prefix}-music:before { content: @fa-var-music; }\n.@{fa-css-prefix}-search:before { content: @fa-var-search; }\n.@{fa-css-prefix}-envelope-o:before { content: @fa-var-envelope-o; }\n.@{fa-css-prefix}-heart:before { content: @fa-var-heart; }\n.@{fa-css-prefix}-star:before { content: @fa-var-star; }\n.@{fa-css-prefix}-star-o:before { content: @fa-var-star-o; }\n.@{fa-css-prefix}-user:before { content: @fa-var-user; }\n.@{fa-css-prefix}-film:before { content: @fa-var-film; }\n.@{fa-css-prefix}-th-large:before { content: @fa-var-th-large; }\n.@{fa-css-prefix}-th:before { content: @fa-var-th; }\n.@{fa-css-prefix}-th-list:before { content: @fa-var-th-list; }\n.@{fa-css-prefix}-check:before { content: @fa-var-check; }\n.@{fa-css-prefix}-remove:before,\n.@{fa-css-prefix}-close:before,\n.@{fa-css-prefix}-times:before { content: @fa-var-times; }\n.@{fa-css-prefix}-search-plus:before { content: @fa-var-search-plus; }\n.@{fa-css-prefix}-search-minus:before { content: @fa-var-search-minus; }\n.@{fa-css-prefix}-power-off:before { content: @fa-var-power-off; }\n.@{fa-css-prefix}-signal:before { content: @fa-var-signal; }\n.@{fa-css-prefix}-gear:before,\n.@{fa-css-prefix}-cog:before { content: @fa-var-cog; }\n.@{fa-css-prefix}-trash-o:before { content: @fa-var-trash-o; }\n.@{fa-css-prefix}-home:before { content: @fa-var-home; }\n.@{fa-css-prefix}-file-o:before { content: @fa-var-file-o; }\n.@{fa-css-prefix}-clock-o:before { content: @fa-var-clock-o; }\n.@{fa-css-prefix}-road:before { content: @fa-var-road; }\n.@{fa-css-prefix}-download:before { content: @fa-var-download; }\n.@{fa-css-prefix}-arrow-circle-o-down:before { content: @fa-var-arrow-circle-o-down; }\n.@{fa-css-prefix}-arrow-circle-o-up:before { content: @fa-var-arrow-circle-o-up; 
}\n.@{fa-css-prefix}-inbox:before { content: @fa-var-inbox; }\n.@{fa-css-prefix}-play-circle-o:before { content: @fa-var-play-circle-o; }\n.@{fa-css-prefix}-rotate-right:before,\n.@{fa-css-prefix}-repeat:before { content: @fa-var-repeat; }\n.@{fa-css-prefix}-refresh:before { content: @fa-var-refresh; }\n.@{fa-css-prefix}-list-alt:before { content: @fa-var-list-alt; }\n.@{fa-css-prefix}-lock:before { content: @fa-var-lock; }\n.@{fa-css-prefix}-flag:before { content: @fa-var-flag; }\n.@{fa-css-prefix}-headphones:before { content: @fa-var-headphones; }\n.@{fa-css-prefix}-volume-off:before { content: @fa-var-volume-off; }\n.@{fa-css-prefix}-volume-down:before { content: @fa-var-volume-down; }\n.@{fa-css-prefix}-volume-up:before { content: @fa-var-volume-up; }\n.@{fa-css-prefix}-qrcode:before { content: @fa-var-qrcode; }\n.@{fa-css-prefix}-barcode:before { content: @fa-var-barcode; }\n.@{fa-css-prefix}-tag:before { content: @fa-var-tag; }\n.@{fa-css-prefix}-tags:before { content: @fa-var-tags; }\n.@{fa-css-prefix}-book:before { content: @fa-var-book; }\n.@{fa-css-prefix}-bookmark:before { content: @fa-var-bookmark; }\n.@{fa-css-prefix}-print:before { content: @fa-var-print; }\n.@{fa-css-prefix}-camera:before { content: @fa-var-camera; }\n.@{fa-css-prefix}-font:before { content: @fa-var-font; }\n.@{fa-css-prefix}-bold:before { content: @fa-var-bold; }\n.@{fa-css-prefix}-italic:before { content: @fa-var-italic; }\n.@{fa-css-prefix}-text-height:before { content: @fa-var-text-height; }\n.@{fa-css-prefix}-text-width:before { content: @fa-var-text-width; }\n.@{fa-css-prefix}-align-left:before { content: @fa-var-align-left; }\n.@{fa-css-prefix}-align-center:before { content: @fa-var-align-center; }\n.@{fa-css-prefix}-align-right:before { content: @fa-var-align-right; }\n.@{fa-css-prefix}-align-justify:before { content: @fa-var-align-justify; }\n.@{fa-css-prefix}-list:before { content: @fa-var-list; }\n.@{fa-css-prefix}-dedent:before,\n.@{fa-css-prefix}-outdent:before { 
content: @fa-var-outdent; }\n.@{fa-css-prefix}-indent:before { content: @fa-var-indent; }\n.@{fa-css-prefix}-video-camera:before { content: @fa-var-video-camera; }\n.@{fa-css-prefix}-photo:before,\n.@{fa-css-prefix}-image:before,\n.@{fa-css-prefix}-picture-o:before { content: @fa-var-picture-o; }\n.@{fa-css-prefix}-pencil:before { content: @fa-var-pencil; }\n.@{fa-css-prefix}-map-marker:before { content: @fa-var-map-marker; }\n.@{fa-css-prefix}-adjust:before { content: @fa-var-adjust; }\n.@{fa-css-prefix}-tint:before { content: @fa-var-tint; }\n.@{fa-css-prefix}-edit:before,\n.@{fa-css-prefix}-pencil-square-o:before { content: @fa-var-pencil-square-o; }\n.@{fa-css-prefix}-share-square-o:before { content: @fa-var-share-square-o; }\n.@{fa-css-prefix}-check-square-o:before { content: @fa-var-check-square-o; }\n.@{fa-css-prefix}-arrows:before { content: @fa-var-arrows; }\n.@{fa-css-prefix}-step-backward:before { content: @fa-var-step-backward; }\n.@{fa-css-prefix}-fast-backward:before { content: @fa-var-fast-backward; }\n.@{fa-css-prefix}-backward:before { content: @fa-var-backward; }\n.@{fa-css-prefix}-play:before { content: @fa-var-play; }\n.@{fa-css-prefix}-pause:before { content: @fa-var-pause; }\n.@{fa-css-prefix}-stop:before { content: @fa-var-stop; }\n.@{fa-css-prefix}-forward:before { content: @fa-var-forward; }\n.@{fa-css-prefix}-fast-forward:before { content: @fa-var-fast-forward; }\n.@{fa-css-prefix}-step-forward:before { content: @fa-var-step-forward; }\n.@{fa-css-prefix}-eject:before { content: @fa-var-eject; }\n.@{fa-css-prefix}-chevron-left:before { content: @fa-var-chevron-left; }\n.@{fa-css-prefix}-chevron-right:before { content: @fa-var-chevron-right; }\n.@{fa-css-prefix}-plus-circle:before { content: @fa-var-plus-circle; }\n.@{fa-css-prefix}-minus-circle:before { content: @fa-var-minus-circle; }\n.@{fa-css-prefix}-times-circle:before { content: @fa-var-times-circle; }\n.@{fa-css-prefix}-check-circle:before { content: @fa-var-check-circle; 
}\n.@{fa-css-prefix}-question-circle:before { content: @fa-var-question-circle; }\n.@{fa-css-prefix}-info-circle:before { content: @fa-var-info-circle; }\n.@{fa-css-prefix}-crosshairs:before { content: @fa-var-crosshairs; }\n.@{fa-css-prefix}-times-circle-o:before { content: @fa-var-times-circle-o; }\n.@{fa-css-prefix}-check-circle-o:before { content: @fa-var-check-circle-o; }\n.@{fa-css-prefix}-ban:before { content: @fa-var-ban; }\n.@{fa-css-prefix}-arrow-left:before { content: @fa-var-arrow-left; }\n.@{fa-css-prefix}-arrow-right:before { content: @fa-var-arrow-right; }\n.@{fa-css-prefix}-arrow-up:before { content: @fa-var-arrow-up; }\n.@{fa-css-prefix}-arrow-down:before { content: @fa-var-arrow-down; }\n.@{fa-css-prefix}-mail-forward:before,\n.@{fa-css-prefix}-share:before { content: @fa-var-share; }\n.@{fa-css-prefix}-expand:before { content: @fa-var-expand; }\n.@{fa-css-prefix}-compress:before { content: @fa-var-compress; }\n.@{fa-css-prefix}-plus:before { content: @fa-var-plus; }\n.@{fa-css-prefix}-minus:before { content: @fa-var-minus; }\n.@{fa-css-prefix}-asterisk:before { content: @fa-var-asterisk; }\n.@{fa-css-prefix}-exclamation-circle:before { content: @fa-var-exclamation-circle; }\n.@{fa-css-prefix}-gift:before { content: @fa-var-gift; }\n.@{fa-css-prefix}-leaf:before { content: @fa-var-leaf; }\n.@{fa-css-prefix}-fire:before { content: @fa-var-fire; }\n.@{fa-css-prefix}-eye:before { content: @fa-var-eye; }\n.@{fa-css-prefix}-eye-slash:before { content: @fa-var-eye-slash; }\n.@{fa-css-prefix}-warning:before,\n.@{fa-css-prefix}-exclamation-triangle:before { content: @fa-var-exclamation-triangle; }\n.@{fa-css-prefix}-plane:before { content: @fa-var-plane; }\n.@{fa-css-prefix}-calendar:before { content: @fa-var-calendar; }\n.@{fa-css-prefix}-random:before { content: @fa-var-random; }\n.@{fa-css-prefix}-comment:before { content: @fa-var-comment; }\n.@{fa-css-prefix}-magnet:before { content: @fa-var-magnet; }\n.@{fa-css-prefix}-chevron-up:before { content: 
@fa-var-chevron-up; }\n.@{fa-css-prefix}-chevron-down:before { content: @fa-var-chevron-down; }\n.@{fa-css-prefix}-retweet:before { content: @fa-var-retweet; }\n.@{fa-css-prefix}-shopping-cart:before { content: @fa-var-shopping-cart; }\n.@{fa-css-prefix}-folder:before { content: @fa-var-folder; }\n.@{fa-css-prefix}-folder-open:before { content: @fa-var-folder-open; }\n.@{fa-css-prefix}-arrows-v:before { content: @fa-var-arrows-v; }\n.@{fa-css-prefix}-arrows-h:before { content: @fa-var-arrows-h; }\n.@{fa-css-prefix}-bar-chart-o:before,\n.@{fa-css-prefix}-bar-chart:before { content: @fa-var-bar-chart; }\n.@{fa-css-prefix}-twitter-square:before { content: @fa-var-twitter-square; }\n.@{fa-css-prefix}-facebook-square:before { content: @fa-var-facebook-square; }\n.@{fa-css-prefix}-camera-retro:before { content: @fa-var-camera-retro; }\n.@{fa-css-prefix}-key:before { content: @fa-var-key; }\n.@{fa-css-prefix}-gears:before,\n.@{fa-css-prefix}-cogs:before { content: @fa-var-cogs; }\n.@{fa-css-prefix}-comments:before { content: @fa-var-comments; }\n.@{fa-css-prefix}-thumbs-o-up:before { content: @fa-var-thumbs-o-up; }\n.@{fa-css-prefix}-thumbs-o-down:before { content: @fa-var-thumbs-o-down; }\n.@{fa-css-prefix}-star-half:before { content: @fa-var-star-half; }\n.@{fa-css-prefix}-heart-o:before { content: @fa-var-heart-o; }\n.@{fa-css-prefix}-sign-out:before { content: @fa-var-sign-out; }\n.@{fa-css-prefix}-linkedin-square:before { content: @fa-var-linkedin-square; }\n.@{fa-css-prefix}-thumb-tack:before { content: @fa-var-thumb-tack; }\n.@{fa-css-prefix}-external-link:before { content: @fa-var-external-link; }\n.@{fa-css-prefix}-sign-in:before { content: @fa-var-sign-in; }\n.@{fa-css-prefix}-trophy:before { content: @fa-var-trophy; }\n.@{fa-css-prefix}-github-square:before { content: @fa-var-github-square; }\n.@{fa-css-prefix}-upload:before { content: @fa-var-upload; }\n.@{fa-css-prefix}-lemon-o:before { content: @fa-var-lemon-o; }\n.@{fa-css-prefix}-phone:before { content: 
@fa-var-phone; }\n.@{fa-css-prefix}-square-o:before { content: @fa-var-square-o; }\n.@{fa-css-prefix}-bookmark-o:before { content: @fa-var-bookmark-o; }\n.@{fa-css-prefix}-phone-square:before { content: @fa-var-phone-square; }\n.@{fa-css-prefix}-twitter:before { content: @fa-var-twitter; }\n.@{fa-css-prefix}-facebook-f:before,\n.@{fa-css-prefix}-facebook:before { content: @fa-var-facebook; }\n.@{fa-css-prefix}-github:before { content: @fa-var-github; }\n.@{fa-css-prefix}-unlock:before { content: @fa-var-unlock; }\n.@{fa-css-prefix}-credit-card:before { content: @fa-var-credit-card; }\n.@{fa-css-prefix}-rss:before { content: @fa-var-rss; }\n.@{fa-css-prefix}-hdd-o:before { content: @fa-var-hdd-o; }\n.@{fa-css-prefix}-bullhorn:before { content: @fa-var-bullhorn; }\n.@{fa-css-prefix}-bell:before { content: @fa-var-bell; }\n.@{fa-css-prefix}-certificate:before { content: @fa-var-certificate; }\n.@{fa-css-prefix}-hand-o-right:before { content: @fa-var-hand-o-right; }\n.@{fa-css-prefix}-hand-o-left:before { content: @fa-var-hand-o-left; }\n.@{fa-css-prefix}-hand-o-up:before { content: @fa-var-hand-o-up; }\n.@{fa-css-prefix}-hand-o-down:before { content: @fa-var-hand-o-down; }\n.@{fa-css-prefix}-arrow-circle-left:before { content: @fa-var-arrow-circle-left; }\n.@{fa-css-prefix}-arrow-circle-right:before { content: @fa-var-arrow-circle-right; }\n.@{fa-css-prefix}-arrow-circle-up:before { content: @fa-var-arrow-circle-up; }\n.@{fa-css-prefix}-arrow-circle-down:before { content: @fa-var-arrow-circle-down; }\n.@{fa-css-prefix}-globe:before { content: @fa-var-globe; }\n.@{fa-css-prefix}-wrench:before { content: @fa-var-wrench; }\n.@{fa-css-prefix}-tasks:before { content: @fa-var-tasks; }\n.@{fa-css-prefix}-filter:before { content: @fa-var-filter; }\n.@{fa-css-prefix}-briefcase:before { content: @fa-var-briefcase; }\n.@{fa-css-prefix}-arrows-alt:before { content: @fa-var-arrows-alt; }\n.@{fa-css-prefix}-group:before,\n.@{fa-css-prefix}-users:before { content: @fa-var-users; 
}\n.@{fa-css-prefix}-chain:before,\n.@{fa-css-prefix}-link:before { content: @fa-var-link; }\n.@{fa-css-prefix}-cloud:before { content: @fa-var-cloud; }\n.@{fa-css-prefix}-flask:before { content: @fa-var-flask; }\n.@{fa-css-prefix}-cut:before,\n.@{fa-css-prefix}-scissors:before { content: @fa-var-scissors; }\n.@{fa-css-prefix}-copy:before,\n.@{fa-css-prefix}-files-o:before { content: @fa-var-files-o; }\n.@{fa-css-prefix}-paperclip:before { content: @fa-var-paperclip; }\n.@{fa-css-prefix}-save:before,\n.@{fa-css-prefix}-floppy-o:before { content: @fa-var-floppy-o; }\n.@{fa-css-prefix}-square:before { content: @fa-var-square; }\n.@{fa-css-prefix}-navicon:before,\n.@{fa-css-prefix}-reorder:before,\n.@{fa-css-prefix}-bars:before { content: @fa-var-bars; }\n.@{fa-css-prefix}-list-ul:before { content: @fa-var-list-ul; }\n.@{fa-css-prefix}-list-ol:before { content: @fa-var-list-ol; }\n.@{fa-css-prefix}-strikethrough:before { content: @fa-var-strikethrough; }\n.@{fa-css-prefix}-underline:before { content: @fa-var-underline; }\n.@{fa-css-prefix}-table:before { content: @fa-var-table; }\n.@{fa-css-prefix}-magic:before { content: @fa-var-magic; }\n.@{fa-css-prefix}-truck:before { content: @fa-var-truck; }\n.@{fa-css-prefix}-pinterest:before { content: @fa-var-pinterest; }\n.@{fa-css-prefix}-pinterest-square:before { content: @fa-var-pinterest-square; }\n.@{fa-css-prefix}-google-plus-square:before { content: @fa-var-google-plus-square; }\n.@{fa-css-prefix}-google-plus:before { content: @fa-var-google-plus; }\n.@{fa-css-prefix}-money:before { content: @fa-var-money; }\n.@{fa-css-prefix}-caret-down:before { content: @fa-var-caret-down; }\n.@{fa-css-prefix}-caret-up:before { content: @fa-var-caret-up; }\n.@{fa-css-prefix}-caret-left:before { content: @fa-var-caret-left; }\n.@{fa-css-prefix}-caret-right:before { content: @fa-var-caret-right; }\n.@{fa-css-prefix}-columns:before { content: @fa-var-columns; }\n.@{fa-css-prefix}-unsorted:before,\n.@{fa-css-prefix}-sort:before { 
content: @fa-var-sort; }\n.@{fa-css-prefix}-sort-down:before,\n.@{fa-css-prefix}-sort-desc:before { content: @fa-var-sort-desc; }\n.@{fa-css-prefix}-sort-up:before,\n.@{fa-css-prefix}-sort-asc:before { content: @fa-var-sort-asc; }\n.@{fa-css-prefix}-envelope:before { content: @fa-var-envelope; }\n.@{fa-css-prefix}-linkedin:before { content: @fa-var-linkedin; }\n.@{fa-css-prefix}-rotate-left:before,\n.@{fa-css-prefix}-undo:before { content: @fa-var-undo; }\n.@{fa-css-prefix}-legal:before,\n.@{fa-css-prefix}-gavel:before { content: @fa-var-gavel; }\n.@{fa-css-prefix}-dashboard:before,\n.@{fa-css-prefix}-tachometer:before { content: @fa-var-tachometer; }\n.@{fa-css-prefix}-comment-o:before { content: @fa-var-comment-o; }\n.@{fa-css-prefix}-comments-o:before { content: @fa-var-comments-o; }\n.@{fa-css-prefix}-flash:before,\n.@{fa-css-prefix}-bolt:before { content: @fa-var-bolt; }\n.@{fa-css-prefix}-sitemap:before { content: @fa-var-sitemap; }\n.@{fa-css-prefix}-umbrella:before { content: @fa-var-umbrella; }\n.@{fa-css-prefix}-paste:before,\n.@{fa-css-prefix}-clipboard:before { content: @fa-var-clipboard; }\n.@{fa-css-prefix}-lightbulb-o:before { content: @fa-var-lightbulb-o; }\n.@{fa-css-prefix}-exchange:before { content: @fa-var-exchange; }\n.@{fa-css-prefix}-cloud-download:before { content: @fa-var-cloud-download; }\n.@{fa-css-prefix}-cloud-upload:before { content: @fa-var-cloud-upload; }\n.@{fa-css-prefix}-user-md:before { content: @fa-var-user-md; }\n.@{fa-css-prefix}-stethoscope:before { content: @fa-var-stethoscope; }\n.@{fa-css-prefix}-suitcase:before { content: @fa-var-suitcase; }\n.@{fa-css-prefix}-bell-o:before { content: @fa-var-bell-o; }\n.@{fa-css-prefix}-coffee:before { content: @fa-var-coffee; }\n.@{fa-css-prefix}-cutlery:before { content: @fa-var-cutlery; }\n.@{fa-css-prefix}-file-text-o:before { content: @fa-var-file-text-o; }\n.@{fa-css-prefix}-building-o:before { content: @fa-var-building-o; }\n.@{fa-css-prefix}-hospital-o:before { content: 
@fa-var-hospital-o; }\n.@{fa-css-prefix}-ambulance:before { content: @fa-var-ambulance; }\n.@{fa-css-prefix}-medkit:before { content: @fa-var-medkit; }\n.@{fa-css-prefix}-fighter-jet:before { content: @fa-var-fighter-jet; }\n.@{fa-css-prefix}-beer:before { content: @fa-var-beer; }\n.@{fa-css-prefix}-h-square:before { content: @fa-var-h-square; }\n.@{fa-css-prefix}-plus-square:before { content: @fa-var-plus-square; }\n.@{fa-css-prefix}-angle-double-left:before { content: @fa-var-angle-double-left; }\n.@{fa-css-prefix}-angle-double-right:before { content: @fa-var-angle-double-right; }\n.@{fa-css-prefix}-angle-double-up:before { content: @fa-var-angle-double-up; }\n.@{fa-css-prefix}-angle-double-down:before { content: @fa-var-angle-double-down; }\n.@{fa-css-prefix}-angle-left:before { content: @fa-var-angle-left; }\n.@{fa-css-prefix}-angle-right:before { content: @fa-var-angle-right; }\n.@{fa-css-prefix}-angle-up:before { content: @fa-var-angle-up; }\n.@{fa-css-prefix}-angle-down:before { content: @fa-var-angle-down; }\n.@{fa-css-prefix}-desktop:before { content: @fa-var-desktop; }\n.@{fa-css-prefix}-laptop:before { content: @fa-var-laptop; }\n.@{fa-css-prefix}-tablet:before { content: @fa-var-tablet; }\n.@{fa-css-prefix}-mobile-phone:before,\n.@{fa-css-prefix}-mobile:before { content: @fa-var-mobile; }\n.@{fa-css-prefix}-circle-o:before { content: @fa-var-circle-o; }\n.@{fa-css-prefix}-quote-left:before { content: @fa-var-quote-left; }\n.@{fa-css-prefix}-quote-right:before { content: @fa-var-quote-right; }\n.@{fa-css-prefix}-spinner:before { content: @fa-var-spinner; }\n.@{fa-css-prefix}-circle:before { content: @fa-var-circle; }\n.@{fa-css-prefix}-mail-reply:before,\n.@{fa-css-prefix}-reply:before { content: @fa-var-reply; }\n.@{fa-css-prefix}-github-alt:before { content: @fa-var-github-alt; }\n.@{fa-css-prefix}-folder-o:before { content: @fa-var-folder-o; }\n.@{fa-css-prefix}-folder-open-o:before { content: @fa-var-folder-open-o; }\n.@{fa-css-prefix}-smile-o:before 
{ content: @fa-var-smile-o; }\n.@{fa-css-prefix}-frown-o:before { content: @fa-var-frown-o; }\n.@{fa-css-prefix}-meh-o:before { content: @fa-var-meh-o; }\n.@{fa-css-prefix}-gamepad:before { content: @fa-var-gamepad; }\n.@{fa-css-prefix}-keyboard-o:before { content: @fa-var-keyboard-o; }\n.@{fa-css-prefix}-flag-o:before { content: @fa-var-flag-o; }\n.@{fa-css-prefix}-flag-checkered:before { content: @fa-var-flag-checkered; }\n.@{fa-css-prefix}-terminal:before { content: @fa-var-terminal; }\n.@{fa-css-prefix}-code:before { content: @fa-var-code; }\n.@{fa-css-prefix}-mail-reply-all:before,\n.@{fa-css-prefix}-reply-all:before { content: @fa-var-reply-all; }\n.@{fa-css-prefix}-star-half-empty:before,\n.@{fa-css-prefix}-star-half-full:before,\n.@{fa-css-prefix}-star-half-o:before { content: @fa-var-star-half-o; }\n.@{fa-css-prefix}-location-arrow:before { content: @fa-var-location-arrow; }\n.@{fa-css-prefix}-crop:before { content: @fa-var-crop; }\n.@{fa-css-prefix}-code-fork:before { content: @fa-var-code-fork; }\n.@{fa-css-prefix}-unlink:before,\n.@{fa-css-prefix}-chain-broken:before { content: @fa-var-chain-broken; }\n.@{fa-css-prefix}-question:before { content: @fa-var-question; }\n.@{fa-css-prefix}-info:before { content: @fa-var-info; }\n.@{fa-css-prefix}-exclamation:before { content: @fa-var-exclamation; }\n.@{fa-css-prefix}-superscript:before { content: @fa-var-superscript; }\n.@{fa-css-prefix}-subscript:before { content: @fa-var-subscript; }\n.@{fa-css-prefix}-eraser:before { content: @fa-var-eraser; }\n.@{fa-css-prefix}-puzzle-piece:before { content: @fa-var-puzzle-piece; }\n.@{fa-css-prefix}-microphone:before { content: @fa-var-microphone; }\n.@{fa-css-prefix}-microphone-slash:before { content: @fa-var-microphone-slash; }\n.@{fa-css-prefix}-shield:before { content: @fa-var-shield; }\n.@{fa-css-prefix}-calendar-o:before { content: @fa-var-calendar-o; }\n.@{fa-css-prefix}-fire-extinguisher:before { content: @fa-var-fire-extinguisher; 
}\n.@{fa-css-prefix}-rocket:before { content: @fa-var-rocket; }\n.@{fa-css-prefix}-maxcdn:before { content: @fa-var-maxcdn; }\n.@{fa-css-prefix}-chevron-circle-left:before { content: @fa-var-chevron-circle-left; }\n.@{fa-css-prefix}-chevron-circle-right:before { content: @fa-var-chevron-circle-right; }\n.@{fa-css-prefix}-chevron-circle-up:before { content: @fa-var-chevron-circle-up; }\n.@{fa-css-prefix}-chevron-circle-down:before { content: @fa-var-chevron-circle-down; }\n.@{fa-css-prefix}-html5:before { content: @fa-var-html5; }\n.@{fa-css-prefix}-css3:before { content: @fa-var-css3; }\n.@{fa-css-prefix}-anchor:before { content: @fa-var-anchor; }\n.@{fa-css-prefix}-unlock-alt:before { content: @fa-var-unlock-alt; }\n.@{fa-css-prefix}-bullseye:before { content: @fa-var-bullseye; }\n.@{fa-css-prefix}-ellipsis-h:before { content: @fa-var-ellipsis-h; }\n.@{fa-css-prefix}-ellipsis-v:before { content: @fa-var-ellipsis-v; }\n.@{fa-css-prefix}-rss-square:before { content: @fa-var-rss-square; }\n.@{fa-css-prefix}-play-circle:before { content: @fa-var-play-circle; }\n.@{fa-css-prefix}-ticket:before { content: @fa-var-ticket; }\n.@{fa-css-prefix}-minus-square:before { content: @fa-var-minus-square; }\n.@{fa-css-prefix}-minus-square-o:before { content: @fa-var-minus-square-o; }\n.@{fa-css-prefix}-level-up:before { content: @fa-var-level-up; }\n.@{fa-css-prefix}-level-down:before { content: @fa-var-level-down; }\n.@{fa-css-prefix}-check-square:before { content: @fa-var-check-square; }\n.@{fa-css-prefix}-pencil-square:before { content: @fa-var-pencil-square; }\n.@{fa-css-prefix}-external-link-square:before { content: @fa-var-external-link-square; }\n.@{fa-css-prefix}-share-square:before { content: @fa-var-share-square; }\n.@{fa-css-prefix}-compass:before { content: @fa-var-compass; }\n.@{fa-css-prefix}-toggle-down:before,\n.@{fa-css-prefix}-caret-square-o-down:before { content: @fa-var-caret-square-o-down; 
}\n.@{fa-css-prefix}-toggle-up:before,\n.@{fa-css-prefix}-caret-square-o-up:before { content: @fa-var-caret-square-o-up; }\n.@{fa-css-prefix}-toggle-right:before,\n.@{fa-css-prefix}-caret-square-o-right:before { content: @fa-var-caret-square-o-right; }\n.@{fa-css-prefix}-euro:before,\n.@{fa-css-prefix}-eur:before { content: @fa-var-eur; }\n.@{fa-css-prefix}-gbp:before { content: @fa-var-gbp; }\n.@{fa-css-prefix}-dollar:before,\n.@{fa-css-prefix}-usd:before { content: @fa-var-usd; }\n.@{fa-css-prefix}-rupee:before,\n.@{fa-css-prefix}-inr:before { content: @fa-var-inr; }\n.@{fa-css-prefix}-cny:before,\n.@{fa-css-prefix}-rmb:before,\n.@{fa-css-prefix}-yen:before,\n.@{fa-css-prefix}-jpy:before { content: @fa-var-jpy; }\n.@{fa-css-prefix}-ruble:before,\n.@{fa-css-prefix}-rouble:before,\n.@{fa-css-prefix}-rub:before { content: @fa-var-rub; }\n.@{fa-css-prefix}-won:before,\n.@{fa-css-prefix}-krw:before { content: @fa-var-krw; }\n.@{fa-css-prefix}-bitcoin:before,\n.@{fa-css-prefix}-btc:before { content: @fa-var-btc; }\n.@{fa-css-prefix}-file:before { content: @fa-var-file; }\n.@{fa-css-prefix}-file-text:before { content: @fa-var-file-text; }\n.@{fa-css-prefix}-sort-alpha-asc:before { content: @fa-var-sort-alpha-asc; }\n.@{fa-css-prefix}-sort-alpha-desc:before { content: @fa-var-sort-alpha-desc; }\n.@{fa-css-prefix}-sort-amount-asc:before { content: @fa-var-sort-amount-asc; }\n.@{fa-css-prefix}-sort-amount-desc:before { content: @fa-var-sort-amount-desc; }\n.@{fa-css-prefix}-sort-numeric-asc:before { content: @fa-var-sort-numeric-asc; }\n.@{fa-css-prefix}-sort-numeric-desc:before { content: @fa-var-sort-numeric-desc; }\n.@{fa-css-prefix}-thumbs-up:before { content: @fa-var-thumbs-up; }\n.@{fa-css-prefix}-thumbs-down:before { content: @fa-var-thumbs-down; }\n.@{fa-css-prefix}-youtube-square:before { content: @fa-var-youtube-square; }\n.@{fa-css-prefix}-youtube:before { content: @fa-var-youtube; }\n.@{fa-css-prefix}-xing:before { content: @fa-var-xing; 
}\n.@{fa-css-prefix}-xing-square:before { content: @fa-var-xing-square; }\n.@{fa-css-prefix}-youtube-play:before { content: @fa-var-youtube-play; }\n.@{fa-css-prefix}-dropbox:before { content: @fa-var-dropbox; }\n.@{fa-css-prefix}-stack-overflow:before { content: @fa-var-stack-overflow; }\n.@{fa-css-prefix}-instagram:before { content: @fa-var-instagram; }\n.@{fa-css-prefix}-flickr:before { content: @fa-var-flickr; }\n.@{fa-css-prefix}-adn:before { content: @fa-var-adn; }\n.@{fa-css-prefix}-bitbucket:before { content: @fa-var-bitbucket; }\n.@{fa-css-prefix}-bitbucket-square:before { content: @fa-var-bitbucket-square; }\n.@{fa-css-prefix}-tumblr:before { content: @fa-var-tumblr; }\n.@{fa-css-prefix}-tumblr-square:before { content: @fa-var-tumblr-square; }\n.@{fa-css-prefix}-long-arrow-down:before { content: @fa-var-long-arrow-down; }\n.@{fa-css-prefix}-long-arrow-up:before { content: @fa-var-long-arrow-up; }\n.@{fa-css-prefix}-long-arrow-left:before { content: @fa-var-long-arrow-left; }\n.@{fa-css-prefix}-long-arrow-right:before { content: @fa-var-long-arrow-right; }\n.@{fa-css-prefix}-apple:before { content: @fa-var-apple; }\n.@{fa-css-prefix}-windows:before { content: @fa-var-windows; }\n.@{fa-css-prefix}-android:before { content: @fa-var-android; }\n.@{fa-css-prefix}-linux:before { content: @fa-var-linux; }\n.@{fa-css-prefix}-dribbble:before { content: @fa-var-dribbble; }\n.@{fa-css-prefix}-skype:before { content: @fa-var-skype; }\n.@{fa-css-prefix}-foursquare:before { content: @fa-var-foursquare; }\n.@{fa-css-prefix}-trello:before { content: @fa-var-trello; }\n.@{fa-css-prefix}-female:before { content: @fa-var-female; }\n.@{fa-css-prefix}-male:before { content: @fa-var-male; }\n.@{fa-css-prefix}-gittip:before,\n.@{fa-css-prefix}-gratipay:before { content: @fa-var-gratipay; }\n.@{fa-css-prefix}-sun-o:before { content: @fa-var-sun-o; }\n.@{fa-css-prefix}-moon-o:before { content: @fa-var-moon-o; }\n.@{fa-css-prefix}-archive:before { content: @fa-var-archive; 
}\n.@{fa-css-prefix}-bug:before { content: @fa-var-bug; }\n.@{fa-css-prefix}-vk:before { content: @fa-var-vk; }\n.@{fa-css-prefix}-weibo:before { content: @fa-var-weibo; }\n.@{fa-css-prefix}-renren:before { content: @fa-var-renren; }\n.@{fa-css-prefix}-pagelines:before { content: @fa-var-pagelines; }\n.@{fa-css-prefix}-stack-exchange:before { content: @fa-var-stack-exchange; }\n.@{fa-css-prefix}-arrow-circle-o-right:before { content: @fa-var-arrow-circle-o-right; }\n.@{fa-css-prefix}-arrow-circle-o-left:before { content: @fa-var-arrow-circle-o-left; }\n.@{fa-css-prefix}-toggle-left:before,\n.@{fa-css-prefix}-caret-square-o-left:before { content: @fa-var-caret-square-o-left; }\n.@{fa-css-prefix}-dot-circle-o:before { content: @fa-var-dot-circle-o; }\n.@{fa-css-prefix}-wheelchair:before { content: @fa-var-wheelchair; }\n.@{fa-css-prefix}-vimeo-square:before { content: @fa-var-vimeo-square; }\n.@{fa-css-prefix}-turkish-lira:before,\n.@{fa-css-prefix}-try:before { content: @fa-var-try; }\n.@{fa-css-prefix}-plus-square-o:before { content: @fa-var-plus-square-o; }\n.@{fa-css-prefix}-space-shuttle:before { content: @fa-var-space-shuttle; }\n.@{fa-css-prefix}-slack:before { content: @fa-var-slack; }\n.@{fa-css-prefix}-envelope-square:before { content: @fa-var-envelope-square; }\n.@{fa-css-prefix}-wordpress:before { content: @fa-var-wordpress; }\n.@{fa-css-prefix}-openid:before { content: @fa-var-openid; }\n.@{fa-css-prefix}-institution:before,\n.@{fa-css-prefix}-bank:before,\n.@{fa-css-prefix}-university:before { content: @fa-var-university; }\n.@{fa-css-prefix}-mortar-board:before,\n.@{fa-css-prefix}-graduation-cap:before { content: @fa-var-graduation-cap; }\n.@{fa-css-prefix}-yahoo:before { content: @fa-var-yahoo; }\n.@{fa-css-prefix}-google:before { content: @fa-var-google; }\n.@{fa-css-prefix}-reddit:before { content: @fa-var-reddit; }\n.@{fa-css-prefix}-reddit-square:before { content: @fa-var-reddit-square; }\n.@{fa-css-prefix}-stumbleupon-circle:before { content: 
@fa-var-stumbleupon-circle; }\n.@{fa-css-prefix}-stumbleupon:before { content: @fa-var-stumbleupon; }\n.@{fa-css-prefix}-delicious:before { content: @fa-var-delicious; }\n.@{fa-css-prefix}-digg:before { content: @fa-var-digg; }\n.@{fa-css-prefix}-pied-piper:before { content: @fa-var-pied-piper; }\n.@{fa-css-prefix}-pied-piper-alt:before { content: @fa-var-pied-piper-alt; }\n.@{fa-css-prefix}-drupal:before { content: @fa-var-drupal; }\n.@{fa-css-prefix}-joomla:before { content: @fa-var-joomla; }\n.@{fa-css-prefix}-language:before { content: @fa-var-language; }\n.@{fa-css-prefix}-fax:before { content: @fa-var-fax; }\n.@{fa-css-prefix}-building:before { content: @fa-var-building; }\n.@{fa-css-prefix}-child:before { content: @fa-var-child; }\n.@{fa-css-prefix}-paw:before { content: @fa-var-paw; }\n.@{fa-css-prefix}-spoon:before { content: @fa-var-spoon; }\n.@{fa-css-prefix}-cube:before { content: @fa-var-cube; }\n.@{fa-css-prefix}-cubes:before { content: @fa-var-cubes; }\n.@{fa-css-prefix}-behance:before { content: @fa-var-behance; }\n.@{fa-css-prefix}-behance-square:before { content: @fa-var-behance-square; }\n.@{fa-css-prefix}-steam:before { content: @fa-var-steam; }\n.@{fa-css-prefix}-steam-square:before { content: @fa-var-steam-square; }\n.@{fa-css-prefix}-recycle:before { content: @fa-var-recycle; }\n.@{fa-css-prefix}-automobile:before,\n.@{fa-css-prefix}-car:before { content: @fa-var-car; }\n.@{fa-css-prefix}-cab:before,\n.@{fa-css-prefix}-taxi:before { content: @fa-var-taxi; }\n.@{fa-css-prefix}-tree:before { content: @fa-var-tree; }\n.@{fa-css-prefix}-spotify:before { content: @fa-var-spotify; }\n.@{fa-css-prefix}-deviantart:before { content: @fa-var-deviantart; }\n.@{fa-css-prefix}-soundcloud:before { content: @fa-var-soundcloud; }\n.@{fa-css-prefix}-database:before { content: @fa-var-database; }\n.@{fa-css-prefix}-file-pdf-o:before { content: @fa-var-file-pdf-o; }\n.@{fa-css-prefix}-file-word-o:before { content: @fa-var-file-word-o; 
}\n.@{fa-css-prefix}-file-excel-o:before { content: @fa-var-file-excel-o; }\n.@{fa-css-prefix}-file-powerpoint-o:before { content: @fa-var-file-powerpoint-o; }\n.@{fa-css-prefix}-file-photo-o:before,\n.@{fa-css-prefix}-file-picture-o:before,\n.@{fa-css-prefix}-file-image-o:before { content: @fa-var-file-image-o; }\n.@{fa-css-prefix}-file-zip-o:before,\n.@{fa-css-prefix}-file-archive-o:before { content: @fa-var-file-archive-o; }\n.@{fa-css-prefix}-file-sound-o:before,\n.@{fa-css-prefix}-file-audio-o:before { content: @fa-var-file-audio-o; }\n.@{fa-css-prefix}-file-movie-o:before,\n.@{fa-css-prefix}-file-video-o:before { content: @fa-var-file-video-o; }\n.@{fa-css-prefix}-file-code-o:before { content: @fa-var-file-code-o; }\n.@{fa-css-prefix}-vine:before { content: @fa-var-vine; }\n.@{fa-css-prefix}-codepen:before { content: @fa-var-codepen; }\n.@{fa-css-prefix}-jsfiddle:before { content: @fa-var-jsfiddle; }\n.@{fa-css-prefix}-life-bouy:before,\n.@{fa-css-prefix}-life-buoy:before,\n.@{fa-css-prefix}-life-saver:before,\n.@{fa-css-prefix}-support:before,\n.@{fa-css-prefix}-life-ring:before { content: @fa-var-life-ring; }\n.@{fa-css-prefix}-circle-o-notch:before { content: @fa-var-circle-o-notch; }\n.@{fa-css-prefix}-ra:before,\n.@{fa-css-prefix}-rebel:before { content: @fa-var-rebel; }\n.@{fa-css-prefix}-ge:before,\n.@{fa-css-prefix}-empire:before { content: @fa-var-empire; }\n.@{fa-css-prefix}-git-square:before { content: @fa-var-git-square; }\n.@{fa-css-prefix}-git:before { content: @fa-var-git; }\n.@{fa-css-prefix}-hacker-news:before { content: @fa-var-hacker-news; }\n.@{fa-css-prefix}-tencent-weibo:before { content: @fa-var-tencent-weibo; }\n.@{fa-css-prefix}-qq:before { content: @fa-var-qq; }\n.@{fa-css-prefix}-wechat:before,\n.@{fa-css-prefix}-weixin:before { content: @fa-var-weixin; }\n.@{fa-css-prefix}-send:before,\n.@{fa-css-prefix}-paper-plane:before { content: @fa-var-paper-plane; }\n.@{fa-css-prefix}-send-o:before,\n.@{fa-css-prefix}-paper-plane-o:before { 
content: @fa-var-paper-plane-o; }\n.@{fa-css-prefix}-history:before { content: @fa-var-history; }\n.@{fa-css-prefix}-genderless:before,\n.@{fa-css-prefix}-circle-thin:before { content: @fa-var-circle-thin; }\n.@{fa-css-prefix}-header:before { content: @fa-var-header; }\n.@{fa-css-prefix}-paragraph:before { content: @fa-var-paragraph; }\n.@{fa-css-prefix}-sliders:before { content: @fa-var-sliders; }\n.@{fa-css-prefix}-share-alt:before { content: @fa-var-share-alt; }\n.@{fa-css-prefix}-share-alt-square:before { content: @fa-var-share-alt-square; }\n.@{fa-css-prefix}-bomb:before { content: @fa-var-bomb; }\n.@{fa-css-prefix}-soccer-ball-o:before,\n.@{fa-css-prefix}-futbol-o:before { content: @fa-var-futbol-o; }\n.@{fa-css-prefix}-tty:before { content: @fa-var-tty; }\n.@{fa-css-prefix}-binoculars:before { content: @fa-var-binoculars; }\n.@{fa-css-prefix}-plug:before { content: @fa-var-plug; }\n.@{fa-css-prefix}-slideshare:before { content: @fa-var-slideshare; }\n.@{fa-css-prefix}-twitch:before { content: @fa-var-twitch; }\n.@{fa-css-prefix}-yelp:before { content: @fa-var-yelp; }\n.@{fa-css-prefix}-newspaper-o:before { content: @fa-var-newspaper-o; }\n.@{fa-css-prefix}-wifi:before { content: @fa-var-wifi; }\n.@{fa-css-prefix}-calculator:before { content: @fa-var-calculator; }\n.@{fa-css-prefix}-paypal:before { content: @fa-var-paypal; }\n.@{fa-css-prefix}-google-wallet:before { content: @fa-var-google-wallet; }\n.@{fa-css-prefix}-cc-visa:before { content: @fa-var-cc-visa; }\n.@{fa-css-prefix}-cc-mastercard:before { content: @fa-var-cc-mastercard; }\n.@{fa-css-prefix}-cc-discover:before { content: @fa-var-cc-discover; }\n.@{fa-css-prefix}-cc-amex:before { content: @fa-var-cc-amex; }\n.@{fa-css-prefix}-cc-paypal:before { content: @fa-var-cc-paypal; }\n.@{fa-css-prefix}-cc-stripe:before { content: @fa-var-cc-stripe; }\n.@{fa-css-prefix}-bell-slash:before { content: @fa-var-bell-slash; }\n.@{fa-css-prefix}-bell-slash-o:before { content: @fa-var-bell-slash-o; 
}\n.@{fa-css-prefix}-trash:before { content: @fa-var-trash; }\n.@{fa-css-prefix}-copyright:before { content: @fa-var-copyright; }\n.@{fa-css-prefix}-at:before { content: @fa-var-at; }\n.@{fa-css-prefix}-eyedropper:before { content: @fa-var-eyedropper; }\n.@{fa-css-prefix}-paint-brush:before { content: @fa-var-paint-brush; }\n.@{fa-css-prefix}-birthday-cake:before { content: @fa-var-birthday-cake; }\n.@{fa-css-prefix}-area-chart:before { content: @fa-var-area-chart; }\n.@{fa-css-prefix}-pie-chart:before { content: @fa-var-pie-chart; }\n.@{fa-css-prefix}-line-chart:before { content: @fa-var-line-chart; }\n.@{fa-css-prefix}-lastfm:before { content: @fa-var-lastfm; }\n.@{fa-css-prefix}-lastfm-square:before { content: @fa-var-lastfm-square; }\n.@{fa-css-prefix}-toggle-off:before { content: @fa-var-toggle-off; }\n.@{fa-css-prefix}-toggle-on:before { content: @fa-var-toggle-on; }\n.@{fa-css-prefix}-bicycle:before { content: @fa-var-bicycle; }\n.@{fa-css-prefix}-bus:before { content: @fa-var-bus; }\n.@{fa-css-prefix}-ioxhost:before { content: @fa-var-ioxhost; }\n.@{fa-css-prefix}-angellist:before { content: @fa-var-angellist; }\n.@{fa-css-prefix}-cc:before { content: @fa-var-cc; }\n.@{fa-css-prefix}-shekel:before,\n.@{fa-css-prefix}-sheqel:before,\n.@{fa-css-prefix}-ils:before { content: @fa-var-ils; }\n.@{fa-css-prefix}-meanpath:before { content: @fa-var-meanpath; }\n.@{fa-css-prefix}-buysellads:before { content: @fa-var-buysellads; }\n.@{fa-css-prefix}-connectdevelop:before { content: @fa-var-connectdevelop; }\n.@{fa-css-prefix}-dashcube:before { content: @fa-var-dashcube; }\n.@{fa-css-prefix}-forumbee:before { content: @fa-var-forumbee; }\n.@{fa-css-prefix}-leanpub:before { content: @fa-var-leanpub; }\n.@{fa-css-prefix}-sellsy:before { content: @fa-var-sellsy; }\n.@{fa-css-prefix}-shirtsinbulk:before { content: @fa-var-shirtsinbulk; }\n.@{fa-css-prefix}-simplybuilt:before { content: @fa-var-simplybuilt; }\n.@{fa-css-prefix}-skyatlas:before { content: @fa-var-skyatlas; 
}\n.@{fa-css-prefix}-cart-plus:before { content: @fa-var-cart-plus; }\n.@{fa-css-prefix}-cart-arrow-down:before { content: @fa-var-cart-arrow-down; }\n.@{fa-css-prefix}-diamond:before { content: @fa-var-diamond; }\n.@{fa-css-prefix}-ship:before { content: @fa-var-ship; }\n.@{fa-css-prefix}-user-secret:before { content: @fa-var-user-secret; }\n.@{fa-css-prefix}-motorcycle:before { content: @fa-var-motorcycle; }\n.@{fa-css-prefix}-street-view:before { content: @fa-var-street-view; }\n.@{fa-css-prefix}-heartbeat:before { content: @fa-var-heartbeat; }\n.@{fa-css-prefix}-venus:before { content: @fa-var-venus; }\n.@{fa-css-prefix}-mars:before { content: @fa-var-mars; }\n.@{fa-css-prefix}-mercury:before { content: @fa-var-mercury; }\n.@{fa-css-prefix}-transgender:before { content: @fa-var-transgender; }\n.@{fa-css-prefix}-transgender-alt:before { content: @fa-var-transgender-alt; }\n.@{fa-css-prefix}-venus-double:before { content: @fa-var-venus-double; }\n.@{fa-css-prefix}-mars-double:before { content: @fa-var-mars-double; }\n.@{fa-css-prefix}-venus-mars:before { content: @fa-var-venus-mars; }\n.@{fa-css-prefix}-mars-stroke:before { content: @fa-var-mars-stroke; }\n.@{fa-css-prefix}-mars-stroke-v:before { content: @fa-var-mars-stroke-v; }\n.@{fa-css-prefix}-mars-stroke-h:before { content: @fa-var-mars-stroke-h; }\n.@{fa-css-prefix}-neuter:before { content: @fa-var-neuter; }\n.@{fa-css-prefix}-facebook-official:before { content: @fa-var-facebook-official; }\n.@{fa-css-prefix}-pinterest-p:before { content: @fa-var-pinterest-p; }\n.@{fa-css-prefix}-whatsapp:before { content: @fa-var-whatsapp; }\n.@{fa-css-prefix}-server:before { content: @fa-var-server; }\n.@{fa-css-prefix}-user-plus:before { content: @fa-var-user-plus; }\n.@{fa-css-prefix}-user-times:before { content: @fa-var-user-times; }\n.@{fa-css-prefix}-hotel:before,\n.@{fa-css-prefix}-bed:before { content: @fa-var-bed; }\n.@{fa-css-prefix}-viacoin:before { content: @fa-var-viacoin; }\n.@{fa-css-prefix}-train:before { 
content: @fa-var-train; }\n.@{fa-css-prefix}-subway:before { content: @fa-var-subway; }\n.@{fa-css-prefix}-medium:before { content: @fa-var-medium; }\n"
  },
  {
    "path": "website/font-awesome/less/larger.less",
    "content": "// Icon Sizes\n// -------------------------\n\n/* makes the font 33% larger relative to the icon container */\n.@{fa-css-prefix}-lg {\n  font-size: (4em / 3);\n  line-height: (3em / 4);\n  vertical-align: -15%;\n}\n.@{fa-css-prefix}-2x { font-size: 2em; }\n.@{fa-css-prefix}-3x { font-size: 3em; }\n.@{fa-css-prefix}-4x { font-size: 4em; }\n.@{fa-css-prefix}-5x { font-size: 5em; }\n"
  },
  {
    "path": "website/font-awesome/less/list.less",
    "content": "// List Icons\n// -------------------------\n\n.@{fa-css-prefix}-ul {\n  padding-left: 0;\n  margin-left: @fa-li-width;\n  list-style-type: none;\n  > li { position: relative; }\n}\n.@{fa-css-prefix}-li {\n  position: absolute;\n  left: -@fa-li-width;\n  width: @fa-li-width;\n  top: (2em / 14);\n  text-align: center;\n  &.@{fa-css-prefix}-lg {\n    left: (-@fa-li-width + (4em / 14));\n  }\n}\n"
  },
  {
    "path": "website/font-awesome/less/mixins.less",
    "content": "// Mixins\n// --------------------------\n\n.fa-icon() {\n  display: inline-block;\n  font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration\n  font-size: inherit; // can't have font-size inherit on line above, so need to override\n  text-rendering: auto; // optimizelegibility throws things off #1094\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  transform: translate(0, 0); // ensures no half-pixel rendering in firefox\n\n}\n\n.fa-icon-rotate(@degrees, @rotation) {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation);\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n\n.fa-icon-flip(@horiz, @vert, @rotation) {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation, mirror=1);\n  -webkit-transform: scale(@horiz, @vert);\n      -ms-transform: scale(@horiz, @vert);\n          transform: scale(@horiz, @vert);\n}\n"
  },
  {
    "path": "website/font-awesome/less/path.less",
    "content": "/* FONT PATH\n * -------------------------- */\n\n@font-face {\n  font-family: 'FontAwesome';\n  src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}');\n  src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'),\n    url('@{fa-font-path}/fontawesome-webfont.woff2?v=@{fa-version}') format('woff2'),\n    url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'),\n    url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'),\n    url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg');\n//  src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts\n  font-weight: normal;\n  font-style: normal;\n}\n"
  },
  {
    "path": "website/font-awesome/less/rotated-flipped.less",
    "content": "// Rotated & Flipped Icons\n// -------------------------\n\n.@{fa-css-prefix}-rotate-90  { .fa-icon-rotate(90deg, 1);  }\n.@{fa-css-prefix}-rotate-180 { .fa-icon-rotate(180deg, 2); }\n.@{fa-css-prefix}-rotate-270 { .fa-icon-rotate(270deg, 3); }\n\n.@{fa-css-prefix}-flip-horizontal { .fa-icon-flip(-1, 1, 0); }\n.@{fa-css-prefix}-flip-vertical   { .fa-icon-flip(1, -1, 2); }\n\n// Hook for IE8-9\n// -------------------------\n\n:root .@{fa-css-prefix}-rotate-90,\n:root .@{fa-css-prefix}-rotate-180,\n:root .@{fa-css-prefix}-rotate-270,\n:root .@{fa-css-prefix}-flip-horizontal,\n:root .@{fa-css-prefix}-flip-vertical {\n  filter: none;\n}\n"
  },
  {
    "path": "website/font-awesome/less/stacked.less",
    "content": "// Stacked Icons\n// -------------------------\n\n.@{fa-css-prefix}-stack {\n  position: relative;\n  display: inline-block;\n  width: 2em;\n  height: 2em;\n  line-height: 2em;\n  vertical-align: middle;\n}\n.@{fa-css-prefix}-stack-1x, .@{fa-css-prefix}-stack-2x {\n  position: absolute;\n  left: 0;\n  width: 100%;\n  text-align: center;\n}\n.@{fa-css-prefix}-stack-1x { line-height: inherit; }\n.@{fa-css-prefix}-stack-2x { font-size: 2em; }\n.@{fa-css-prefix}-inverse { color: @fa-inverse; }\n"
  },
  {
    "path": "website/font-awesome/less/variables.less",
    "content": "// Variables\n// --------------------------\n\n@fa-font-path:        \"../fonts\";\n@fa-font-size-base:   14px;\n//@fa-font-path:        \"//netdna.bootstrapcdn.com/font-awesome/4.3.0/fonts\"; // for referencing Bootstrap CDN font files directly\n@fa-css-prefix:       fa;\n@fa-version:          \"4.3.0\";\n@fa-border-color:     #eee;\n@fa-inverse:          #fff;\n@fa-li-width:         (30em / 14);\n\n@fa-var-adjust: \"\\f042\";\n@fa-var-adn: \"\\f170\";\n@fa-var-align-center: \"\\f037\";\n@fa-var-align-justify: \"\\f039\";\n@fa-var-align-left: \"\\f036\";\n@fa-var-align-right: \"\\f038\";\n@fa-var-ambulance: \"\\f0f9\";\n@fa-var-anchor: \"\\f13d\";\n@fa-var-android: \"\\f17b\";\n@fa-var-angellist: \"\\f209\";\n@fa-var-angle-double-down: \"\\f103\";\n@fa-var-angle-double-left: \"\\f100\";\n@fa-var-angle-double-right: \"\\f101\";\n@fa-var-angle-double-up: \"\\f102\";\n@fa-var-angle-down: \"\\f107\";\n@fa-var-angle-left: \"\\f104\";\n@fa-var-angle-right: \"\\f105\";\n@fa-var-angle-up: \"\\f106\";\n@fa-var-apple: \"\\f179\";\n@fa-var-archive: \"\\f187\";\n@fa-var-area-chart: \"\\f1fe\";\n@fa-var-arrow-circle-down: \"\\f0ab\";\n@fa-var-arrow-circle-left: \"\\f0a8\";\n@fa-var-arrow-circle-o-down: \"\\f01a\";\n@fa-var-arrow-circle-o-left: \"\\f190\";\n@fa-var-arrow-circle-o-right: \"\\f18e\";\n@fa-var-arrow-circle-o-up: \"\\f01b\";\n@fa-var-arrow-circle-right: \"\\f0a9\";\n@fa-var-arrow-circle-up: \"\\f0aa\";\n@fa-var-arrow-down: \"\\f063\";\n@fa-var-arrow-left: \"\\f060\";\n@fa-var-arrow-right: \"\\f061\";\n@fa-var-arrow-up: \"\\f062\";\n@fa-var-arrows: \"\\f047\";\n@fa-var-arrows-alt: \"\\f0b2\";\n@fa-var-arrows-h: \"\\f07e\";\n@fa-var-arrows-v: \"\\f07d\";\n@fa-var-asterisk: \"\\f069\";\n@fa-var-at: \"\\f1fa\";\n@fa-var-automobile: \"\\f1b9\";\n@fa-var-backward: \"\\f04a\";\n@fa-var-ban: \"\\f05e\";\n@fa-var-bank: \"\\f19c\";\n@fa-var-bar-chart: \"\\f080\";\n@fa-var-bar-chart-o: \"\\f080\";\n@fa-var-barcode: \"\\f02a\";\n@fa-var-bars: 
\"\\f0c9\";\n@fa-var-bed: \"\\f236\";\n@fa-var-beer: \"\\f0fc\";\n@fa-var-behance: \"\\f1b4\";\n@fa-var-behance-square: \"\\f1b5\";\n@fa-var-bell: \"\\f0f3\";\n@fa-var-bell-o: \"\\f0a2\";\n@fa-var-bell-slash: \"\\f1f6\";\n@fa-var-bell-slash-o: \"\\f1f7\";\n@fa-var-bicycle: \"\\f206\";\n@fa-var-binoculars: \"\\f1e5\";\n@fa-var-birthday-cake: \"\\f1fd\";\n@fa-var-bitbucket: \"\\f171\";\n@fa-var-bitbucket-square: \"\\f172\";\n@fa-var-bitcoin: \"\\f15a\";\n@fa-var-bold: \"\\f032\";\n@fa-var-bolt: \"\\f0e7\";\n@fa-var-bomb: \"\\f1e2\";\n@fa-var-book: \"\\f02d\";\n@fa-var-bookmark: \"\\f02e\";\n@fa-var-bookmark-o: \"\\f097\";\n@fa-var-briefcase: \"\\f0b1\";\n@fa-var-btc: \"\\f15a\";\n@fa-var-bug: \"\\f188\";\n@fa-var-building: \"\\f1ad\";\n@fa-var-building-o: \"\\f0f7\";\n@fa-var-bullhorn: \"\\f0a1\";\n@fa-var-bullseye: \"\\f140\";\n@fa-var-bus: \"\\f207\";\n@fa-var-buysellads: \"\\f20d\";\n@fa-var-cab: \"\\f1ba\";\n@fa-var-calculator: \"\\f1ec\";\n@fa-var-calendar: \"\\f073\";\n@fa-var-calendar-o: \"\\f133\";\n@fa-var-camera: \"\\f030\";\n@fa-var-camera-retro: \"\\f083\";\n@fa-var-car: \"\\f1b9\";\n@fa-var-caret-down: \"\\f0d7\";\n@fa-var-caret-left: \"\\f0d9\";\n@fa-var-caret-right: \"\\f0da\";\n@fa-var-caret-square-o-down: \"\\f150\";\n@fa-var-caret-square-o-left: \"\\f191\";\n@fa-var-caret-square-o-right: \"\\f152\";\n@fa-var-caret-square-o-up: \"\\f151\";\n@fa-var-caret-up: \"\\f0d8\";\n@fa-var-cart-arrow-down: \"\\f218\";\n@fa-var-cart-plus: \"\\f217\";\n@fa-var-cc: \"\\f20a\";\n@fa-var-cc-amex: \"\\f1f3\";\n@fa-var-cc-discover: \"\\f1f2\";\n@fa-var-cc-mastercard: \"\\f1f1\";\n@fa-var-cc-paypal: \"\\f1f4\";\n@fa-var-cc-stripe: \"\\f1f5\";\n@fa-var-cc-visa: \"\\f1f0\";\n@fa-var-certificate: \"\\f0a3\";\n@fa-var-chain: \"\\f0c1\";\n@fa-var-chain-broken: \"\\f127\";\n@fa-var-check: \"\\f00c\";\n@fa-var-check-circle: \"\\f058\";\n@fa-var-check-circle-o: \"\\f05d\";\n@fa-var-check-square: \"\\f14a\";\n@fa-var-check-square-o: \"\\f046\";\n@fa-var-chevron-circle-down: 
\"\\f13a\";\n@fa-var-chevron-circle-left: \"\\f137\";\n@fa-var-chevron-circle-right: \"\\f138\";\n@fa-var-chevron-circle-up: \"\\f139\";\n@fa-var-chevron-down: \"\\f078\";\n@fa-var-chevron-left: \"\\f053\";\n@fa-var-chevron-right: \"\\f054\";\n@fa-var-chevron-up: \"\\f077\";\n@fa-var-child: \"\\f1ae\";\n@fa-var-circle: \"\\f111\";\n@fa-var-circle-o: \"\\f10c\";\n@fa-var-circle-o-notch: \"\\f1ce\";\n@fa-var-circle-thin: \"\\f1db\";\n@fa-var-clipboard: \"\\f0ea\";\n@fa-var-clock-o: \"\\f017\";\n@fa-var-close: \"\\f00d\";\n@fa-var-cloud: \"\\f0c2\";\n@fa-var-cloud-download: \"\\f0ed\";\n@fa-var-cloud-upload: \"\\f0ee\";\n@fa-var-cny: \"\\f157\";\n@fa-var-code: \"\\f121\";\n@fa-var-code-fork: \"\\f126\";\n@fa-var-codepen: \"\\f1cb\";\n@fa-var-coffee: \"\\f0f4\";\n@fa-var-cog: \"\\f013\";\n@fa-var-cogs: \"\\f085\";\n@fa-var-columns: \"\\f0db\";\n@fa-var-comment: \"\\f075\";\n@fa-var-comment-o: \"\\f0e5\";\n@fa-var-comments: \"\\f086\";\n@fa-var-comments-o: \"\\f0e6\";\n@fa-var-compass: \"\\f14e\";\n@fa-var-compress: \"\\f066\";\n@fa-var-connectdevelop: \"\\f20e\";\n@fa-var-copy: \"\\f0c5\";\n@fa-var-copyright: \"\\f1f9\";\n@fa-var-credit-card: \"\\f09d\";\n@fa-var-crop: \"\\f125\";\n@fa-var-crosshairs: \"\\f05b\";\n@fa-var-css3: \"\\f13c\";\n@fa-var-cube: \"\\f1b2\";\n@fa-var-cubes: \"\\f1b3\";\n@fa-var-cut: \"\\f0c4\";\n@fa-var-cutlery: \"\\f0f5\";\n@fa-var-dashboard: \"\\f0e4\";\n@fa-var-dashcube: \"\\f210\";\n@fa-var-database: \"\\f1c0\";\n@fa-var-dedent: \"\\f03b\";\n@fa-var-delicious: \"\\f1a5\";\n@fa-var-desktop: \"\\f108\";\n@fa-var-deviantart: \"\\f1bd\";\n@fa-var-diamond: \"\\f219\";\n@fa-var-digg: \"\\f1a6\";\n@fa-var-dollar: \"\\f155\";\n@fa-var-dot-circle-o: \"\\f192\";\n@fa-var-download: \"\\f019\";\n@fa-var-dribbble: \"\\f17d\";\n@fa-var-dropbox: \"\\f16b\";\n@fa-var-drupal: \"\\f1a9\";\n@fa-var-edit: \"\\f044\";\n@fa-var-eject: \"\\f052\";\n@fa-var-ellipsis-h: \"\\f141\";\n@fa-var-ellipsis-v: \"\\f142\";\n@fa-var-empire: \"\\f1d1\";\n@fa-var-envelope: 
\"\\f0e0\";\n@fa-var-envelope-o: \"\\f003\";\n@fa-var-envelope-square: \"\\f199\";\n@fa-var-eraser: \"\\f12d\";\n@fa-var-eur: \"\\f153\";\n@fa-var-euro: \"\\f153\";\n@fa-var-exchange: \"\\f0ec\";\n@fa-var-exclamation: \"\\f12a\";\n@fa-var-exclamation-circle: \"\\f06a\";\n@fa-var-exclamation-triangle: \"\\f071\";\n@fa-var-expand: \"\\f065\";\n@fa-var-external-link: \"\\f08e\";\n@fa-var-external-link-square: \"\\f14c\";\n@fa-var-eye: \"\\f06e\";\n@fa-var-eye-slash: \"\\f070\";\n@fa-var-eyedropper: \"\\f1fb\";\n@fa-var-facebook: \"\\f09a\";\n@fa-var-facebook-f: \"\\f09a\";\n@fa-var-facebook-official: \"\\f230\";\n@fa-var-facebook-square: \"\\f082\";\n@fa-var-fast-backward: \"\\f049\";\n@fa-var-fast-forward: \"\\f050\";\n@fa-var-fax: \"\\f1ac\";\n@fa-var-female: \"\\f182\";\n@fa-var-fighter-jet: \"\\f0fb\";\n@fa-var-file: \"\\f15b\";\n@fa-var-file-archive-o: \"\\f1c6\";\n@fa-var-file-audio-o: \"\\f1c7\";\n@fa-var-file-code-o: \"\\f1c9\";\n@fa-var-file-excel-o: \"\\f1c3\";\n@fa-var-file-image-o: \"\\f1c5\";\n@fa-var-file-movie-o: \"\\f1c8\";\n@fa-var-file-o: \"\\f016\";\n@fa-var-file-pdf-o: \"\\f1c1\";\n@fa-var-file-photo-o: \"\\f1c5\";\n@fa-var-file-picture-o: \"\\f1c5\";\n@fa-var-file-powerpoint-o: \"\\f1c4\";\n@fa-var-file-sound-o: \"\\f1c7\";\n@fa-var-file-text: \"\\f15c\";\n@fa-var-file-text-o: \"\\f0f6\";\n@fa-var-file-video-o: \"\\f1c8\";\n@fa-var-file-word-o: \"\\f1c2\";\n@fa-var-file-zip-o: \"\\f1c6\";\n@fa-var-files-o: \"\\f0c5\";\n@fa-var-film: \"\\f008\";\n@fa-var-filter: \"\\f0b0\";\n@fa-var-fire: \"\\f06d\";\n@fa-var-fire-extinguisher: \"\\f134\";\n@fa-var-flag: \"\\f024\";\n@fa-var-flag-checkered: \"\\f11e\";\n@fa-var-flag-o: \"\\f11d\";\n@fa-var-flash: \"\\f0e7\";\n@fa-var-flask: \"\\f0c3\";\n@fa-var-flickr: \"\\f16e\";\n@fa-var-floppy-o: \"\\f0c7\";\n@fa-var-folder: \"\\f07b\";\n@fa-var-folder-o: \"\\f114\";\n@fa-var-folder-open: \"\\f07c\";\n@fa-var-folder-open-o: \"\\f115\";\n@fa-var-font: \"\\f031\";\n@fa-var-forumbee: \"\\f211\";\n@fa-var-forward: 
\"\\f04e\";\n@fa-var-foursquare: \"\\f180\";\n@fa-var-frown-o: \"\\f119\";\n@fa-var-futbol-o: \"\\f1e3\";\n@fa-var-gamepad: \"\\f11b\";\n@fa-var-gavel: \"\\f0e3\";\n@fa-var-gbp: \"\\f154\";\n@fa-var-ge: \"\\f1d1\";\n@fa-var-gear: \"\\f013\";\n@fa-var-gears: \"\\f085\";\n@fa-var-genderless: \"\\f1db\";\n@fa-var-gift: \"\\f06b\";\n@fa-var-git: \"\\f1d3\";\n@fa-var-git-square: \"\\f1d2\";\n@fa-var-github: \"\\f09b\";\n@fa-var-github-alt: \"\\f113\";\n@fa-var-github-square: \"\\f092\";\n@fa-var-gittip: \"\\f184\";\n@fa-var-glass: \"\\f000\";\n@fa-var-globe: \"\\f0ac\";\n@fa-var-google: \"\\f1a0\";\n@fa-var-google-plus: \"\\f0d5\";\n@fa-var-google-plus-square: \"\\f0d4\";\n@fa-var-google-wallet: \"\\f1ee\";\n@fa-var-graduation-cap: \"\\f19d\";\n@fa-var-gratipay: \"\\f184\";\n@fa-var-group: \"\\f0c0\";\n@fa-var-h-square: \"\\f0fd\";\n@fa-var-hacker-news: \"\\f1d4\";\n@fa-var-hand-o-down: \"\\f0a7\";\n@fa-var-hand-o-left: \"\\f0a5\";\n@fa-var-hand-o-right: \"\\f0a4\";\n@fa-var-hand-o-up: \"\\f0a6\";\n@fa-var-hdd-o: \"\\f0a0\";\n@fa-var-header: \"\\f1dc\";\n@fa-var-headphones: \"\\f025\";\n@fa-var-heart: \"\\f004\";\n@fa-var-heart-o: \"\\f08a\";\n@fa-var-heartbeat: \"\\f21e\";\n@fa-var-history: \"\\f1da\";\n@fa-var-home: \"\\f015\";\n@fa-var-hospital-o: \"\\f0f8\";\n@fa-var-hotel: \"\\f236\";\n@fa-var-html5: \"\\f13b\";\n@fa-var-ils: \"\\f20b\";\n@fa-var-image: \"\\f03e\";\n@fa-var-inbox: \"\\f01c\";\n@fa-var-indent: \"\\f03c\";\n@fa-var-info: \"\\f129\";\n@fa-var-info-circle: \"\\f05a\";\n@fa-var-inr: \"\\f156\";\n@fa-var-instagram: \"\\f16d\";\n@fa-var-institution: \"\\f19c\";\n@fa-var-ioxhost: \"\\f208\";\n@fa-var-italic: \"\\f033\";\n@fa-var-joomla: \"\\f1aa\";\n@fa-var-jpy: \"\\f157\";\n@fa-var-jsfiddle: \"\\f1cc\";\n@fa-var-key: \"\\f084\";\n@fa-var-keyboard-o: \"\\f11c\";\n@fa-var-krw: \"\\f159\";\n@fa-var-language: \"\\f1ab\";\n@fa-var-laptop: \"\\f109\";\n@fa-var-lastfm: \"\\f202\";\n@fa-var-lastfm-square: \"\\f203\";\n@fa-var-leaf: \"\\f06c\";\n@fa-var-leanpub: 
\"\\f212\";\n@fa-var-legal: \"\\f0e3\";\n@fa-var-lemon-o: \"\\f094\";\n@fa-var-level-down: \"\\f149\";\n@fa-var-level-up: \"\\f148\";\n@fa-var-life-bouy: \"\\f1cd\";\n@fa-var-life-buoy: \"\\f1cd\";\n@fa-var-life-ring: \"\\f1cd\";\n@fa-var-life-saver: \"\\f1cd\";\n@fa-var-lightbulb-o: \"\\f0eb\";\n@fa-var-line-chart: \"\\f201\";\n@fa-var-link: \"\\f0c1\";\n@fa-var-linkedin: \"\\f0e1\";\n@fa-var-linkedin-square: \"\\f08c\";\n@fa-var-linux: \"\\f17c\";\n@fa-var-list: \"\\f03a\";\n@fa-var-list-alt: \"\\f022\";\n@fa-var-list-ol: \"\\f0cb\";\n@fa-var-list-ul: \"\\f0ca\";\n@fa-var-location-arrow: \"\\f124\";\n@fa-var-lock: \"\\f023\";\n@fa-var-long-arrow-down: \"\\f175\";\n@fa-var-long-arrow-left: \"\\f177\";\n@fa-var-long-arrow-right: \"\\f178\";\n@fa-var-long-arrow-up: \"\\f176\";\n@fa-var-magic: \"\\f0d0\";\n@fa-var-magnet: \"\\f076\";\n@fa-var-mail-forward: \"\\f064\";\n@fa-var-mail-reply: \"\\f112\";\n@fa-var-mail-reply-all: \"\\f122\";\n@fa-var-male: \"\\f183\";\n@fa-var-map-marker: \"\\f041\";\n@fa-var-mars: \"\\f222\";\n@fa-var-mars-double: \"\\f227\";\n@fa-var-mars-stroke: \"\\f229\";\n@fa-var-mars-stroke-h: \"\\f22b\";\n@fa-var-mars-stroke-v: \"\\f22a\";\n@fa-var-maxcdn: \"\\f136\";\n@fa-var-meanpath: \"\\f20c\";\n@fa-var-medium: \"\\f23a\";\n@fa-var-medkit: \"\\f0fa\";\n@fa-var-meh-o: \"\\f11a\";\n@fa-var-mercury: \"\\f223\";\n@fa-var-microphone: \"\\f130\";\n@fa-var-microphone-slash: \"\\f131\";\n@fa-var-minus: \"\\f068\";\n@fa-var-minus-circle: \"\\f056\";\n@fa-var-minus-square: \"\\f146\";\n@fa-var-minus-square-o: \"\\f147\";\n@fa-var-mobile: \"\\f10b\";\n@fa-var-mobile-phone: \"\\f10b\";\n@fa-var-money: \"\\f0d6\";\n@fa-var-moon-o: \"\\f186\";\n@fa-var-mortar-board: \"\\f19d\";\n@fa-var-motorcycle: \"\\f21c\";\n@fa-var-music: \"\\f001\";\n@fa-var-navicon: \"\\f0c9\";\n@fa-var-neuter: \"\\f22c\";\n@fa-var-newspaper-o: \"\\f1ea\";\n@fa-var-openid: \"\\f19b\";\n@fa-var-outdent: \"\\f03b\";\n@fa-var-pagelines: \"\\f18c\";\n@fa-var-paint-brush: 
\"\\f1fc\";\n@fa-var-paper-plane: \"\\f1d8\";\n@fa-var-paper-plane-o: \"\\f1d9\";\n@fa-var-paperclip: \"\\f0c6\";\n@fa-var-paragraph: \"\\f1dd\";\n@fa-var-paste: \"\\f0ea\";\n@fa-var-pause: \"\\f04c\";\n@fa-var-paw: \"\\f1b0\";\n@fa-var-paypal: \"\\f1ed\";\n@fa-var-pencil: \"\\f040\";\n@fa-var-pencil-square: \"\\f14b\";\n@fa-var-pencil-square-o: \"\\f044\";\n@fa-var-phone: \"\\f095\";\n@fa-var-phone-square: \"\\f098\";\n@fa-var-photo: \"\\f03e\";\n@fa-var-picture-o: \"\\f03e\";\n@fa-var-pie-chart: \"\\f200\";\n@fa-var-pied-piper: \"\\f1a7\";\n@fa-var-pied-piper-alt: \"\\f1a8\";\n@fa-var-pinterest: \"\\f0d2\";\n@fa-var-pinterest-p: \"\\f231\";\n@fa-var-pinterest-square: \"\\f0d3\";\n@fa-var-plane: \"\\f072\";\n@fa-var-play: \"\\f04b\";\n@fa-var-play-circle: \"\\f144\";\n@fa-var-play-circle-o: \"\\f01d\";\n@fa-var-plug: \"\\f1e6\";\n@fa-var-plus: \"\\f067\";\n@fa-var-plus-circle: \"\\f055\";\n@fa-var-plus-square: \"\\f0fe\";\n@fa-var-plus-square-o: \"\\f196\";\n@fa-var-power-off: \"\\f011\";\n@fa-var-print: \"\\f02f\";\n@fa-var-puzzle-piece: \"\\f12e\";\n@fa-var-qq: \"\\f1d6\";\n@fa-var-qrcode: \"\\f029\";\n@fa-var-question: \"\\f128\";\n@fa-var-question-circle: \"\\f059\";\n@fa-var-quote-left: \"\\f10d\";\n@fa-var-quote-right: \"\\f10e\";\n@fa-var-ra: \"\\f1d0\";\n@fa-var-random: \"\\f074\";\n@fa-var-rebel: \"\\f1d0\";\n@fa-var-recycle: \"\\f1b8\";\n@fa-var-reddit: \"\\f1a1\";\n@fa-var-reddit-square: \"\\f1a2\";\n@fa-var-refresh: \"\\f021\";\n@fa-var-remove: \"\\f00d\";\n@fa-var-renren: \"\\f18b\";\n@fa-var-reorder: \"\\f0c9\";\n@fa-var-repeat: \"\\f01e\";\n@fa-var-reply: \"\\f112\";\n@fa-var-reply-all: \"\\f122\";\n@fa-var-retweet: \"\\f079\";\n@fa-var-rmb: \"\\f157\";\n@fa-var-road: \"\\f018\";\n@fa-var-rocket: \"\\f135\";\n@fa-var-rotate-left: \"\\f0e2\";\n@fa-var-rotate-right: \"\\f01e\";\n@fa-var-rouble: \"\\f158\";\n@fa-var-rss: \"\\f09e\";\n@fa-var-rss-square: \"\\f143\";\n@fa-var-rub: \"\\f158\";\n@fa-var-ruble: \"\\f158\";\n@fa-var-rupee: 
\"\\f156\";\n@fa-var-save: \"\\f0c7\";\n@fa-var-scissors: \"\\f0c4\";\n@fa-var-search: \"\\f002\";\n@fa-var-search-minus: \"\\f010\";\n@fa-var-search-plus: \"\\f00e\";\n@fa-var-sellsy: \"\\f213\";\n@fa-var-send: \"\\f1d8\";\n@fa-var-send-o: \"\\f1d9\";\n@fa-var-server: \"\\f233\";\n@fa-var-share: \"\\f064\";\n@fa-var-share-alt: \"\\f1e0\";\n@fa-var-share-alt-square: \"\\f1e1\";\n@fa-var-share-square: \"\\f14d\";\n@fa-var-share-square-o: \"\\f045\";\n@fa-var-shekel: \"\\f20b\";\n@fa-var-sheqel: \"\\f20b\";\n@fa-var-shield: \"\\f132\";\n@fa-var-ship: \"\\f21a\";\n@fa-var-shirtsinbulk: \"\\f214\";\n@fa-var-shopping-cart: \"\\f07a\";\n@fa-var-sign-in: \"\\f090\";\n@fa-var-sign-out: \"\\f08b\";\n@fa-var-signal: \"\\f012\";\n@fa-var-simplybuilt: \"\\f215\";\n@fa-var-sitemap: \"\\f0e8\";\n@fa-var-skyatlas: \"\\f216\";\n@fa-var-skype: \"\\f17e\";\n@fa-var-slack: \"\\f198\";\n@fa-var-sliders: \"\\f1de\";\n@fa-var-slideshare: \"\\f1e7\";\n@fa-var-smile-o: \"\\f118\";\n@fa-var-soccer-ball-o: \"\\f1e3\";\n@fa-var-sort: \"\\f0dc\";\n@fa-var-sort-alpha-asc: \"\\f15d\";\n@fa-var-sort-alpha-desc: \"\\f15e\";\n@fa-var-sort-amount-asc: \"\\f160\";\n@fa-var-sort-amount-desc: \"\\f161\";\n@fa-var-sort-asc: \"\\f0de\";\n@fa-var-sort-desc: \"\\f0dd\";\n@fa-var-sort-down: \"\\f0dd\";\n@fa-var-sort-numeric-asc: \"\\f162\";\n@fa-var-sort-numeric-desc: \"\\f163\";\n@fa-var-sort-up: \"\\f0de\";\n@fa-var-soundcloud: \"\\f1be\";\n@fa-var-space-shuttle: \"\\f197\";\n@fa-var-spinner: \"\\f110\";\n@fa-var-spoon: \"\\f1b1\";\n@fa-var-spotify: \"\\f1bc\";\n@fa-var-square: \"\\f0c8\";\n@fa-var-square-o: \"\\f096\";\n@fa-var-stack-exchange: \"\\f18d\";\n@fa-var-stack-overflow: \"\\f16c\";\n@fa-var-star: \"\\f005\";\n@fa-var-star-half: \"\\f089\";\n@fa-var-star-half-empty: \"\\f123\";\n@fa-var-star-half-full: \"\\f123\";\n@fa-var-star-half-o: \"\\f123\";\n@fa-var-star-o: \"\\f006\";\n@fa-var-steam: \"\\f1b6\";\n@fa-var-steam-square: \"\\f1b7\";\n@fa-var-step-backward: 
\"\\f048\";\n@fa-var-step-forward: \"\\f051\";\n@fa-var-stethoscope: \"\\f0f1\";\n@fa-var-stop: \"\\f04d\";\n@fa-var-street-view: \"\\f21d\";\n@fa-var-strikethrough: \"\\f0cc\";\n@fa-var-stumbleupon: \"\\f1a4\";\n@fa-var-stumbleupon-circle: \"\\f1a3\";\n@fa-var-subscript: \"\\f12c\";\n@fa-var-subway: \"\\f239\";\n@fa-var-suitcase: \"\\f0f2\";\n@fa-var-sun-o: \"\\f185\";\n@fa-var-superscript: \"\\f12b\";\n@fa-var-support: \"\\f1cd\";\n@fa-var-table: \"\\f0ce\";\n@fa-var-tablet: \"\\f10a\";\n@fa-var-tachometer: \"\\f0e4\";\n@fa-var-tag: \"\\f02b\";\n@fa-var-tags: \"\\f02c\";\n@fa-var-tasks: \"\\f0ae\";\n@fa-var-taxi: \"\\f1ba\";\n@fa-var-tencent-weibo: \"\\f1d5\";\n@fa-var-terminal: \"\\f120\";\n@fa-var-text-height: \"\\f034\";\n@fa-var-text-width: \"\\f035\";\n@fa-var-th: \"\\f00a\";\n@fa-var-th-large: \"\\f009\";\n@fa-var-th-list: \"\\f00b\";\n@fa-var-thumb-tack: \"\\f08d\";\n@fa-var-thumbs-down: \"\\f165\";\n@fa-var-thumbs-o-down: \"\\f088\";\n@fa-var-thumbs-o-up: \"\\f087\";\n@fa-var-thumbs-up: \"\\f164\";\n@fa-var-ticket: \"\\f145\";\n@fa-var-times: \"\\f00d\";\n@fa-var-times-circle: \"\\f057\";\n@fa-var-times-circle-o: \"\\f05c\";\n@fa-var-tint: \"\\f043\";\n@fa-var-toggle-down: \"\\f150\";\n@fa-var-toggle-left: \"\\f191\";\n@fa-var-toggle-off: \"\\f204\";\n@fa-var-toggle-on: \"\\f205\";\n@fa-var-toggle-right: \"\\f152\";\n@fa-var-toggle-up: \"\\f151\";\n@fa-var-train: \"\\f238\";\n@fa-var-transgender: \"\\f224\";\n@fa-var-transgender-alt: \"\\f225\";\n@fa-var-trash: \"\\f1f8\";\n@fa-var-trash-o: \"\\f014\";\n@fa-var-tree: \"\\f1bb\";\n@fa-var-trello: \"\\f181\";\n@fa-var-trophy: \"\\f091\";\n@fa-var-truck: \"\\f0d1\";\n@fa-var-try: \"\\f195\";\n@fa-var-tty: \"\\f1e4\";\n@fa-var-tumblr: \"\\f173\";\n@fa-var-tumblr-square: \"\\f174\";\n@fa-var-turkish-lira: \"\\f195\";\n@fa-var-twitch: \"\\f1e8\";\n@fa-var-twitter: \"\\f099\";\n@fa-var-twitter-square: \"\\f081\";\n@fa-var-umbrella: \"\\f0e9\";\n@fa-var-underline: \"\\f0cd\";\n@fa-var-undo: 
\"\\f0e2\";\n@fa-var-university: \"\\f19c\";\n@fa-var-unlink: \"\\f127\";\n@fa-var-unlock: \"\\f09c\";\n@fa-var-unlock-alt: \"\\f13e\";\n@fa-var-unsorted: \"\\f0dc\";\n@fa-var-upload: \"\\f093\";\n@fa-var-usd: \"\\f155\";\n@fa-var-user: \"\\f007\";\n@fa-var-user-md: \"\\f0f0\";\n@fa-var-user-plus: \"\\f234\";\n@fa-var-user-secret: \"\\f21b\";\n@fa-var-user-times: \"\\f235\";\n@fa-var-users: \"\\f0c0\";\n@fa-var-venus: \"\\f221\";\n@fa-var-venus-double: \"\\f226\";\n@fa-var-venus-mars: \"\\f228\";\n@fa-var-viacoin: \"\\f237\";\n@fa-var-video-camera: \"\\f03d\";\n@fa-var-vimeo-square: \"\\f194\";\n@fa-var-vine: \"\\f1ca\";\n@fa-var-vk: \"\\f189\";\n@fa-var-volume-down: \"\\f027\";\n@fa-var-volume-off: \"\\f026\";\n@fa-var-volume-up: \"\\f028\";\n@fa-var-warning: \"\\f071\";\n@fa-var-wechat: \"\\f1d7\";\n@fa-var-weibo: \"\\f18a\";\n@fa-var-weixin: \"\\f1d7\";\n@fa-var-whatsapp: \"\\f232\";\n@fa-var-wheelchair: \"\\f193\";\n@fa-var-wifi: \"\\f1eb\";\n@fa-var-windows: \"\\f17a\";\n@fa-var-won: \"\\f159\";\n@fa-var-wordpress: \"\\f19a\";\n@fa-var-wrench: \"\\f0ad\";\n@fa-var-xing: \"\\f168\";\n@fa-var-xing-square: \"\\f169\";\n@fa-var-yahoo: \"\\f19e\";\n@fa-var-yelp: \"\\f1e9\";\n@fa-var-yen: \"\\f157\";\n@fa-var-youtube: \"\\f167\";\n@fa-var-youtube-play: \"\\f16a\";\n@fa-var-youtube-square: \"\\f166\";\n"
  },
  {
    "path": "website/font-awesome/scss/_animated.scss",
    "content": "// Spinning Icons\n// --------------------------\n\n.#{$fa-css-prefix}-spin {\n  -webkit-animation: fa-spin 2s infinite linear;\n          animation: fa-spin 2s infinite linear;\n}\n\n.#{$fa-css-prefix}-pulse {\n  -webkit-animation: fa-spin 1s infinite steps(8);\n          animation: fa-spin 1s infinite steps(8);\n}\n\n@-webkit-keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n            transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n            transform: rotate(359deg);\n  }\n}\n\n@keyframes fa-spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n            transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n            transform: rotate(359deg);\n  }\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_bordered-pulled.scss",
    "content": "// Bordered & Pulled\n// -------------------------\n\n.#{$fa-css-prefix}-border {\n  padding: .2em .25em .15em;\n  border: solid .08em $fa-border-color;\n  border-radius: .1em;\n}\n\n.pull-right { float: right; }\n.pull-left { float: left; }\n\n.#{$fa-css-prefix} {\n  &.pull-left { margin-right: .3em; }\n  &.pull-right { margin-left: .3em; }\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_core.scss",
    "content": "// Base Class Definition\n// -------------------------\n\n.#{$fa-css-prefix} {\n  display: inline-block;\n  font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration\n  font-size: inherit; // can't have font-size inherit on line above, so need to override\n  text-rendering: auto; // optimizelegibility throws things off #1094\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  transform: translate(0, 0); // ensures no half-pixel rendering in firefox\n\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_fixed-width.scss",
    "content": "// Fixed Width Icons\n// -------------------------\n.#{$fa-css-prefix}-fw {\n  width: (18em / 14);\n  text-align: center;\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_icons.scss",
    "content": "/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen\n   readers do not read off random characters that represent icons */\n\n.#{$fa-css-prefix}-glass:before { content: $fa-var-glass; }\n.#{$fa-css-prefix}-music:before { content: $fa-var-music; }\n.#{$fa-css-prefix}-search:before { content: $fa-var-search; }\n.#{$fa-css-prefix}-envelope-o:before { content: $fa-var-envelope-o; }\n.#{$fa-css-prefix}-heart:before { content: $fa-var-heart; }\n.#{$fa-css-prefix}-star:before { content: $fa-var-star; }\n.#{$fa-css-prefix}-star-o:before { content: $fa-var-star-o; }\n.#{$fa-css-prefix}-user:before { content: $fa-var-user; }\n.#{$fa-css-prefix}-film:before { content: $fa-var-film; }\n.#{$fa-css-prefix}-th-large:before { content: $fa-var-th-large; }\n.#{$fa-css-prefix}-th:before { content: $fa-var-th; }\n.#{$fa-css-prefix}-th-list:before { content: $fa-var-th-list; }\n.#{$fa-css-prefix}-check:before { content: $fa-var-check; }\n.#{$fa-css-prefix}-remove:before,\n.#{$fa-css-prefix}-close:before,\n.#{$fa-css-prefix}-times:before { content: $fa-var-times; }\n.#{$fa-css-prefix}-search-plus:before { content: $fa-var-search-plus; }\n.#{$fa-css-prefix}-search-minus:before { content: $fa-var-search-minus; }\n.#{$fa-css-prefix}-power-off:before { content: $fa-var-power-off; }\n.#{$fa-css-prefix}-signal:before { content: $fa-var-signal; }\n.#{$fa-css-prefix}-gear:before,\n.#{$fa-css-prefix}-cog:before { content: $fa-var-cog; }\n.#{$fa-css-prefix}-trash-o:before { content: $fa-var-trash-o; }\n.#{$fa-css-prefix}-home:before { content: $fa-var-home; }\n.#{$fa-css-prefix}-file-o:before { content: $fa-var-file-o; }\n.#{$fa-css-prefix}-clock-o:before { content: $fa-var-clock-o; }\n.#{$fa-css-prefix}-road:before { content: $fa-var-road; }\n.#{$fa-css-prefix}-download:before { content: $fa-var-download; }\n.#{$fa-css-prefix}-arrow-circle-o-down:before { content: $fa-var-arrow-circle-o-down; }\n.#{$fa-css-prefix}-arrow-circle-o-up:before { content: 
$fa-var-arrow-circle-o-up; }\n.#{$fa-css-prefix}-inbox:before { content: $fa-var-inbox; }\n.#{$fa-css-prefix}-play-circle-o:before { content: $fa-var-play-circle-o; }\n.#{$fa-css-prefix}-rotate-right:before,\n.#{$fa-css-prefix}-repeat:before { content: $fa-var-repeat; }\n.#{$fa-css-prefix}-refresh:before { content: $fa-var-refresh; }\n.#{$fa-css-prefix}-list-alt:before { content: $fa-var-list-alt; }\n.#{$fa-css-prefix}-lock:before { content: $fa-var-lock; }\n.#{$fa-css-prefix}-flag:before { content: $fa-var-flag; }\n.#{$fa-css-prefix}-headphones:before { content: $fa-var-headphones; }\n.#{$fa-css-prefix}-volume-off:before { content: $fa-var-volume-off; }\n.#{$fa-css-prefix}-volume-down:before { content: $fa-var-volume-down; }\n.#{$fa-css-prefix}-volume-up:before { content: $fa-var-volume-up; }\n.#{$fa-css-prefix}-qrcode:before { content: $fa-var-qrcode; }\n.#{$fa-css-prefix}-barcode:before { content: $fa-var-barcode; }\n.#{$fa-css-prefix}-tag:before { content: $fa-var-tag; }\n.#{$fa-css-prefix}-tags:before { content: $fa-var-tags; }\n.#{$fa-css-prefix}-book:before { content: $fa-var-book; }\n.#{$fa-css-prefix}-bookmark:before { content: $fa-var-bookmark; }\n.#{$fa-css-prefix}-print:before { content: $fa-var-print; }\n.#{$fa-css-prefix}-camera:before { content: $fa-var-camera; }\n.#{$fa-css-prefix}-font:before { content: $fa-var-font; }\n.#{$fa-css-prefix}-bold:before { content: $fa-var-bold; }\n.#{$fa-css-prefix}-italic:before { content: $fa-var-italic; }\n.#{$fa-css-prefix}-text-height:before { content: $fa-var-text-height; }\n.#{$fa-css-prefix}-text-width:before { content: $fa-var-text-width; }\n.#{$fa-css-prefix}-align-left:before { content: $fa-var-align-left; }\n.#{$fa-css-prefix}-align-center:before { content: $fa-var-align-center; }\n.#{$fa-css-prefix}-align-right:before { content: $fa-var-align-right; }\n.#{$fa-css-prefix}-align-justify:before { content: $fa-var-align-justify; }\n.#{$fa-css-prefix}-list:before { content: $fa-var-list; 
}\n.#{$fa-css-prefix}-dedent:before,\n.#{$fa-css-prefix}-outdent:before { content: $fa-var-outdent; }\n.#{$fa-css-prefix}-indent:before { content: $fa-var-indent; }\n.#{$fa-css-prefix}-video-camera:before { content: $fa-var-video-camera; }\n.#{$fa-css-prefix}-photo:before,\n.#{$fa-css-prefix}-image:before,\n.#{$fa-css-prefix}-picture-o:before { content: $fa-var-picture-o; }\n.#{$fa-css-prefix}-pencil:before { content: $fa-var-pencil; }\n.#{$fa-css-prefix}-map-marker:before { content: $fa-var-map-marker; }\n.#{$fa-css-prefix}-adjust:before { content: $fa-var-adjust; }\n.#{$fa-css-prefix}-tint:before { content: $fa-var-tint; }\n.#{$fa-css-prefix}-edit:before,\n.#{$fa-css-prefix}-pencil-square-o:before { content: $fa-var-pencil-square-o; }\n.#{$fa-css-prefix}-share-square-o:before { content: $fa-var-share-square-o; }\n.#{$fa-css-prefix}-check-square-o:before { content: $fa-var-check-square-o; }\n.#{$fa-css-prefix}-arrows:before { content: $fa-var-arrows; }\n.#{$fa-css-prefix}-step-backward:before { content: $fa-var-step-backward; }\n.#{$fa-css-prefix}-fast-backward:before { content: $fa-var-fast-backward; }\n.#{$fa-css-prefix}-backward:before { content: $fa-var-backward; }\n.#{$fa-css-prefix}-play:before { content: $fa-var-play; }\n.#{$fa-css-prefix}-pause:before { content: $fa-var-pause; }\n.#{$fa-css-prefix}-stop:before { content: $fa-var-stop; }\n.#{$fa-css-prefix}-forward:before { content: $fa-var-forward; }\n.#{$fa-css-prefix}-fast-forward:before { content: $fa-var-fast-forward; }\n.#{$fa-css-prefix}-step-forward:before { content: $fa-var-step-forward; }\n.#{$fa-css-prefix}-eject:before { content: $fa-var-eject; }\n.#{$fa-css-prefix}-chevron-left:before { content: $fa-var-chevron-left; }\n.#{$fa-css-prefix}-chevron-right:before { content: $fa-var-chevron-right; }\n.#{$fa-css-prefix}-plus-circle:before { content: $fa-var-plus-circle; }\n.#{$fa-css-prefix}-minus-circle:before { content: $fa-var-minus-circle; }\n.#{$fa-css-prefix}-times-circle:before { content: 
$fa-var-times-circle; }\n.#{$fa-css-prefix}-check-circle:before { content: $fa-var-check-circle; }\n.#{$fa-css-prefix}-question-circle:before { content: $fa-var-question-circle; }\n.#{$fa-css-prefix}-info-circle:before { content: $fa-var-info-circle; }\n.#{$fa-css-prefix}-crosshairs:before { content: $fa-var-crosshairs; }\n.#{$fa-css-prefix}-times-circle-o:before { content: $fa-var-times-circle-o; }\n.#{$fa-css-prefix}-check-circle-o:before { content: $fa-var-check-circle-o; }\n.#{$fa-css-prefix}-ban:before { content: $fa-var-ban; }\n.#{$fa-css-prefix}-arrow-left:before { content: $fa-var-arrow-left; }\n.#{$fa-css-prefix}-arrow-right:before { content: $fa-var-arrow-right; }\n.#{$fa-css-prefix}-arrow-up:before { content: $fa-var-arrow-up; }\n.#{$fa-css-prefix}-arrow-down:before { content: $fa-var-arrow-down; }\n.#{$fa-css-prefix}-mail-forward:before,\n.#{$fa-css-prefix}-share:before { content: $fa-var-share; }\n.#{$fa-css-prefix}-expand:before { content: $fa-var-expand; }\n.#{$fa-css-prefix}-compress:before { content: $fa-var-compress; }\n.#{$fa-css-prefix}-plus:before { content: $fa-var-plus; }\n.#{$fa-css-prefix}-minus:before { content: $fa-var-minus; }\n.#{$fa-css-prefix}-asterisk:before { content: $fa-var-asterisk; }\n.#{$fa-css-prefix}-exclamation-circle:before { content: $fa-var-exclamation-circle; }\n.#{$fa-css-prefix}-gift:before { content: $fa-var-gift; }\n.#{$fa-css-prefix}-leaf:before { content: $fa-var-leaf; }\n.#{$fa-css-prefix}-fire:before { content: $fa-var-fire; }\n.#{$fa-css-prefix}-eye:before { content: $fa-var-eye; }\n.#{$fa-css-prefix}-eye-slash:before { content: $fa-var-eye-slash; }\n.#{$fa-css-prefix}-warning:before,\n.#{$fa-css-prefix}-exclamation-triangle:before { content: $fa-var-exclamation-triangle; }\n.#{$fa-css-prefix}-plane:before { content: $fa-var-plane; }\n.#{$fa-css-prefix}-calendar:before { content: $fa-var-calendar; }\n.#{$fa-css-prefix}-random:before { content: $fa-var-random; }\n.#{$fa-css-prefix}-comment:before { content: 
$fa-var-comment; }\n.#{$fa-css-prefix}-magnet:before { content: $fa-var-magnet; }\n.#{$fa-css-prefix}-chevron-up:before { content: $fa-var-chevron-up; }\n.#{$fa-css-prefix}-chevron-down:before { content: $fa-var-chevron-down; }\n.#{$fa-css-prefix}-retweet:before { content: $fa-var-retweet; }\n.#{$fa-css-prefix}-shopping-cart:before { content: $fa-var-shopping-cart; }\n.#{$fa-css-prefix}-folder:before { content: $fa-var-folder; }\n.#{$fa-css-prefix}-folder-open:before { content: $fa-var-folder-open; }\n.#{$fa-css-prefix}-arrows-v:before { content: $fa-var-arrows-v; }\n.#{$fa-css-prefix}-arrows-h:before { content: $fa-var-arrows-h; }\n.#{$fa-css-prefix}-bar-chart-o:before,\n.#{$fa-css-prefix}-bar-chart:before { content: $fa-var-bar-chart; }\n.#{$fa-css-prefix}-twitter-square:before { content: $fa-var-twitter-square; }\n.#{$fa-css-prefix}-facebook-square:before { content: $fa-var-facebook-square; }\n.#{$fa-css-prefix}-camera-retro:before { content: $fa-var-camera-retro; }\n.#{$fa-css-prefix}-key:before { content: $fa-var-key; }\n.#{$fa-css-prefix}-gears:before,\n.#{$fa-css-prefix}-cogs:before { content: $fa-var-cogs; }\n.#{$fa-css-prefix}-comments:before { content: $fa-var-comments; }\n.#{$fa-css-prefix}-thumbs-o-up:before { content: $fa-var-thumbs-o-up; }\n.#{$fa-css-prefix}-thumbs-o-down:before { content: $fa-var-thumbs-o-down; }\n.#{$fa-css-prefix}-star-half:before { content: $fa-var-star-half; }\n.#{$fa-css-prefix}-heart-o:before { content: $fa-var-heart-o; }\n.#{$fa-css-prefix}-sign-out:before { content: $fa-var-sign-out; }\n.#{$fa-css-prefix}-linkedin-square:before { content: $fa-var-linkedin-square; }\n.#{$fa-css-prefix}-thumb-tack:before { content: $fa-var-thumb-tack; }\n.#{$fa-css-prefix}-external-link:before { content: $fa-var-external-link; }\n.#{$fa-css-prefix}-sign-in:before { content: $fa-var-sign-in; }\n.#{$fa-css-prefix}-trophy:before { content: $fa-var-trophy; }\n.#{$fa-css-prefix}-github-square:before { content: $fa-var-github-square; 
}\n.#{$fa-css-prefix}-upload:before { content: $fa-var-upload; }\n.#{$fa-css-prefix}-lemon-o:before { content: $fa-var-lemon-o; }\n.#{$fa-css-prefix}-phone:before { content: $fa-var-phone; }\n.#{$fa-css-prefix}-square-o:before { content: $fa-var-square-o; }\n.#{$fa-css-prefix}-bookmark-o:before { content: $fa-var-bookmark-o; }\n.#{$fa-css-prefix}-phone-square:before { content: $fa-var-phone-square; }\n.#{$fa-css-prefix}-twitter:before { content: $fa-var-twitter; }\n.#{$fa-css-prefix}-facebook-f:before,\n.#{$fa-css-prefix}-facebook:before { content: $fa-var-facebook; }\n.#{$fa-css-prefix}-github:before { content: $fa-var-github; }\n.#{$fa-css-prefix}-unlock:before { content: $fa-var-unlock; }\n.#{$fa-css-prefix}-credit-card:before { content: $fa-var-credit-card; }\n.#{$fa-css-prefix}-rss:before { content: $fa-var-rss; }\n.#{$fa-css-prefix}-hdd-o:before { content: $fa-var-hdd-o; }\n.#{$fa-css-prefix}-bullhorn:before { content: $fa-var-bullhorn; }\n.#{$fa-css-prefix}-bell:before { content: $fa-var-bell; }\n.#{$fa-css-prefix}-certificate:before { content: $fa-var-certificate; }\n.#{$fa-css-prefix}-hand-o-right:before { content: $fa-var-hand-o-right; }\n.#{$fa-css-prefix}-hand-o-left:before { content: $fa-var-hand-o-left; }\n.#{$fa-css-prefix}-hand-o-up:before { content: $fa-var-hand-o-up; }\n.#{$fa-css-prefix}-hand-o-down:before { content: $fa-var-hand-o-down; }\n.#{$fa-css-prefix}-arrow-circle-left:before { content: $fa-var-arrow-circle-left; }\n.#{$fa-css-prefix}-arrow-circle-right:before { content: $fa-var-arrow-circle-right; }\n.#{$fa-css-prefix}-arrow-circle-up:before { content: $fa-var-arrow-circle-up; }\n.#{$fa-css-prefix}-arrow-circle-down:before { content: $fa-var-arrow-circle-down; }\n.#{$fa-css-prefix}-globe:before { content: $fa-var-globe; }\n.#{$fa-css-prefix}-wrench:before { content: $fa-var-wrench; }\n.#{$fa-css-prefix}-tasks:before { content: $fa-var-tasks; }\n.#{$fa-css-prefix}-filter:before { content: $fa-var-filter; 
}\n.#{$fa-css-prefix}-briefcase:before { content: $fa-var-briefcase; }\n.#{$fa-css-prefix}-arrows-alt:before { content: $fa-var-arrows-alt; }\n.#{$fa-css-prefix}-group:before,\n.#{$fa-css-prefix}-users:before { content: $fa-var-users; }\n.#{$fa-css-prefix}-chain:before,\n.#{$fa-css-prefix}-link:before { content: $fa-var-link; }\n.#{$fa-css-prefix}-cloud:before { content: $fa-var-cloud; }\n.#{$fa-css-prefix}-flask:before { content: $fa-var-flask; }\n.#{$fa-css-prefix}-cut:before,\n.#{$fa-css-prefix}-scissors:before { content: $fa-var-scissors; }\n.#{$fa-css-prefix}-copy:before,\n.#{$fa-css-prefix}-files-o:before { content: $fa-var-files-o; }\n.#{$fa-css-prefix}-paperclip:before { content: $fa-var-paperclip; }\n.#{$fa-css-prefix}-save:before,\n.#{$fa-css-prefix}-floppy-o:before { content: $fa-var-floppy-o; }\n.#{$fa-css-prefix}-square:before { content: $fa-var-square; }\n.#{$fa-css-prefix}-navicon:before,\n.#{$fa-css-prefix}-reorder:before,\n.#{$fa-css-prefix}-bars:before { content: $fa-var-bars; }\n.#{$fa-css-prefix}-list-ul:before { content: $fa-var-list-ul; }\n.#{$fa-css-prefix}-list-ol:before { content: $fa-var-list-ol; }\n.#{$fa-css-prefix}-strikethrough:before { content: $fa-var-strikethrough; }\n.#{$fa-css-prefix}-underline:before { content: $fa-var-underline; }\n.#{$fa-css-prefix}-table:before { content: $fa-var-table; }\n.#{$fa-css-prefix}-magic:before { content: $fa-var-magic; }\n.#{$fa-css-prefix}-truck:before { content: $fa-var-truck; }\n.#{$fa-css-prefix}-pinterest:before { content: $fa-var-pinterest; }\n.#{$fa-css-prefix}-pinterest-square:before { content: $fa-var-pinterest-square; }\n.#{$fa-css-prefix}-google-plus-square:before { content: $fa-var-google-plus-square; }\n.#{$fa-css-prefix}-google-plus:before { content: $fa-var-google-plus; }\n.#{$fa-css-prefix}-money:before { content: $fa-var-money; }\n.#{$fa-css-prefix}-caret-down:before { content: $fa-var-caret-down; }\n.#{$fa-css-prefix}-caret-up:before { content: $fa-var-caret-up; 
}\n.#{$fa-css-prefix}-caret-left:before { content: $fa-var-caret-left; }\n.#{$fa-css-prefix}-caret-right:before { content: $fa-var-caret-right; }\n.#{$fa-css-prefix}-columns:before { content: $fa-var-columns; }\n.#{$fa-css-prefix}-unsorted:before,\n.#{$fa-css-prefix}-sort:before { content: $fa-var-sort; }\n.#{$fa-css-prefix}-sort-down:before,\n.#{$fa-css-prefix}-sort-desc:before { content: $fa-var-sort-desc; }\n.#{$fa-css-prefix}-sort-up:before,\n.#{$fa-css-prefix}-sort-asc:before { content: $fa-var-sort-asc; }\n.#{$fa-css-prefix}-envelope:before { content: $fa-var-envelope; }\n.#{$fa-css-prefix}-linkedin:before { content: $fa-var-linkedin; }\n.#{$fa-css-prefix}-rotate-left:before,\n.#{$fa-css-prefix}-undo:before { content: $fa-var-undo; }\n.#{$fa-css-prefix}-legal:before,\n.#{$fa-css-prefix}-gavel:before { content: $fa-var-gavel; }\n.#{$fa-css-prefix}-dashboard:before,\n.#{$fa-css-prefix}-tachometer:before { content: $fa-var-tachometer; }\n.#{$fa-css-prefix}-comment-o:before { content: $fa-var-comment-o; }\n.#{$fa-css-prefix}-comments-o:before { content: $fa-var-comments-o; }\n.#{$fa-css-prefix}-flash:before,\n.#{$fa-css-prefix}-bolt:before { content: $fa-var-bolt; }\n.#{$fa-css-prefix}-sitemap:before { content: $fa-var-sitemap; }\n.#{$fa-css-prefix}-umbrella:before { content: $fa-var-umbrella; }\n.#{$fa-css-prefix}-paste:before,\n.#{$fa-css-prefix}-clipboard:before { content: $fa-var-clipboard; }\n.#{$fa-css-prefix}-lightbulb-o:before { content: $fa-var-lightbulb-o; }\n.#{$fa-css-prefix}-exchange:before { content: $fa-var-exchange; }\n.#{$fa-css-prefix}-cloud-download:before { content: $fa-var-cloud-download; }\n.#{$fa-css-prefix}-cloud-upload:before { content: $fa-var-cloud-upload; }\n.#{$fa-css-prefix}-user-md:before { content: $fa-var-user-md; }\n.#{$fa-css-prefix}-stethoscope:before { content: $fa-var-stethoscope; }\n.#{$fa-css-prefix}-suitcase:before { content: $fa-var-suitcase; }\n.#{$fa-css-prefix}-bell-o:before { content: $fa-var-bell-o; 
}\n.#{$fa-css-prefix}-coffee:before { content: $fa-var-coffee; }\n.#{$fa-css-prefix}-cutlery:before { content: $fa-var-cutlery; }\n.#{$fa-css-prefix}-file-text-o:before { content: $fa-var-file-text-o; }\n.#{$fa-css-prefix}-building-o:before { content: $fa-var-building-o; }\n.#{$fa-css-prefix}-hospital-o:before { content: $fa-var-hospital-o; }\n.#{$fa-css-prefix}-ambulance:before { content: $fa-var-ambulance; }\n.#{$fa-css-prefix}-medkit:before { content: $fa-var-medkit; }\n.#{$fa-css-prefix}-fighter-jet:before { content: $fa-var-fighter-jet; }\n.#{$fa-css-prefix}-beer:before { content: $fa-var-beer; }\n.#{$fa-css-prefix}-h-square:before { content: $fa-var-h-square; }\n.#{$fa-css-prefix}-plus-square:before { content: $fa-var-plus-square; }\n.#{$fa-css-prefix}-angle-double-left:before { content: $fa-var-angle-double-left; }\n.#{$fa-css-prefix}-angle-double-right:before { content: $fa-var-angle-double-right; }\n.#{$fa-css-prefix}-angle-double-up:before { content: $fa-var-angle-double-up; }\n.#{$fa-css-prefix}-angle-double-down:before { content: $fa-var-angle-double-down; }\n.#{$fa-css-prefix}-angle-left:before { content: $fa-var-angle-left; }\n.#{$fa-css-prefix}-angle-right:before { content: $fa-var-angle-right; }\n.#{$fa-css-prefix}-angle-up:before { content: $fa-var-angle-up; }\n.#{$fa-css-prefix}-angle-down:before { content: $fa-var-angle-down; }\n.#{$fa-css-prefix}-desktop:before { content: $fa-var-desktop; }\n.#{$fa-css-prefix}-laptop:before { content: $fa-var-laptop; }\n.#{$fa-css-prefix}-tablet:before { content: $fa-var-tablet; }\n.#{$fa-css-prefix}-mobile-phone:before,\n.#{$fa-css-prefix}-mobile:before { content: $fa-var-mobile; }\n.#{$fa-css-prefix}-circle-o:before { content: $fa-var-circle-o; }\n.#{$fa-css-prefix}-quote-left:before { content: $fa-var-quote-left; }\n.#{$fa-css-prefix}-quote-right:before { content: $fa-var-quote-right; }\n.#{$fa-css-prefix}-spinner:before { content: $fa-var-spinner; }\n.#{$fa-css-prefix}-circle:before { content: 
$fa-var-circle; }\n.#{$fa-css-prefix}-mail-reply:before,\n.#{$fa-css-prefix}-reply:before { content: $fa-var-reply; }\n.#{$fa-css-prefix}-github-alt:before { content: $fa-var-github-alt; }\n.#{$fa-css-prefix}-folder-o:before { content: $fa-var-folder-o; }\n.#{$fa-css-prefix}-folder-open-o:before { content: $fa-var-folder-open-o; }\n.#{$fa-css-prefix}-smile-o:before { content: $fa-var-smile-o; }\n.#{$fa-css-prefix}-frown-o:before { content: $fa-var-frown-o; }\n.#{$fa-css-prefix}-meh-o:before { content: $fa-var-meh-o; }\n.#{$fa-css-prefix}-gamepad:before { content: $fa-var-gamepad; }\n.#{$fa-css-prefix}-keyboard-o:before { content: $fa-var-keyboard-o; }\n.#{$fa-css-prefix}-flag-o:before { content: $fa-var-flag-o; }\n.#{$fa-css-prefix}-flag-checkered:before { content: $fa-var-flag-checkered; }\n.#{$fa-css-prefix}-terminal:before { content: $fa-var-terminal; }\n.#{$fa-css-prefix}-code:before { content: $fa-var-code; }\n.#{$fa-css-prefix}-mail-reply-all:before,\n.#{$fa-css-prefix}-reply-all:before { content: $fa-var-reply-all; }\n.#{$fa-css-prefix}-star-half-empty:before,\n.#{$fa-css-prefix}-star-half-full:before,\n.#{$fa-css-prefix}-star-half-o:before { content: $fa-var-star-half-o; }\n.#{$fa-css-prefix}-location-arrow:before { content: $fa-var-location-arrow; }\n.#{$fa-css-prefix}-crop:before { content: $fa-var-crop; }\n.#{$fa-css-prefix}-code-fork:before { content: $fa-var-code-fork; }\n.#{$fa-css-prefix}-unlink:before,\n.#{$fa-css-prefix}-chain-broken:before { content: $fa-var-chain-broken; }\n.#{$fa-css-prefix}-question:before { content: $fa-var-question; }\n.#{$fa-css-prefix}-info:before { content: $fa-var-info; }\n.#{$fa-css-prefix}-exclamation:before { content: $fa-var-exclamation; }\n.#{$fa-css-prefix}-superscript:before { content: $fa-var-superscript; }\n.#{$fa-css-prefix}-subscript:before { content: $fa-var-subscript; }\n.#{$fa-css-prefix}-eraser:before { content: $fa-var-eraser; }\n.#{$fa-css-prefix}-puzzle-piece:before { content: $fa-var-puzzle-piece; 
}\n.#{$fa-css-prefix}-microphone:before { content: $fa-var-microphone; }\n.#{$fa-css-prefix}-microphone-slash:before { content: $fa-var-microphone-slash; }\n.#{$fa-css-prefix}-shield:before { content: $fa-var-shield; }\n.#{$fa-css-prefix}-calendar-o:before { content: $fa-var-calendar-o; }\n.#{$fa-css-prefix}-fire-extinguisher:before { content: $fa-var-fire-extinguisher; }\n.#{$fa-css-prefix}-rocket:before { content: $fa-var-rocket; }\n.#{$fa-css-prefix}-maxcdn:before { content: $fa-var-maxcdn; }\n.#{$fa-css-prefix}-chevron-circle-left:before { content: $fa-var-chevron-circle-left; }\n.#{$fa-css-prefix}-chevron-circle-right:before { content: $fa-var-chevron-circle-right; }\n.#{$fa-css-prefix}-chevron-circle-up:before { content: $fa-var-chevron-circle-up; }\n.#{$fa-css-prefix}-chevron-circle-down:before { content: $fa-var-chevron-circle-down; }\n.#{$fa-css-prefix}-html5:before { content: $fa-var-html5; }\n.#{$fa-css-prefix}-css3:before { content: $fa-var-css3; }\n.#{$fa-css-prefix}-anchor:before { content: $fa-var-anchor; }\n.#{$fa-css-prefix}-unlock-alt:before { content: $fa-var-unlock-alt; }\n.#{$fa-css-prefix}-bullseye:before { content: $fa-var-bullseye; }\n.#{$fa-css-prefix}-ellipsis-h:before { content: $fa-var-ellipsis-h; }\n.#{$fa-css-prefix}-ellipsis-v:before { content: $fa-var-ellipsis-v; }\n.#{$fa-css-prefix}-rss-square:before { content: $fa-var-rss-square; }\n.#{$fa-css-prefix}-play-circle:before { content: $fa-var-play-circle; }\n.#{$fa-css-prefix}-ticket:before { content: $fa-var-ticket; }\n.#{$fa-css-prefix}-minus-square:before { content: $fa-var-minus-square; }\n.#{$fa-css-prefix}-minus-square-o:before { content: $fa-var-minus-square-o; }\n.#{$fa-css-prefix}-level-up:before { content: $fa-var-level-up; }\n.#{$fa-css-prefix}-level-down:before { content: $fa-var-level-down; }\n.#{$fa-css-prefix}-check-square:before { content: $fa-var-check-square; }\n.#{$fa-css-prefix}-pencil-square:before { content: $fa-var-pencil-square; 
}\n.#{$fa-css-prefix}-external-link-square:before { content: $fa-var-external-link-square; }\n.#{$fa-css-prefix}-share-square:before { content: $fa-var-share-square; }\n.#{$fa-css-prefix}-compass:before { content: $fa-var-compass; }\n.#{$fa-css-prefix}-toggle-down:before,\n.#{$fa-css-prefix}-caret-square-o-down:before { content: $fa-var-caret-square-o-down; }\n.#{$fa-css-prefix}-toggle-up:before,\n.#{$fa-css-prefix}-caret-square-o-up:before { content: $fa-var-caret-square-o-up; }\n.#{$fa-css-prefix}-toggle-right:before,\n.#{$fa-css-prefix}-caret-square-o-right:before { content: $fa-var-caret-square-o-right; }\n.#{$fa-css-prefix}-euro:before,\n.#{$fa-css-prefix}-eur:before { content: $fa-var-eur; }\n.#{$fa-css-prefix}-gbp:before { content: $fa-var-gbp; }\n.#{$fa-css-prefix}-dollar:before,\n.#{$fa-css-prefix}-usd:before { content: $fa-var-usd; }\n.#{$fa-css-prefix}-rupee:before,\n.#{$fa-css-prefix}-inr:before { content: $fa-var-inr; }\n.#{$fa-css-prefix}-cny:before,\n.#{$fa-css-prefix}-rmb:before,\n.#{$fa-css-prefix}-yen:before,\n.#{$fa-css-prefix}-jpy:before { content: $fa-var-jpy; }\n.#{$fa-css-prefix}-ruble:before,\n.#{$fa-css-prefix}-rouble:before,\n.#{$fa-css-prefix}-rub:before { content: $fa-var-rub; }\n.#{$fa-css-prefix}-won:before,\n.#{$fa-css-prefix}-krw:before { content: $fa-var-krw; }\n.#{$fa-css-prefix}-bitcoin:before,\n.#{$fa-css-prefix}-btc:before { content: $fa-var-btc; }\n.#{$fa-css-prefix}-file:before { content: $fa-var-file; }\n.#{$fa-css-prefix}-file-text:before { content: $fa-var-file-text; }\n.#{$fa-css-prefix}-sort-alpha-asc:before { content: $fa-var-sort-alpha-asc; }\n.#{$fa-css-prefix}-sort-alpha-desc:before { content: $fa-var-sort-alpha-desc; }\n.#{$fa-css-prefix}-sort-amount-asc:before { content: $fa-var-sort-amount-asc; }\n.#{$fa-css-prefix}-sort-amount-desc:before { content: $fa-var-sort-amount-desc; }\n.#{$fa-css-prefix}-sort-numeric-asc:before { content: $fa-var-sort-numeric-asc; }\n.#{$fa-css-prefix}-sort-numeric-desc:before { content: 
$fa-var-sort-numeric-desc; }\n.#{$fa-css-prefix}-thumbs-up:before { content: $fa-var-thumbs-up; }\n.#{$fa-css-prefix}-thumbs-down:before { content: $fa-var-thumbs-down; }\n.#{$fa-css-prefix}-youtube-square:before { content: $fa-var-youtube-square; }\n.#{$fa-css-prefix}-youtube:before { content: $fa-var-youtube; }\n.#{$fa-css-prefix}-xing:before { content: $fa-var-xing; }\n.#{$fa-css-prefix}-xing-square:before { content: $fa-var-xing-square; }\n.#{$fa-css-prefix}-youtube-play:before { content: $fa-var-youtube-play; }\n.#{$fa-css-prefix}-dropbox:before { content: $fa-var-dropbox; }\n.#{$fa-css-prefix}-stack-overflow:before { content: $fa-var-stack-overflow; }\n.#{$fa-css-prefix}-instagram:before { content: $fa-var-instagram; }\n.#{$fa-css-prefix}-flickr:before { content: $fa-var-flickr; }\n.#{$fa-css-prefix}-adn:before { content: $fa-var-adn; }\n.#{$fa-css-prefix}-bitbucket:before { content: $fa-var-bitbucket; }\n.#{$fa-css-prefix}-bitbucket-square:before { content: $fa-var-bitbucket-square; }\n.#{$fa-css-prefix}-tumblr:before { content: $fa-var-tumblr; }\n.#{$fa-css-prefix}-tumblr-square:before { content: $fa-var-tumblr-square; }\n.#{$fa-css-prefix}-long-arrow-down:before { content: $fa-var-long-arrow-down; }\n.#{$fa-css-prefix}-long-arrow-up:before { content: $fa-var-long-arrow-up; }\n.#{$fa-css-prefix}-long-arrow-left:before { content: $fa-var-long-arrow-left; }\n.#{$fa-css-prefix}-long-arrow-right:before { content: $fa-var-long-arrow-right; }\n.#{$fa-css-prefix}-apple:before { content: $fa-var-apple; }\n.#{$fa-css-prefix}-windows:before { content: $fa-var-windows; }\n.#{$fa-css-prefix}-android:before { content: $fa-var-android; }\n.#{$fa-css-prefix}-linux:before { content: $fa-var-linux; }\n.#{$fa-css-prefix}-dribbble:before { content: $fa-var-dribbble; }\n.#{$fa-css-prefix}-skype:before { content: $fa-var-skype; }\n.#{$fa-css-prefix}-foursquare:before { content: $fa-var-foursquare; }\n.#{$fa-css-prefix}-trello:before { content: $fa-var-trello; 
}\n.#{$fa-css-prefix}-female:before { content: $fa-var-female; }\n.#{$fa-css-prefix}-male:before { content: $fa-var-male; }\n.#{$fa-css-prefix}-gittip:before,\n.#{$fa-css-prefix}-gratipay:before { content: $fa-var-gratipay; }\n.#{$fa-css-prefix}-sun-o:before { content: $fa-var-sun-o; }\n.#{$fa-css-prefix}-moon-o:before { content: $fa-var-moon-o; }\n.#{$fa-css-prefix}-archive:before { content: $fa-var-archive; }\n.#{$fa-css-prefix}-bug:before { content: $fa-var-bug; }\n.#{$fa-css-prefix}-vk:before { content: $fa-var-vk; }\n.#{$fa-css-prefix}-weibo:before { content: $fa-var-weibo; }\n.#{$fa-css-prefix}-renren:before { content: $fa-var-renren; }\n.#{$fa-css-prefix}-pagelines:before { content: $fa-var-pagelines; }\n.#{$fa-css-prefix}-stack-exchange:before { content: $fa-var-stack-exchange; }\n.#{$fa-css-prefix}-arrow-circle-o-right:before { content: $fa-var-arrow-circle-o-right; }\n.#{$fa-css-prefix}-arrow-circle-o-left:before { content: $fa-var-arrow-circle-o-left; }\n.#{$fa-css-prefix}-toggle-left:before,\n.#{$fa-css-prefix}-caret-square-o-left:before { content: $fa-var-caret-square-o-left; }\n.#{$fa-css-prefix}-dot-circle-o:before { content: $fa-var-dot-circle-o; }\n.#{$fa-css-prefix}-wheelchair:before { content: $fa-var-wheelchair; }\n.#{$fa-css-prefix}-vimeo-square:before { content: $fa-var-vimeo-square; }\n.#{$fa-css-prefix}-turkish-lira:before,\n.#{$fa-css-prefix}-try:before { content: $fa-var-try; }\n.#{$fa-css-prefix}-plus-square-o:before { content: $fa-var-plus-square-o; }\n.#{$fa-css-prefix}-space-shuttle:before { content: $fa-var-space-shuttle; }\n.#{$fa-css-prefix}-slack:before { content: $fa-var-slack; }\n.#{$fa-css-prefix}-envelope-square:before { content: $fa-var-envelope-square; }\n.#{$fa-css-prefix}-wordpress:before { content: $fa-var-wordpress; }\n.#{$fa-css-prefix}-openid:before { content: $fa-var-openid; }\n.#{$fa-css-prefix}-institution:before,\n.#{$fa-css-prefix}-bank:before,\n.#{$fa-css-prefix}-university:before { content: $fa-var-university; 
}\n.#{$fa-css-prefix}-mortar-board:before,\n.#{$fa-css-prefix}-graduation-cap:before { content: $fa-var-graduation-cap; }\n.#{$fa-css-prefix}-yahoo:before { content: $fa-var-yahoo; }\n.#{$fa-css-prefix}-google:before { content: $fa-var-google; }\n.#{$fa-css-prefix}-reddit:before { content: $fa-var-reddit; }\n.#{$fa-css-prefix}-reddit-square:before { content: $fa-var-reddit-square; }\n.#{$fa-css-prefix}-stumbleupon-circle:before { content: $fa-var-stumbleupon-circle; }\n.#{$fa-css-prefix}-stumbleupon:before { content: $fa-var-stumbleupon; }\n.#{$fa-css-prefix}-delicious:before { content: $fa-var-delicious; }\n.#{$fa-css-prefix}-digg:before { content: $fa-var-digg; }\n.#{$fa-css-prefix}-pied-piper:before { content: $fa-var-pied-piper; }\n.#{$fa-css-prefix}-pied-piper-alt:before { content: $fa-var-pied-piper-alt; }\n.#{$fa-css-prefix}-drupal:before { content: $fa-var-drupal; }\n.#{$fa-css-prefix}-joomla:before { content: $fa-var-joomla; }\n.#{$fa-css-prefix}-language:before { content: $fa-var-language; }\n.#{$fa-css-prefix}-fax:before { content: $fa-var-fax; }\n.#{$fa-css-prefix}-building:before { content: $fa-var-building; }\n.#{$fa-css-prefix}-child:before { content: $fa-var-child; }\n.#{$fa-css-prefix}-paw:before { content: $fa-var-paw; }\n.#{$fa-css-prefix}-spoon:before { content: $fa-var-spoon; }\n.#{$fa-css-prefix}-cube:before { content: $fa-var-cube; }\n.#{$fa-css-prefix}-cubes:before { content: $fa-var-cubes; }\n.#{$fa-css-prefix}-behance:before { content: $fa-var-behance; }\n.#{$fa-css-prefix}-behance-square:before { content: $fa-var-behance-square; }\n.#{$fa-css-prefix}-steam:before { content: $fa-var-steam; }\n.#{$fa-css-prefix}-steam-square:before { content: $fa-var-steam-square; }\n.#{$fa-css-prefix}-recycle:before { content: $fa-var-recycle; }\n.#{$fa-css-prefix}-automobile:before,\n.#{$fa-css-prefix}-car:before { content: $fa-var-car; }\n.#{$fa-css-prefix}-cab:before,\n.#{$fa-css-prefix}-taxi:before { content: $fa-var-taxi; 
}\n.#{$fa-css-prefix}-tree:before { content: $fa-var-tree; }\n.#{$fa-css-prefix}-spotify:before { content: $fa-var-spotify; }\n.#{$fa-css-prefix}-deviantart:before { content: $fa-var-deviantart; }\n.#{$fa-css-prefix}-soundcloud:before { content: $fa-var-soundcloud; }\n.#{$fa-css-prefix}-database:before { content: $fa-var-database; }\n.#{$fa-css-prefix}-file-pdf-o:before { content: $fa-var-file-pdf-o; }\n.#{$fa-css-prefix}-file-word-o:before { content: $fa-var-file-word-o; }\n.#{$fa-css-prefix}-file-excel-o:before { content: $fa-var-file-excel-o; }\n.#{$fa-css-prefix}-file-powerpoint-o:before { content: $fa-var-file-powerpoint-o; }\n.#{$fa-css-prefix}-file-photo-o:before,\n.#{$fa-css-prefix}-file-picture-o:before,\n.#{$fa-css-prefix}-file-image-o:before { content: $fa-var-file-image-o; }\n.#{$fa-css-prefix}-file-zip-o:before,\n.#{$fa-css-prefix}-file-archive-o:before { content: $fa-var-file-archive-o; }\n.#{$fa-css-prefix}-file-sound-o:before,\n.#{$fa-css-prefix}-file-audio-o:before { content: $fa-var-file-audio-o; }\n.#{$fa-css-prefix}-file-movie-o:before,\n.#{$fa-css-prefix}-file-video-o:before { content: $fa-var-file-video-o; }\n.#{$fa-css-prefix}-file-code-o:before { content: $fa-var-file-code-o; }\n.#{$fa-css-prefix}-vine:before { content: $fa-var-vine; }\n.#{$fa-css-prefix}-codepen:before { content: $fa-var-codepen; }\n.#{$fa-css-prefix}-jsfiddle:before { content: $fa-var-jsfiddle; }\n.#{$fa-css-prefix}-life-bouy:before,\n.#{$fa-css-prefix}-life-buoy:before,\n.#{$fa-css-prefix}-life-saver:before,\n.#{$fa-css-prefix}-support:before,\n.#{$fa-css-prefix}-life-ring:before { content: $fa-var-life-ring; }\n.#{$fa-css-prefix}-circle-o-notch:before { content: $fa-var-circle-o-notch; }\n.#{$fa-css-prefix}-ra:before,\n.#{$fa-css-prefix}-rebel:before { content: $fa-var-rebel; }\n.#{$fa-css-prefix}-ge:before,\n.#{$fa-css-prefix}-empire:before { content: $fa-var-empire; }\n.#{$fa-css-prefix}-git-square:before { content: $fa-var-git-square; }\n.#{$fa-css-prefix}-git:before 
{ content: $fa-var-git; }\n.#{$fa-css-prefix}-hacker-news:before { content: $fa-var-hacker-news; }\n.#{$fa-css-prefix}-tencent-weibo:before { content: $fa-var-tencent-weibo; }\n.#{$fa-css-prefix}-qq:before { content: $fa-var-qq; }\n.#{$fa-css-prefix}-wechat:before,\n.#{$fa-css-prefix}-weixin:before { content: $fa-var-weixin; }\n.#{$fa-css-prefix}-send:before,\n.#{$fa-css-prefix}-paper-plane:before { content: $fa-var-paper-plane; }\n.#{$fa-css-prefix}-send-o:before,\n.#{$fa-css-prefix}-paper-plane-o:before { content: $fa-var-paper-plane-o; }\n.#{$fa-css-prefix}-history:before { content: $fa-var-history; }\n.#{$fa-css-prefix}-genderless:before,\n.#{$fa-css-prefix}-circle-thin:before { content: $fa-var-circle-thin; }\n.#{$fa-css-prefix}-header:before { content: $fa-var-header; }\n.#{$fa-css-prefix}-paragraph:before { content: $fa-var-paragraph; }\n.#{$fa-css-prefix}-sliders:before { content: $fa-var-sliders; }\n.#{$fa-css-prefix}-share-alt:before { content: $fa-var-share-alt; }\n.#{$fa-css-prefix}-share-alt-square:before { content: $fa-var-share-alt-square; }\n.#{$fa-css-prefix}-bomb:before { content: $fa-var-bomb; }\n.#{$fa-css-prefix}-soccer-ball-o:before,\n.#{$fa-css-prefix}-futbol-o:before { content: $fa-var-futbol-o; }\n.#{$fa-css-prefix}-tty:before { content: $fa-var-tty; }\n.#{$fa-css-prefix}-binoculars:before { content: $fa-var-binoculars; }\n.#{$fa-css-prefix}-plug:before { content: $fa-var-plug; }\n.#{$fa-css-prefix}-slideshare:before { content: $fa-var-slideshare; }\n.#{$fa-css-prefix}-twitch:before { content: $fa-var-twitch; }\n.#{$fa-css-prefix}-yelp:before { content: $fa-var-yelp; }\n.#{$fa-css-prefix}-newspaper-o:before { content: $fa-var-newspaper-o; }\n.#{$fa-css-prefix}-wifi:before { content: $fa-var-wifi; }\n.#{$fa-css-prefix}-calculator:before { content: $fa-var-calculator; }\n.#{$fa-css-prefix}-paypal:before { content: $fa-var-paypal; }\n.#{$fa-css-prefix}-google-wallet:before { content: $fa-var-google-wallet; }\n.#{$fa-css-prefix}-cc-visa:before 
{ content: $fa-var-cc-visa; }\n.#{$fa-css-prefix}-cc-mastercard:before { content: $fa-var-cc-mastercard; }\n.#{$fa-css-prefix}-cc-discover:before { content: $fa-var-cc-discover; }\n.#{$fa-css-prefix}-cc-amex:before { content: $fa-var-cc-amex; }\n.#{$fa-css-prefix}-cc-paypal:before { content: $fa-var-cc-paypal; }\n.#{$fa-css-prefix}-cc-stripe:before { content: $fa-var-cc-stripe; }\n.#{$fa-css-prefix}-bell-slash:before { content: $fa-var-bell-slash; }\n.#{$fa-css-prefix}-bell-slash-o:before { content: $fa-var-bell-slash-o; }\n.#{$fa-css-prefix}-trash:before { content: $fa-var-trash; }\n.#{$fa-css-prefix}-copyright:before { content: $fa-var-copyright; }\n.#{$fa-css-prefix}-at:before { content: $fa-var-at; }\n.#{$fa-css-prefix}-eyedropper:before { content: $fa-var-eyedropper; }\n.#{$fa-css-prefix}-paint-brush:before { content: $fa-var-paint-brush; }\n.#{$fa-css-prefix}-birthday-cake:before { content: $fa-var-birthday-cake; }\n.#{$fa-css-prefix}-area-chart:before { content: $fa-var-area-chart; }\n.#{$fa-css-prefix}-pie-chart:before { content: $fa-var-pie-chart; }\n.#{$fa-css-prefix}-line-chart:before { content: $fa-var-line-chart; }\n.#{$fa-css-prefix}-lastfm:before { content: $fa-var-lastfm; }\n.#{$fa-css-prefix}-lastfm-square:before { content: $fa-var-lastfm-square; }\n.#{$fa-css-prefix}-toggle-off:before { content: $fa-var-toggle-off; }\n.#{$fa-css-prefix}-toggle-on:before { content: $fa-var-toggle-on; }\n.#{$fa-css-prefix}-bicycle:before { content: $fa-var-bicycle; }\n.#{$fa-css-prefix}-bus:before { content: $fa-var-bus; }\n.#{$fa-css-prefix}-ioxhost:before { content: $fa-var-ioxhost; }\n.#{$fa-css-prefix}-angellist:before { content: $fa-var-angellist; }\n.#{$fa-css-prefix}-cc:before { content: $fa-var-cc; }\n.#{$fa-css-prefix}-shekel:before,\n.#{$fa-css-prefix}-sheqel:before,\n.#{$fa-css-prefix}-ils:before { content: $fa-var-ils; }\n.#{$fa-css-prefix}-meanpath:before { content: $fa-var-meanpath; }\n.#{$fa-css-prefix}-buysellads:before { content: $fa-var-buysellads; 
}\n.#{$fa-css-prefix}-connectdevelop:before { content: $fa-var-connectdevelop; }\n.#{$fa-css-prefix}-dashcube:before { content: $fa-var-dashcube; }\n.#{$fa-css-prefix}-forumbee:before { content: $fa-var-forumbee; }\n.#{$fa-css-prefix}-leanpub:before { content: $fa-var-leanpub; }\n.#{$fa-css-prefix}-sellsy:before { content: $fa-var-sellsy; }\n.#{$fa-css-prefix}-shirtsinbulk:before { content: $fa-var-shirtsinbulk; }\n.#{$fa-css-prefix}-simplybuilt:before { content: $fa-var-simplybuilt; }\n.#{$fa-css-prefix}-skyatlas:before { content: $fa-var-skyatlas; }\n.#{$fa-css-prefix}-cart-plus:before { content: $fa-var-cart-plus; }\n.#{$fa-css-prefix}-cart-arrow-down:before { content: $fa-var-cart-arrow-down; }\n.#{$fa-css-prefix}-diamond:before { content: $fa-var-diamond; }\n.#{$fa-css-prefix}-ship:before { content: $fa-var-ship; }\n.#{$fa-css-prefix}-user-secret:before { content: $fa-var-user-secret; }\n.#{$fa-css-prefix}-motorcycle:before { content: $fa-var-motorcycle; }\n.#{$fa-css-prefix}-street-view:before { content: $fa-var-street-view; }\n.#{$fa-css-prefix}-heartbeat:before { content: $fa-var-heartbeat; }\n.#{$fa-css-prefix}-venus:before { content: $fa-var-venus; }\n.#{$fa-css-prefix}-mars:before { content: $fa-var-mars; }\n.#{$fa-css-prefix}-mercury:before { content: $fa-var-mercury; }\n.#{$fa-css-prefix}-transgender:before { content: $fa-var-transgender; }\n.#{$fa-css-prefix}-transgender-alt:before { content: $fa-var-transgender-alt; }\n.#{$fa-css-prefix}-venus-double:before { content: $fa-var-venus-double; }\n.#{$fa-css-prefix}-mars-double:before { content: $fa-var-mars-double; }\n.#{$fa-css-prefix}-venus-mars:before { content: $fa-var-venus-mars; }\n.#{$fa-css-prefix}-mars-stroke:before { content: $fa-var-mars-stroke; }\n.#{$fa-css-prefix}-mars-stroke-v:before { content: $fa-var-mars-stroke-v; }\n.#{$fa-css-prefix}-mars-stroke-h:before { content: $fa-var-mars-stroke-h; }\n.#{$fa-css-prefix}-neuter:before { content: $fa-var-neuter; 
}\n.#{$fa-css-prefix}-facebook-official:before { content: $fa-var-facebook-official; }\n.#{$fa-css-prefix}-pinterest-p:before { content: $fa-var-pinterest-p; }\n.#{$fa-css-prefix}-whatsapp:before { content: $fa-var-whatsapp; }\n.#{$fa-css-prefix}-server:before { content: $fa-var-server; }\n.#{$fa-css-prefix}-user-plus:before { content: $fa-var-user-plus; }\n.#{$fa-css-prefix}-user-times:before { content: $fa-var-user-times; }\n.#{$fa-css-prefix}-hotel:before,\n.#{$fa-css-prefix}-bed:before { content: $fa-var-bed; }\n.#{$fa-css-prefix}-viacoin:before { content: $fa-var-viacoin; }\n.#{$fa-css-prefix}-train:before { content: $fa-var-train; }\n.#{$fa-css-prefix}-subway:before { content: $fa-var-subway; }\n.#{$fa-css-prefix}-medium:before { content: $fa-var-medium; }\n"
  },
  {
    "path": "website/font-awesome/scss/_larger.scss",
    "content": "// Icon Sizes\n// -------------------------\n\n/* makes the font 33% larger relative to the icon container */\n.#{$fa-css-prefix}-lg {\n  font-size: (4em / 3);\n  line-height: (3em / 4);\n  vertical-align: -15%;\n}\n.#{$fa-css-prefix}-2x { font-size: 2em; }\n.#{$fa-css-prefix}-3x { font-size: 3em; }\n.#{$fa-css-prefix}-4x { font-size: 4em; }\n.#{$fa-css-prefix}-5x { font-size: 5em; }\n"
  },
  {
    "path": "website/font-awesome/scss/_list.scss",
    "content": "// List Icons\n// -------------------------\n\n.#{$fa-css-prefix}-ul {\n  padding-left: 0;\n  margin-left: $fa-li-width;\n  list-style-type: none;\n  > li { position: relative; }\n}\n.#{$fa-css-prefix}-li {\n  position: absolute;\n  left: -$fa-li-width;\n  width: $fa-li-width;\n  top: (2em / 14);\n  text-align: center;\n  &.#{$fa-css-prefix}-lg {\n    left: -$fa-li-width + (4em / 14);\n  }\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_mixins.scss",
    "content": "// Mixins\n// --------------------------\n\n@mixin fa-icon() {\n  display: inline-block;\n  font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration\n  font-size: inherit; // can't have font-size inherit on line above, so need to override\n  text-rendering: auto; // optimizelegibility throws things off #1094\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  transform: translate(0, 0); // ensures no half-pixel rendering in firefox\n\n}\n\n@mixin fa-icon-rotate($degrees, $rotation) {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});\n  -webkit-transform: rotate($degrees);\n      -ms-transform: rotate($degrees);\n          transform: rotate($degrees);\n}\n\n@mixin fa-icon-flip($horiz, $vert, $rotation) {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});\n  -webkit-transform: scale($horiz, $vert);\n      -ms-transform: scale($horiz, $vert);\n          transform: scale($horiz, $vert);\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_path.scss",
    "content": "/* FONT PATH\n * -------------------------- */\n\n@font-face {\n  font-family: 'FontAwesome';\n  src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}');\n  src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'),\n    url('#{$fa-font-path}/fontawesome-webfont.woff2?v=#{$fa-version}') format('woff2'),\n    url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'),\n    url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'),\n    url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg');\n//  src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts\n  font-weight: normal;\n  font-style: normal;\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_rotated-flipped.scss",
    "content": "// Rotated & Flipped Icons\n// -------------------------\n\n.#{$fa-css-prefix}-rotate-90  { @include fa-icon-rotate(90deg, 1);  }\n.#{$fa-css-prefix}-rotate-180 { @include fa-icon-rotate(180deg, 2); }\n.#{$fa-css-prefix}-rotate-270 { @include fa-icon-rotate(270deg, 3); }\n\n.#{$fa-css-prefix}-flip-horizontal { @include fa-icon-flip(-1, 1, 0); }\n.#{$fa-css-prefix}-flip-vertical   { @include fa-icon-flip(1, -1, 2); }\n\n// Hook for IE8-9\n// -------------------------\n\n:root .#{$fa-css-prefix}-rotate-90,\n:root .#{$fa-css-prefix}-rotate-180,\n:root .#{$fa-css-prefix}-rotate-270,\n:root .#{$fa-css-prefix}-flip-horizontal,\n:root .#{$fa-css-prefix}-flip-vertical {\n  filter: none;\n}\n"
  },
  {
    "path": "website/font-awesome/scss/_stacked.scss",
    "content": "// Stacked Icons\n// -------------------------\n\n.#{$fa-css-prefix}-stack {\n  position: relative;\n  display: inline-block;\n  width: 2em;\n  height: 2em;\n  line-height: 2em;\n  vertical-align: middle;\n}\n.#{$fa-css-prefix}-stack-1x, .#{$fa-css-prefix}-stack-2x {\n  position: absolute;\n  left: 0;\n  width: 100%;\n  text-align: center;\n}\n.#{$fa-css-prefix}-stack-1x { line-height: inherit; }\n.#{$fa-css-prefix}-stack-2x { font-size: 2em; }\n.#{$fa-css-prefix}-inverse { color: $fa-inverse; }\n"
  },
  {
    "path": "website/font-awesome/scss/_variables.scss",
    "content": "// Variables\n// --------------------------\n\n$fa-font-path:        \"../fonts\" !default;\n$fa-font-size-base:   14px !default;\n//$fa-font-path:        \"//netdna.bootstrapcdn.com/font-awesome/4.3.0/fonts\" !default; // for referencing Bootstrap CDN font files directly\n$fa-css-prefix:       fa !default;\n$fa-version:          \"4.3.0\" !default;\n$fa-border-color:     #eee !default;\n$fa-inverse:          #fff !default;\n$fa-li-width:         (30em / 14) !default;\n\n$fa-var-adjust: \"\\f042\";\n$fa-var-adn: \"\\f170\";\n$fa-var-align-center: \"\\f037\";\n$fa-var-align-justify: \"\\f039\";\n$fa-var-align-left: \"\\f036\";\n$fa-var-align-right: \"\\f038\";\n$fa-var-ambulance: \"\\f0f9\";\n$fa-var-anchor: \"\\f13d\";\n$fa-var-android: \"\\f17b\";\n$fa-var-angellist: \"\\f209\";\n$fa-var-angle-double-down: \"\\f103\";\n$fa-var-angle-double-left: \"\\f100\";\n$fa-var-angle-double-right: \"\\f101\";\n$fa-var-angle-double-up: \"\\f102\";\n$fa-var-angle-down: \"\\f107\";\n$fa-var-angle-left: \"\\f104\";\n$fa-var-angle-right: \"\\f105\";\n$fa-var-angle-up: \"\\f106\";\n$fa-var-apple: \"\\f179\";\n$fa-var-archive: \"\\f187\";\n$fa-var-area-chart: \"\\f1fe\";\n$fa-var-arrow-circle-down: \"\\f0ab\";\n$fa-var-arrow-circle-left: \"\\f0a8\";\n$fa-var-arrow-circle-o-down: \"\\f01a\";\n$fa-var-arrow-circle-o-left: \"\\f190\";\n$fa-var-arrow-circle-o-right: \"\\f18e\";\n$fa-var-arrow-circle-o-up: \"\\f01b\";\n$fa-var-arrow-circle-right: \"\\f0a9\";\n$fa-var-arrow-circle-up: \"\\f0aa\";\n$fa-var-arrow-down: \"\\f063\";\n$fa-var-arrow-left: \"\\f060\";\n$fa-var-arrow-right: \"\\f061\";\n$fa-var-arrow-up: \"\\f062\";\n$fa-var-arrows: \"\\f047\";\n$fa-var-arrows-alt: \"\\f0b2\";\n$fa-var-arrows-h: \"\\f07e\";\n$fa-var-arrows-v: \"\\f07d\";\n$fa-var-asterisk: \"\\f069\";\n$fa-var-at: \"\\f1fa\";\n$fa-var-automobile: \"\\f1b9\";\n$fa-var-backward: \"\\f04a\";\n$fa-var-ban: \"\\f05e\";\n$fa-var-bank: \"\\f19c\";\n$fa-var-bar-chart: \"\\f080\";\n$fa-var-bar-chart-o: 
\"\\f080\";\n$fa-var-barcode: \"\\f02a\";\n$fa-var-bars: \"\\f0c9\";\n$fa-var-bed: \"\\f236\";\n$fa-var-beer: \"\\f0fc\";\n$fa-var-behance: \"\\f1b4\";\n$fa-var-behance-square: \"\\f1b5\";\n$fa-var-bell: \"\\f0f3\";\n$fa-var-bell-o: \"\\f0a2\";\n$fa-var-bell-slash: \"\\f1f6\";\n$fa-var-bell-slash-o: \"\\f1f7\";\n$fa-var-bicycle: \"\\f206\";\n$fa-var-binoculars: \"\\f1e5\";\n$fa-var-birthday-cake: \"\\f1fd\";\n$fa-var-bitbucket: \"\\f171\";\n$fa-var-bitbucket-square: \"\\f172\";\n$fa-var-bitcoin: \"\\f15a\";\n$fa-var-bold: \"\\f032\";\n$fa-var-bolt: \"\\f0e7\";\n$fa-var-bomb: \"\\f1e2\";\n$fa-var-book: \"\\f02d\";\n$fa-var-bookmark: \"\\f02e\";\n$fa-var-bookmark-o: \"\\f097\";\n$fa-var-briefcase: \"\\f0b1\";\n$fa-var-btc: \"\\f15a\";\n$fa-var-bug: \"\\f188\";\n$fa-var-building: \"\\f1ad\";\n$fa-var-building-o: \"\\f0f7\";\n$fa-var-bullhorn: \"\\f0a1\";\n$fa-var-bullseye: \"\\f140\";\n$fa-var-bus: \"\\f207\";\n$fa-var-buysellads: \"\\f20d\";\n$fa-var-cab: \"\\f1ba\";\n$fa-var-calculator: \"\\f1ec\";\n$fa-var-calendar: \"\\f073\";\n$fa-var-calendar-o: \"\\f133\";\n$fa-var-camera: \"\\f030\";\n$fa-var-camera-retro: \"\\f083\";\n$fa-var-car: \"\\f1b9\";\n$fa-var-caret-down: \"\\f0d7\";\n$fa-var-caret-left: \"\\f0d9\";\n$fa-var-caret-right: \"\\f0da\";\n$fa-var-caret-square-o-down: \"\\f150\";\n$fa-var-caret-square-o-left: \"\\f191\";\n$fa-var-caret-square-o-right: \"\\f152\";\n$fa-var-caret-square-o-up: \"\\f151\";\n$fa-var-caret-up: \"\\f0d8\";\n$fa-var-cart-arrow-down: \"\\f218\";\n$fa-var-cart-plus: \"\\f217\";\n$fa-var-cc: \"\\f20a\";\n$fa-var-cc-amex: \"\\f1f3\";\n$fa-var-cc-discover: \"\\f1f2\";\n$fa-var-cc-mastercard: \"\\f1f1\";\n$fa-var-cc-paypal: \"\\f1f4\";\n$fa-var-cc-stripe: \"\\f1f5\";\n$fa-var-cc-visa: \"\\f1f0\";\n$fa-var-certificate: \"\\f0a3\";\n$fa-var-chain: \"\\f0c1\";\n$fa-var-chain-broken: \"\\f127\";\n$fa-var-check: \"\\f00c\";\n$fa-var-check-circle: \"\\f058\";\n$fa-var-check-circle-o: \"\\f05d\";\n$fa-var-check-square: 
\"\\f14a\";\n$fa-var-check-square-o: \"\\f046\";\n$fa-var-chevron-circle-down: \"\\f13a\";\n$fa-var-chevron-circle-left: \"\\f137\";\n$fa-var-chevron-circle-right: \"\\f138\";\n$fa-var-chevron-circle-up: \"\\f139\";\n$fa-var-chevron-down: \"\\f078\";\n$fa-var-chevron-left: \"\\f053\";\n$fa-var-chevron-right: \"\\f054\";\n$fa-var-chevron-up: \"\\f077\";\n$fa-var-child: \"\\f1ae\";\n$fa-var-circle: \"\\f111\";\n$fa-var-circle-o: \"\\f10c\";\n$fa-var-circle-o-notch: \"\\f1ce\";\n$fa-var-circle-thin: \"\\f1db\";\n$fa-var-clipboard: \"\\f0ea\";\n$fa-var-clock-o: \"\\f017\";\n$fa-var-close: \"\\f00d\";\n$fa-var-cloud: \"\\f0c2\";\n$fa-var-cloud-download: \"\\f0ed\";\n$fa-var-cloud-upload: \"\\f0ee\";\n$fa-var-cny: \"\\f157\";\n$fa-var-code: \"\\f121\";\n$fa-var-code-fork: \"\\f126\";\n$fa-var-codepen: \"\\f1cb\";\n$fa-var-coffee: \"\\f0f4\";\n$fa-var-cog: \"\\f013\";\n$fa-var-cogs: \"\\f085\";\n$fa-var-columns: \"\\f0db\";\n$fa-var-comment: \"\\f075\";\n$fa-var-comment-o: \"\\f0e5\";\n$fa-var-comments: \"\\f086\";\n$fa-var-comments-o: \"\\f0e6\";\n$fa-var-compass: \"\\f14e\";\n$fa-var-compress: \"\\f066\";\n$fa-var-connectdevelop: \"\\f20e\";\n$fa-var-copy: \"\\f0c5\";\n$fa-var-copyright: \"\\f1f9\";\n$fa-var-credit-card: \"\\f09d\";\n$fa-var-crop: \"\\f125\";\n$fa-var-crosshairs: \"\\f05b\";\n$fa-var-css3: \"\\f13c\";\n$fa-var-cube: \"\\f1b2\";\n$fa-var-cubes: \"\\f1b3\";\n$fa-var-cut: \"\\f0c4\";\n$fa-var-cutlery: \"\\f0f5\";\n$fa-var-dashboard: \"\\f0e4\";\n$fa-var-dashcube: \"\\f210\";\n$fa-var-database: \"\\f1c0\";\n$fa-var-dedent: \"\\f03b\";\n$fa-var-delicious: \"\\f1a5\";\n$fa-var-desktop: \"\\f108\";\n$fa-var-deviantart: \"\\f1bd\";\n$fa-var-diamond: \"\\f219\";\n$fa-var-digg: \"\\f1a6\";\n$fa-var-dollar: \"\\f155\";\n$fa-var-dot-circle-o: \"\\f192\";\n$fa-var-download: \"\\f019\";\n$fa-var-dribbble: \"\\f17d\";\n$fa-var-dropbox: \"\\f16b\";\n$fa-var-drupal: \"\\f1a9\";\n$fa-var-edit: \"\\f044\";\n$fa-var-eject: \"\\f052\";\n$fa-var-ellipsis-h: 
\"\\f141\";\n$fa-var-ellipsis-v: \"\\f142\";\n$fa-var-empire: \"\\f1d1\";\n$fa-var-envelope: \"\\f0e0\";\n$fa-var-envelope-o: \"\\f003\";\n$fa-var-envelope-square: \"\\f199\";\n$fa-var-eraser: \"\\f12d\";\n$fa-var-eur: \"\\f153\";\n$fa-var-euro: \"\\f153\";\n$fa-var-exchange: \"\\f0ec\";\n$fa-var-exclamation: \"\\f12a\";\n$fa-var-exclamation-circle: \"\\f06a\";\n$fa-var-exclamation-triangle: \"\\f071\";\n$fa-var-expand: \"\\f065\";\n$fa-var-external-link: \"\\f08e\";\n$fa-var-external-link-square: \"\\f14c\";\n$fa-var-eye: \"\\f06e\";\n$fa-var-eye-slash: \"\\f070\";\n$fa-var-eyedropper: \"\\f1fb\";\n$fa-var-facebook: \"\\f09a\";\n$fa-var-facebook-f: \"\\f09a\";\n$fa-var-facebook-official: \"\\f230\";\n$fa-var-facebook-square: \"\\f082\";\n$fa-var-fast-backward: \"\\f049\";\n$fa-var-fast-forward: \"\\f050\";\n$fa-var-fax: \"\\f1ac\";\n$fa-var-female: \"\\f182\";\n$fa-var-fighter-jet: \"\\f0fb\";\n$fa-var-file: \"\\f15b\";\n$fa-var-file-archive-o: \"\\f1c6\";\n$fa-var-file-audio-o: \"\\f1c7\";\n$fa-var-file-code-o: \"\\f1c9\";\n$fa-var-file-excel-o: \"\\f1c3\";\n$fa-var-file-image-o: \"\\f1c5\";\n$fa-var-file-movie-o: \"\\f1c8\";\n$fa-var-file-o: \"\\f016\";\n$fa-var-file-pdf-o: \"\\f1c1\";\n$fa-var-file-photo-o: \"\\f1c5\";\n$fa-var-file-picture-o: \"\\f1c5\";\n$fa-var-file-powerpoint-o: \"\\f1c4\";\n$fa-var-file-sound-o: \"\\f1c7\";\n$fa-var-file-text: \"\\f15c\";\n$fa-var-file-text-o: \"\\f0f6\";\n$fa-var-file-video-o: \"\\f1c8\";\n$fa-var-file-word-o: \"\\f1c2\";\n$fa-var-file-zip-o: \"\\f1c6\";\n$fa-var-files-o: \"\\f0c5\";\n$fa-var-film: \"\\f008\";\n$fa-var-filter: \"\\f0b0\";\n$fa-var-fire: \"\\f06d\";\n$fa-var-fire-extinguisher: \"\\f134\";\n$fa-var-flag: \"\\f024\";\n$fa-var-flag-checkered: \"\\f11e\";\n$fa-var-flag-o: \"\\f11d\";\n$fa-var-flash: \"\\f0e7\";\n$fa-var-flask: \"\\f0c3\";\n$fa-var-flickr: \"\\f16e\";\n$fa-var-floppy-o: \"\\f0c7\";\n$fa-var-folder: \"\\f07b\";\n$fa-var-folder-o: \"\\f114\";\n$fa-var-folder-open: 
\"\\f07c\";\n$fa-var-folder-open-o: \"\\f115\";\n$fa-var-font: \"\\f031\";\n$fa-var-forumbee: \"\\f211\";\n$fa-var-forward: \"\\f04e\";\n$fa-var-foursquare: \"\\f180\";\n$fa-var-frown-o: \"\\f119\";\n$fa-var-futbol-o: \"\\f1e3\";\n$fa-var-gamepad: \"\\f11b\";\n$fa-var-gavel: \"\\f0e3\";\n$fa-var-gbp: \"\\f154\";\n$fa-var-ge: \"\\f1d1\";\n$fa-var-gear: \"\\f013\";\n$fa-var-gears: \"\\f085\";\n$fa-var-genderless: \"\\f1db\";\n$fa-var-gift: \"\\f06b\";\n$fa-var-git: \"\\f1d3\";\n$fa-var-git-square: \"\\f1d2\";\n$fa-var-github: \"\\f09b\";\n$fa-var-github-alt: \"\\f113\";\n$fa-var-github-square: \"\\f092\";\n$fa-var-gittip: \"\\f184\";\n$fa-var-glass: \"\\f000\";\n$fa-var-globe: \"\\f0ac\";\n$fa-var-google: \"\\f1a0\";\n$fa-var-google-plus: \"\\f0d5\";\n$fa-var-google-plus-square: \"\\f0d4\";\n$fa-var-google-wallet: \"\\f1ee\";\n$fa-var-graduation-cap: \"\\f19d\";\n$fa-var-gratipay: \"\\f184\";\n$fa-var-group: \"\\f0c0\";\n$fa-var-h-square: \"\\f0fd\";\n$fa-var-hacker-news: \"\\f1d4\";\n$fa-var-hand-o-down: \"\\f0a7\";\n$fa-var-hand-o-left: \"\\f0a5\";\n$fa-var-hand-o-right: \"\\f0a4\";\n$fa-var-hand-o-up: \"\\f0a6\";\n$fa-var-hdd-o: \"\\f0a0\";\n$fa-var-header: \"\\f1dc\";\n$fa-var-headphones: \"\\f025\";\n$fa-var-heart: \"\\f004\";\n$fa-var-heart-o: \"\\f08a\";\n$fa-var-heartbeat: \"\\f21e\";\n$fa-var-history: \"\\f1da\";\n$fa-var-home: \"\\f015\";\n$fa-var-hospital-o: \"\\f0f8\";\n$fa-var-hotel: \"\\f236\";\n$fa-var-html5: \"\\f13b\";\n$fa-var-ils: \"\\f20b\";\n$fa-var-image: \"\\f03e\";\n$fa-var-inbox: \"\\f01c\";\n$fa-var-indent: \"\\f03c\";\n$fa-var-info: \"\\f129\";\n$fa-var-info-circle: \"\\f05a\";\n$fa-var-inr: \"\\f156\";\n$fa-var-instagram: \"\\f16d\";\n$fa-var-institution: \"\\f19c\";\n$fa-var-ioxhost: \"\\f208\";\n$fa-var-italic: \"\\f033\";\n$fa-var-joomla: \"\\f1aa\";\n$fa-var-jpy: \"\\f157\";\n$fa-var-jsfiddle: \"\\f1cc\";\n$fa-var-key: \"\\f084\";\n$fa-var-keyboard-o: \"\\f11c\";\n$fa-var-krw: \"\\f159\";\n$fa-var-language: \"\\f1ab\";\n$fa-var-laptop: 
\"\\f109\";\n$fa-var-lastfm: \"\\f202\";\n$fa-var-lastfm-square: \"\\f203\";\n$fa-var-leaf: \"\\f06c\";\n$fa-var-leanpub: \"\\f212\";\n$fa-var-legal: \"\\f0e3\";\n$fa-var-lemon-o: \"\\f094\";\n$fa-var-level-down: \"\\f149\";\n$fa-var-level-up: \"\\f148\";\n$fa-var-life-bouy: \"\\f1cd\";\n$fa-var-life-buoy: \"\\f1cd\";\n$fa-var-life-ring: \"\\f1cd\";\n$fa-var-life-saver: \"\\f1cd\";\n$fa-var-lightbulb-o: \"\\f0eb\";\n$fa-var-line-chart: \"\\f201\";\n$fa-var-link: \"\\f0c1\";\n$fa-var-linkedin: \"\\f0e1\";\n$fa-var-linkedin-square: \"\\f08c\";\n$fa-var-linux: \"\\f17c\";\n$fa-var-list: \"\\f03a\";\n$fa-var-list-alt: \"\\f022\";\n$fa-var-list-ol: \"\\f0cb\";\n$fa-var-list-ul: \"\\f0ca\";\n$fa-var-location-arrow: \"\\f124\";\n$fa-var-lock: \"\\f023\";\n$fa-var-long-arrow-down: \"\\f175\";\n$fa-var-long-arrow-left: \"\\f177\";\n$fa-var-long-arrow-right: \"\\f178\";\n$fa-var-long-arrow-up: \"\\f176\";\n$fa-var-magic: \"\\f0d0\";\n$fa-var-magnet: \"\\f076\";\n$fa-var-mail-forward: \"\\f064\";\n$fa-var-mail-reply: \"\\f112\";\n$fa-var-mail-reply-all: \"\\f122\";\n$fa-var-male: \"\\f183\";\n$fa-var-map-marker: \"\\f041\";\n$fa-var-mars: \"\\f222\";\n$fa-var-mars-double: \"\\f227\";\n$fa-var-mars-stroke: \"\\f229\";\n$fa-var-mars-stroke-h: \"\\f22b\";\n$fa-var-mars-stroke-v: \"\\f22a\";\n$fa-var-maxcdn: \"\\f136\";\n$fa-var-meanpath: \"\\f20c\";\n$fa-var-medium: \"\\f23a\";\n$fa-var-medkit: \"\\f0fa\";\n$fa-var-meh-o: \"\\f11a\";\n$fa-var-mercury: \"\\f223\";\n$fa-var-microphone: \"\\f130\";\n$fa-var-microphone-slash: \"\\f131\";\n$fa-var-minus: \"\\f068\";\n$fa-var-minus-circle: \"\\f056\";\n$fa-var-minus-square: \"\\f146\";\n$fa-var-minus-square-o: \"\\f147\";\n$fa-var-mobile: \"\\f10b\";\n$fa-var-mobile-phone: \"\\f10b\";\n$fa-var-money: \"\\f0d6\";\n$fa-var-moon-o: \"\\f186\";\n$fa-var-mortar-board: \"\\f19d\";\n$fa-var-motorcycle: \"\\f21c\";\n$fa-var-music: \"\\f001\";\n$fa-var-navicon: \"\\f0c9\";\n$fa-var-neuter: \"\\f22c\";\n$fa-var-newspaper-o: 
\"\\f1ea\";\n$fa-var-openid: \"\\f19b\";\n$fa-var-outdent: \"\\f03b\";\n$fa-var-pagelines: \"\\f18c\";\n$fa-var-paint-brush: \"\\f1fc\";\n$fa-var-paper-plane: \"\\f1d8\";\n$fa-var-paper-plane-o: \"\\f1d9\";\n$fa-var-paperclip: \"\\f0c6\";\n$fa-var-paragraph: \"\\f1dd\";\n$fa-var-paste: \"\\f0ea\";\n$fa-var-pause: \"\\f04c\";\n$fa-var-paw: \"\\f1b0\";\n$fa-var-paypal: \"\\f1ed\";\n$fa-var-pencil: \"\\f040\";\n$fa-var-pencil-square: \"\\f14b\";\n$fa-var-pencil-square-o: \"\\f044\";\n$fa-var-phone: \"\\f095\";\n$fa-var-phone-square: \"\\f098\";\n$fa-var-photo: \"\\f03e\";\n$fa-var-picture-o: \"\\f03e\";\n$fa-var-pie-chart: \"\\f200\";\n$fa-var-pied-piper: \"\\f1a7\";\n$fa-var-pied-piper-alt: \"\\f1a8\";\n$fa-var-pinterest: \"\\f0d2\";\n$fa-var-pinterest-p: \"\\f231\";\n$fa-var-pinterest-square: \"\\f0d3\";\n$fa-var-plane: \"\\f072\";\n$fa-var-play: \"\\f04b\";\n$fa-var-play-circle: \"\\f144\";\n$fa-var-play-circle-o: \"\\f01d\";\n$fa-var-plug: \"\\f1e6\";\n$fa-var-plus: \"\\f067\";\n$fa-var-plus-circle: \"\\f055\";\n$fa-var-plus-square: \"\\f0fe\";\n$fa-var-plus-square-o: \"\\f196\";\n$fa-var-power-off: \"\\f011\";\n$fa-var-print: \"\\f02f\";\n$fa-var-puzzle-piece: \"\\f12e\";\n$fa-var-qq: \"\\f1d6\";\n$fa-var-qrcode: \"\\f029\";\n$fa-var-question: \"\\f128\";\n$fa-var-question-circle: \"\\f059\";\n$fa-var-quote-left: \"\\f10d\";\n$fa-var-quote-right: \"\\f10e\";\n$fa-var-ra: \"\\f1d0\";\n$fa-var-random: \"\\f074\";\n$fa-var-rebel: \"\\f1d0\";\n$fa-var-recycle: \"\\f1b8\";\n$fa-var-reddit: \"\\f1a1\";\n$fa-var-reddit-square: \"\\f1a2\";\n$fa-var-refresh: \"\\f021\";\n$fa-var-remove: \"\\f00d\";\n$fa-var-renren: \"\\f18b\";\n$fa-var-reorder: \"\\f0c9\";\n$fa-var-repeat: \"\\f01e\";\n$fa-var-reply: \"\\f112\";\n$fa-var-reply-all: \"\\f122\";\n$fa-var-retweet: \"\\f079\";\n$fa-var-rmb: \"\\f157\";\n$fa-var-road: \"\\f018\";\n$fa-var-rocket: \"\\f135\";\n$fa-var-rotate-left: \"\\f0e2\";\n$fa-var-rotate-right: \"\\f01e\";\n$fa-var-rouble: \"\\f158\";\n$fa-var-rss: 
\"\\f09e\";\n$fa-var-rss-square: \"\\f143\";\n$fa-var-rub: \"\\f158\";\n$fa-var-ruble: \"\\f158\";\n$fa-var-rupee: \"\\f156\";\n$fa-var-save: \"\\f0c7\";\n$fa-var-scissors: \"\\f0c4\";\n$fa-var-search: \"\\f002\";\n$fa-var-search-minus: \"\\f010\";\n$fa-var-search-plus: \"\\f00e\";\n$fa-var-sellsy: \"\\f213\";\n$fa-var-send: \"\\f1d8\";\n$fa-var-send-o: \"\\f1d9\";\n$fa-var-server: \"\\f233\";\n$fa-var-share: \"\\f064\";\n$fa-var-share-alt: \"\\f1e0\";\n$fa-var-share-alt-square: \"\\f1e1\";\n$fa-var-share-square: \"\\f14d\";\n$fa-var-share-square-o: \"\\f045\";\n$fa-var-shekel: \"\\f20b\";\n$fa-var-sheqel: \"\\f20b\";\n$fa-var-shield: \"\\f132\";\n$fa-var-ship: \"\\f21a\";\n$fa-var-shirtsinbulk: \"\\f214\";\n$fa-var-shopping-cart: \"\\f07a\";\n$fa-var-sign-in: \"\\f090\";\n$fa-var-sign-out: \"\\f08b\";\n$fa-var-signal: \"\\f012\";\n$fa-var-simplybuilt: \"\\f215\";\n$fa-var-sitemap: \"\\f0e8\";\n$fa-var-skyatlas: \"\\f216\";\n$fa-var-skype: \"\\f17e\";\n$fa-var-slack: \"\\f198\";\n$fa-var-sliders: \"\\f1de\";\n$fa-var-slideshare: \"\\f1e7\";\n$fa-var-smile-o: \"\\f118\";\n$fa-var-soccer-ball-o: \"\\f1e3\";\n$fa-var-sort: \"\\f0dc\";\n$fa-var-sort-alpha-asc: \"\\f15d\";\n$fa-var-sort-alpha-desc: \"\\f15e\";\n$fa-var-sort-amount-asc: \"\\f160\";\n$fa-var-sort-amount-desc: \"\\f161\";\n$fa-var-sort-asc: \"\\f0de\";\n$fa-var-sort-desc: \"\\f0dd\";\n$fa-var-sort-down: \"\\f0dd\";\n$fa-var-sort-numeric-asc: \"\\f162\";\n$fa-var-sort-numeric-desc: \"\\f163\";\n$fa-var-sort-up: \"\\f0de\";\n$fa-var-soundcloud: \"\\f1be\";\n$fa-var-space-shuttle: \"\\f197\";\n$fa-var-spinner: \"\\f110\";\n$fa-var-spoon: \"\\f1b1\";\n$fa-var-spotify: \"\\f1bc\";\n$fa-var-square: \"\\f0c8\";\n$fa-var-square-o: \"\\f096\";\n$fa-var-stack-exchange: \"\\f18d\";\n$fa-var-stack-overflow: \"\\f16c\";\n$fa-var-star: \"\\f005\";\n$fa-var-star-half: \"\\f089\";\n$fa-var-star-half-empty: \"\\f123\";\n$fa-var-star-half-full: \"\\f123\";\n$fa-var-star-half-o: \"\\f123\";\n$fa-var-star-o: 
\"\\f006\";\n$fa-var-steam: \"\\f1b6\";\n$fa-var-steam-square: \"\\f1b7\";\n$fa-var-step-backward: \"\\f048\";\n$fa-var-step-forward: \"\\f051\";\n$fa-var-stethoscope: \"\\f0f1\";\n$fa-var-stop: \"\\f04d\";\n$fa-var-street-view: \"\\f21d\";\n$fa-var-strikethrough: \"\\f0cc\";\n$fa-var-stumbleupon: \"\\f1a4\";\n$fa-var-stumbleupon-circle: \"\\f1a3\";\n$fa-var-subscript: \"\\f12c\";\n$fa-var-subway: \"\\f239\";\n$fa-var-suitcase: \"\\f0f2\";\n$fa-var-sun-o: \"\\f185\";\n$fa-var-superscript: \"\\f12b\";\n$fa-var-support: \"\\f1cd\";\n$fa-var-table: \"\\f0ce\";\n$fa-var-tablet: \"\\f10a\";\n$fa-var-tachometer: \"\\f0e4\";\n$fa-var-tag: \"\\f02b\";\n$fa-var-tags: \"\\f02c\";\n$fa-var-tasks: \"\\f0ae\";\n$fa-var-taxi: \"\\f1ba\";\n$fa-var-tencent-weibo: \"\\f1d5\";\n$fa-var-terminal: \"\\f120\";\n$fa-var-text-height: \"\\f034\";\n$fa-var-text-width: \"\\f035\";\n$fa-var-th: \"\\f00a\";\n$fa-var-th-large: \"\\f009\";\n$fa-var-th-list: \"\\f00b\";\n$fa-var-thumb-tack: \"\\f08d\";\n$fa-var-thumbs-down: \"\\f165\";\n$fa-var-thumbs-o-down: \"\\f088\";\n$fa-var-thumbs-o-up: \"\\f087\";\n$fa-var-thumbs-up: \"\\f164\";\n$fa-var-ticket: \"\\f145\";\n$fa-var-times: \"\\f00d\";\n$fa-var-times-circle: \"\\f057\";\n$fa-var-times-circle-o: \"\\f05c\";\n$fa-var-tint: \"\\f043\";\n$fa-var-toggle-down: \"\\f150\";\n$fa-var-toggle-left: \"\\f191\";\n$fa-var-toggle-off: \"\\f204\";\n$fa-var-toggle-on: \"\\f205\";\n$fa-var-toggle-right: \"\\f152\";\n$fa-var-toggle-up: \"\\f151\";\n$fa-var-train: \"\\f238\";\n$fa-var-transgender: \"\\f224\";\n$fa-var-transgender-alt: \"\\f225\";\n$fa-var-trash: \"\\f1f8\";\n$fa-var-trash-o: \"\\f014\";\n$fa-var-tree: \"\\f1bb\";\n$fa-var-trello: \"\\f181\";\n$fa-var-trophy: \"\\f091\";\n$fa-var-truck: \"\\f0d1\";\n$fa-var-try: \"\\f195\";\n$fa-var-tty: \"\\f1e4\";\n$fa-var-tumblr: \"\\f173\";\n$fa-var-tumblr-square: \"\\f174\";\n$fa-var-turkish-lira: \"\\f195\";\n$fa-var-twitch: \"\\f1e8\";\n$fa-var-twitter: \"\\f099\";\n$fa-var-twitter-square: 
\"\\f081\";\n$fa-var-umbrella: \"\\f0e9\";\n$fa-var-underline: \"\\f0cd\";\n$fa-var-undo: \"\\f0e2\";\n$fa-var-university: \"\\f19c\";\n$fa-var-unlink: \"\\f127\";\n$fa-var-unlock: \"\\f09c\";\n$fa-var-unlock-alt: \"\\f13e\";\n$fa-var-unsorted: \"\\f0dc\";\n$fa-var-upload: \"\\f093\";\n$fa-var-usd: \"\\f155\";\n$fa-var-user: \"\\f007\";\n$fa-var-user-md: \"\\f0f0\";\n$fa-var-user-plus: \"\\f234\";\n$fa-var-user-secret: \"\\f21b\";\n$fa-var-user-times: \"\\f235\";\n$fa-var-users: \"\\f0c0\";\n$fa-var-venus: \"\\f221\";\n$fa-var-venus-double: \"\\f226\";\n$fa-var-venus-mars: \"\\f228\";\n$fa-var-viacoin: \"\\f237\";\n$fa-var-video-camera: \"\\f03d\";\n$fa-var-vimeo-square: \"\\f194\";\n$fa-var-vine: \"\\f1ca\";\n$fa-var-vk: \"\\f189\";\n$fa-var-volume-down: \"\\f027\";\n$fa-var-volume-off: \"\\f026\";\n$fa-var-volume-up: \"\\f028\";\n$fa-var-warning: \"\\f071\";\n$fa-var-wechat: \"\\f1d7\";\n$fa-var-weibo: \"\\f18a\";\n$fa-var-weixin: \"\\f1d7\";\n$fa-var-whatsapp: \"\\f232\";\n$fa-var-wheelchair: \"\\f193\";\n$fa-var-wifi: \"\\f1eb\";\n$fa-var-windows: \"\\f17a\";\n$fa-var-won: \"\\f159\";\n$fa-var-wordpress: \"\\f19a\";\n$fa-var-wrench: \"\\f0ad\";\n$fa-var-xing: \"\\f168\";\n$fa-var-xing-square: \"\\f169\";\n$fa-var-yahoo: \"\\f19e\";\n$fa-var-yelp: \"\\f1e9\";\n$fa-var-yen: \"\\f157\";\n$fa-var-youtube: \"\\f167\";\n$fa-var-youtube-play: \"\\f16a\";\n$fa-var-youtube-square: \"\\f166\";\n"
  },
  {
    "path": "website/font-awesome/scss/font-awesome.scss",
    "content": "/*!\n *  Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome\n *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */\n\n@import \"variables\";\n@import \"mixins\";\n@import \"path\";\n@import \"core\";\n@import \"larger\";\n@import \"fixed-width\";\n@import \"list\";\n@import \"bordered-pulled\";\n@import \"animated\";\n@import \"rotated-flipped\";\n@import \"stacked\";\n@import \"icons\";\n"
  },
  {
    "path": "website/index.md",
    "content": "---\nlayout: home\n---\n"
  },
  {
    "path": "website/js/bootstrap.js",
    "content": "/*!\n * Bootstrap v3.3.2 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\nif (typeof jQuery === 'undefined') {\n  throw new Error('Bootstrap\\'s JavaScript requires jQuery')\n}\n\n+function ($) {\n  'use strict';\n  var version = $.fn.jquery.split(' ')[0].split('.')\n  if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 9 && version[2] < 1)) {\n    throw new Error('Bootstrap\\'s JavaScript requires jQuery version 1.9.1 or higher')\n  }\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: transition.js v3.3.2\n * http://getbootstrap.com/javascript/#transitions\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)\n  // ============================================================\n\n  function transitionEnd() {\n    var el = document.createElement('bootstrap')\n\n    var transEndEventNames = {\n      WebkitTransition : 'webkitTransitionEnd',\n      MozTransition    : 'transitionend',\n      OTransition      : 'oTransitionEnd otransitionend',\n      transition       : 'transitionend'\n    }\n\n    for (var name in transEndEventNames) {\n      if (el.style[name] !== undefined) {\n        return { end: transEndEventNames[name] }\n      }\n    }\n\n    return false // explicit for ie8 (  ._.)\n  }\n\n  // http://blog.alexmaccaw.com/css-transitions\n  $.fn.emulateTransitionEnd = function (duration) {\n    var called = false\n    var $el = this\n    $(this).one('bsTransitionEnd', function () { called = true })\n    var callback = function () { if (!called) 
$($el).trigger($.support.transition.end) }\n    setTimeout(callback, duration)\n    return this\n  }\n\n  $(function () {\n    $.support.transition = transitionEnd()\n\n    if (!$.support.transition) return\n\n    $.event.special.bsTransitionEnd = {\n      bindType: $.support.transition.end,\n      delegateType: $.support.transition.end,\n      handle: function (e) {\n        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)\n      }\n    }\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: alert.js v3.3.2\n * http://getbootstrap.com/javascript/#alerts\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // ALERT CLASS DEFINITION\n  // ======================\n\n  var dismiss = '[data-dismiss=\"alert\"]'\n  var Alert   = function (el) {\n    $(el).on('click', dismiss, this.close)\n  }\n\n  Alert.VERSION = '3.3.2'\n\n  Alert.TRANSITION_DURATION = 150\n\n  Alert.prototype.close = function (e) {\n    var $this    = $(this)\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    var $parent = $(selector)\n\n    if (e) e.preventDefault()\n\n    if (!$parent.length) {\n      $parent = $this.closest('.alert')\n    }\n\n    $parent.trigger(e = $.Event('close.bs.alert'))\n\n    if (e.isDefaultPrevented()) return\n\n    $parent.removeClass('in')\n\n    function removeElement() {\n      // detach from parent, fire event then clean up data\n      $parent.detach().trigger('closed.bs.alert').remove()\n    }\n\n    $.support.transition && $parent.hasClass('fade') ?\n      $parent\n     
   .one('bsTransitionEnd', removeElement)\n        .emulateTransitionEnd(Alert.TRANSITION_DURATION) :\n      removeElement()\n  }\n\n\n  // ALERT PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.alert')\n\n      if (!data) $this.data('bs.alert', (data = new Alert(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  var old = $.fn.alert\n\n  $.fn.alert             = Plugin\n  $.fn.alert.Constructor = Alert\n\n\n  // ALERT NO CONFLICT\n  // =================\n\n  $.fn.alert.noConflict = function () {\n    $.fn.alert = old\n    return this\n  }\n\n\n  // ALERT DATA-API\n  // ==============\n\n  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: button.js v3.3.2\n * http://getbootstrap.com/javascript/#buttons\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // BUTTON PUBLIC CLASS DEFINITION\n  // ==============================\n\n  var Button = function (element, options) {\n    this.$element  = $(element)\n    this.options   = $.extend({}, Button.DEFAULTS, options)\n    this.isLoading = false\n  }\n\n  Button.VERSION  = '3.3.2'\n\n  Button.DEFAULTS = {\n    loadingText: 'loading...'\n  }\n\n  Button.prototype.setState = function (state) {\n    var d    = 'disabled'\n    var $el  = this.$element\n    var val  = $el.is('input') ? 
'val' : 'html'\n    var data = $el.data()\n\n    state = state + 'Text'\n\n    if (data.resetText == null) $el.data('resetText', $el[val]())\n\n    // push to event loop to allow forms to submit\n    setTimeout($.proxy(function () {\n      $el[val](data[state] == null ? this.options[state] : data[state])\n\n      if (state == 'loadingText') {\n        this.isLoading = true\n        $el.addClass(d).attr(d, d)\n      } else if (this.isLoading) {\n        this.isLoading = false\n        $el.removeClass(d).removeAttr(d)\n      }\n    }, this), 0)\n  }\n\n  Button.prototype.toggle = function () {\n    var changed = true\n    var $parent = this.$element.closest('[data-toggle=\"buttons\"]')\n\n    if ($parent.length) {\n      var $input = this.$element.find('input')\n      if ($input.prop('type') == 'radio') {\n        if ($input.prop('checked') && this.$element.hasClass('active')) changed = false\n        else $parent.find('.active').removeClass('active')\n      }\n      if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')\n    } else {\n      this.$element.attr('aria-pressed', !this.$element.hasClass('active'))\n    }\n\n    if (changed) this.$element.toggleClass('active')\n  }\n\n\n  // BUTTON PLUGIN DEFINITION\n  // ========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.button')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.button', (data = new Button(this, options)))\n\n      if (option == 'toggle') data.toggle()\n      else if (option) data.setState(option)\n    })\n  }\n\n  var old = $.fn.button\n\n  $.fn.button             = Plugin\n  $.fn.button.Constructor = Button\n\n\n  // BUTTON NO CONFLICT\n  // ==================\n\n  $.fn.button.noConflict = function () {\n    $.fn.button = old\n    return this\n  }\n\n\n  // BUTTON DATA-API\n  // ===============\n\n  $(document)\n    
.on('click.bs.button.data-api', '[data-toggle^=\"button\"]', function (e) {\n      var $btn = $(e.target)\n      if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')\n      Plugin.call($btn, 'toggle')\n      e.preventDefault()\n    })\n    .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^=\"button\"]', function (e) {\n      $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))\n    })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: carousel.js v3.3.2\n * http://getbootstrap.com/javascript/#carousel\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CAROUSEL CLASS DEFINITION\n  // =========================\n\n  var Carousel = function (element, options) {\n    this.$element    = $(element)\n    this.$indicators = this.$element.find('.carousel-indicators')\n    this.options     = options\n    this.paused      =\n    this.sliding     =\n    this.interval    =\n    this.$active     =\n    this.$items      = null\n\n    this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this))\n\n    this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element\n      .on('mouseenter.bs.carousel', $.proxy(this.pause, this))\n      .on('mouseleave.bs.carousel', $.proxy(this.cycle, this))\n  }\n\n  Carousel.VERSION  = '3.3.2'\n\n  Carousel.TRANSITION_DURATION = 600\n\n  Carousel.DEFAULTS = {\n    interval: 5000,\n    pause: 'hover',\n    wrap: true,\n    keyboard: true\n  }\n\n  Carousel.prototype.keydown = function (e) {\n    if (/input|textarea/i.test(e.target.tagName)) return\n    switch (e.which) {\n      case 37: this.prev(); break\n      case 
39: this.next(); break\n      default: return\n    }\n\n    e.preventDefault()\n  }\n\n  Carousel.prototype.cycle = function (e) {\n    e || (this.paused = false)\n\n    this.interval && clearInterval(this.interval)\n\n    this.options.interval\n      && !this.paused\n      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))\n\n    return this\n  }\n\n  Carousel.prototype.getItemIndex = function (item) {\n    this.$items = item.parent().children('.item')\n    return this.$items.index(item || this.$active)\n  }\n\n  Carousel.prototype.getItemForDirection = function (direction, active) {\n    var activeIndex = this.getItemIndex(active)\n    var willWrap = (direction == 'prev' && activeIndex === 0)\n                || (direction == 'next' && activeIndex == (this.$items.length - 1))\n    if (willWrap && !this.options.wrap) return active\n    var delta = direction == 'prev' ? -1 : 1\n    var itemIndex = (activeIndex + delta) % this.$items.length\n    return this.$items.eq(itemIndex)\n  }\n\n  Carousel.prototype.to = function (pos) {\n    var that        = this\n    var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))\n\n    if (pos > (this.$items.length - 1) || pos < 0) return\n\n    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, \"slid\"\n    if (activeIndex == pos) return this.pause().cycle()\n\n    return this.slide(pos > activeIndex ? 
'next' : 'prev', this.$items.eq(pos))\n  }\n\n  Carousel.prototype.pause = function (e) {\n    e || (this.paused = true)\n\n    if (this.$element.find('.next, .prev').length && $.support.transition) {\n      this.$element.trigger($.support.transition.end)\n      this.cycle(true)\n    }\n\n    this.interval = clearInterval(this.interval)\n\n    return this\n  }\n\n  Carousel.prototype.next = function () {\n    if (this.sliding) return\n    return this.slide('next')\n  }\n\n  Carousel.prototype.prev = function () {\n    if (this.sliding) return\n    return this.slide('prev')\n  }\n\n  Carousel.prototype.slide = function (type, next) {\n    var $active   = this.$element.find('.item.active')\n    var $next     = next || this.getItemForDirection(type, $active)\n    var isCycling = this.interval\n    var direction = type == 'next' ? 'left' : 'right'\n    var that      = this\n\n    if ($next.hasClass('active')) return (this.sliding = false)\n\n    var relatedTarget = $next[0]\n    var slideEvent = $.Event('slide.bs.carousel', {\n      relatedTarget: relatedTarget,\n      direction: direction\n    })\n    this.$element.trigger(slideEvent)\n    if (slideEvent.isDefaultPrevented()) return\n\n    this.sliding = true\n\n    isCycling && this.pause()\n\n    if (this.$indicators.length) {\n      this.$indicators.find('.active').removeClass('active')\n      var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])\n      $nextIndicator && $nextIndicator.addClass('active')\n    }\n\n    var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, \"slid\"\n    if ($.support.transition && this.$element.hasClass('slide')) {\n      $next.addClass(type)\n      $next[0].offsetWidth // force reflow\n      $active.addClass(direction)\n      $next.addClass(direction)\n      $active\n        .one('bsTransitionEnd', function () {\n          $next.removeClass([type, direction].join(' ')).addClass('active')\n          
$active.removeClass(['active', direction].join(' '))\n          that.sliding = false\n          setTimeout(function () {\n            that.$element.trigger(slidEvent)\n          }, 0)\n        })\n        .emulateTransitionEnd(Carousel.TRANSITION_DURATION)\n    } else {\n      $active.removeClass('active')\n      $next.addClass('active')\n      this.sliding = false\n      this.$element.trigger(slidEvent)\n    }\n\n    isCycling && this.cycle()\n\n    return this\n  }\n\n\n  // CAROUSEL PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.carousel')\n      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)\n      var action  = typeof option == 'string' ? option : options.slide\n\n      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))\n      if (typeof option == 'number') data.to(option)\n      else if (action) data[action]()\n      else if (options.interval) data.pause().cycle()\n    })\n  }\n\n  var old = $.fn.carousel\n\n  $.fn.carousel             = Plugin\n  $.fn.carousel.Constructor = Carousel\n\n\n  // CAROUSEL NO CONFLICT\n  // ====================\n\n  $.fn.carousel.noConflict = function () {\n    $.fn.carousel = old\n    return this\n  }\n\n\n  // CAROUSEL DATA-API\n  // =================\n\n  var clickHandler = function (e) {\n    var href\n    var $this   = $(this)\n    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '')) // strip for ie7\n    if (!$target.hasClass('carousel')) return\n    var options = $.extend({}, $target.data(), $this.data())\n    var slideIndex = $this.attr('data-slide-to')\n    if (slideIndex) options.interval = false\n\n    Plugin.call($target, options)\n\n    if (slideIndex) {\n      $target.data('bs.carousel').to(slideIndex)\n    }\n\n    e.preventDefault()\n  }\n\n  
$(document)\n    .on('click.bs.carousel.data-api', '[data-slide]', clickHandler)\n    .on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler)\n\n  $(window).on('load', function () {\n    $('[data-ride=\"carousel\"]').each(function () {\n      var $carousel = $(this)\n      Plugin.call($carousel, $carousel.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: collapse.js v3.3.2\n * http://getbootstrap.com/javascript/#collapse\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // COLLAPSE PUBLIC CLASS DEFINITION\n  // ================================\n\n  var Collapse = function (element, options) {\n    this.$element      = $(element)\n    this.options       = $.extend({}, Collapse.DEFAULTS, options)\n    this.$trigger      = $(this.options.trigger).filter('[href=\"#' + element.id + '\"], [data-target=\"#' + element.id + '\"]')\n    this.transitioning = null\n\n    if (this.options.parent) {\n      this.$parent = this.getParent()\n    } else {\n      this.addAriaAndCollapsedClass(this.$element, this.$trigger)\n    }\n\n    if (this.options.toggle) this.toggle()\n  }\n\n  Collapse.VERSION  = '3.3.2'\n\n  Collapse.TRANSITION_DURATION = 350\n\n  Collapse.DEFAULTS = {\n    toggle: true,\n    trigger: '[data-toggle=\"collapse\"]'\n  }\n\n  Collapse.prototype.dimension = function () {\n    var hasWidth = this.$element.hasClass('width')\n    return hasWidth ? 
'width' : 'height'\n  }\n\n  Collapse.prototype.show = function () {\n    if (this.transitioning || this.$element.hasClass('in')) return\n\n    var activesData\n    var actives = this.$parent && this.$parent.children('.panel').children('.in, .collapsing')\n\n    if (actives && actives.length) {\n      activesData = actives.data('bs.collapse')\n      if (activesData && activesData.transitioning) return\n    }\n\n    var startEvent = $.Event('show.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    if (actives && actives.length) {\n      Plugin.call(actives, 'hide')\n      activesData || actives.data('bs.collapse', null)\n    }\n\n    var dimension = this.dimension()\n\n    this.$element\n      .removeClass('collapse')\n      .addClass('collapsing')[dimension](0)\n      .attr('aria-expanded', true)\n\n    this.$trigger\n      .removeClass('collapsed')\n      .attr('aria-expanded', true)\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.$element\n        .removeClass('collapsing')\n        .addClass('collapse in')[dimension]('')\n      this.transitioning = 0\n      this.$element\n        .trigger('shown.bs.collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    var scrollSize = $.camelCase(['scroll', dimension].join('-'))\n\n    this.$element\n      .one('bsTransitionEnd', $.proxy(complete, this))\n      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize])\n  }\n\n  Collapse.prototype.hide = function () {\n    if (this.transitioning || !this.$element.hasClass('in')) return\n\n    var startEvent = $.Event('hide.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    var dimension = this.dimension()\n\n    this.$element[dimension](this.$element[dimension]())[0].offsetHeight\n\n    this.$element\n      .addClass('collapsing')\n      .removeClass('collapse in')\n      
.attr('aria-expanded', false)\n\n    this.$trigger\n      .addClass('collapsed')\n      .attr('aria-expanded', false)\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.transitioning = 0\n      this.$element\n        .removeClass('collapsing')\n        .addClass('collapse')\n        .trigger('hidden.bs.collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    this.$element\n      [dimension](0)\n      .one('bsTransitionEnd', $.proxy(complete, this))\n      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)\n  }\n\n  Collapse.prototype.toggle = function () {\n    this[this.$element.hasClass('in') ? 'hide' : 'show']()\n  }\n\n  Collapse.prototype.getParent = function () {\n    return $(this.options.parent)\n      .find('[data-toggle=\"collapse\"][data-parent=\"' + this.options.parent + '\"]')\n      .each($.proxy(function (i, element) {\n        var $element = $(element)\n        this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element)\n      }, this))\n      .end()\n  }\n\n  Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) {\n    var isOpen = $element.hasClass('in')\n\n    $element.attr('aria-expanded', isOpen)\n    $trigger\n      .toggleClass('collapsed', !isOpen)\n      .attr('aria-expanded', isOpen)\n  }\n\n  function getTargetFromTrigger($trigger) {\n    var href\n    var target = $trigger.attr('data-target')\n      || (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '') // strip for ie7\n\n    return $(target)\n  }\n\n\n  // COLLAPSE PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.collapse')\n      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data && options.toggle && option == 'show') options.toggle = false\n      if (!data) 
$this.data('bs.collapse', (data = new Collapse(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.collapse\n\n  $.fn.collapse             = Plugin\n  $.fn.collapse.Constructor = Collapse\n\n\n  // COLLAPSE NO CONFLICT\n  // ====================\n\n  $.fn.collapse.noConflict = function () {\n    $.fn.collapse = old\n    return this\n  }\n\n\n  // COLLAPSE DATA-API\n  // =================\n\n  $(document).on('click.bs.collapse.data-api', '[data-toggle=\"collapse\"]', function (e) {\n    var $this   = $(this)\n\n    if (!$this.attr('data-target')) e.preventDefault()\n\n    var $target = getTargetFromTrigger($this)\n    var data    = $target.data('bs.collapse')\n    var option  = data ? 'toggle' : $.extend({}, $this.data(), { trigger: this })\n\n    Plugin.call($target, option)\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: dropdown.js v3.3.2\n * http://getbootstrap.com/javascript/#dropdowns\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // DROPDOWN CLASS DEFINITION\n  // =========================\n\n  var backdrop = '.dropdown-backdrop'\n  var toggle   = '[data-toggle=\"dropdown\"]'\n  var Dropdown = function (element) {\n    $(element).on('click.bs.dropdown', this.toggle)\n  }\n\n  Dropdown.VERSION = '3.3.2'\n\n  Dropdown.prototype.toggle = function (e) {\n    var $this = $(this)\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    clearMenus()\n\n    if (!isActive) {\n      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {\n        // if mobile we use a 
backdrop because click events don't delegate\n        $('<div class=\"dropdown-backdrop\"/>').insertAfter($(this)).on('click', clearMenus)\n      }\n\n      var relatedTarget = { relatedTarget: this }\n      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))\n\n      if (e.isDefaultPrevented()) return\n\n      $this\n        .trigger('focus')\n        .attr('aria-expanded', 'true')\n\n      $parent\n        .toggleClass('open')\n        .trigger('shown.bs.dropdown', relatedTarget)\n    }\n\n    return false\n  }\n\n  Dropdown.prototype.keydown = function (e) {\n    if (!/(38|40|27|32)/.test(e.which) || /input|textarea/i.test(e.target.tagName)) return\n\n    var $this = $(this)\n\n    e.preventDefault()\n    e.stopPropagation()\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    if ((!isActive && e.which != 27) || (isActive && e.which == 27)) {\n      if (e.which == 27) $parent.find(toggle).trigger('focus')\n      return $this.trigger('click')\n    }\n\n    var desc = ' li:not(.divider):visible a'\n    var $items = $parent.find('[role=\"menu\"]' + desc + ', [role=\"listbox\"]' + desc)\n\n    if (!$items.length) return\n\n    var index = $items.index(e.target)\n\n    if (e.which == 38 && index > 0)                 index--                        // up\n    if (e.which == 40 && index < $items.length - 1) index++                        // down\n    if (!~index)                                      index = 0\n\n    $items.eq(index).trigger('focus')\n  }\n\n  function clearMenus(e) {\n    if (e && e.which === 3) return\n    $(backdrop).remove()\n    $(toggle).each(function () {\n      var $this         = $(this)\n      var $parent       = getParent($this)\n      var relatedTarget = { relatedTarget: this }\n\n      if (!$parent.hasClass('open')) return\n\n      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))\n\n      if (e.isDefaultPrevented()) return\n\n 
     $this.attr('aria-expanded', 'false')\n      $parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget)\n    })\n  }\n\n  function getParent($this) {\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    var $parent = selector && $(selector)\n\n    return $parent && $parent.length ? $parent : $this.parent()\n  }\n\n\n  // DROPDOWN PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.dropdown')\n\n      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  var old = $.fn.dropdown\n\n  $.fn.dropdown             = Plugin\n  $.fn.dropdown.Constructor = Dropdown\n\n\n  // DROPDOWN NO CONFLICT\n  // ====================\n\n  $.fn.dropdown.noConflict = function () {\n    $.fn.dropdown = old\n    return this\n  }\n\n\n  // APPLY TO STANDARD DROPDOWN ELEMENTS\n  // ===================================\n\n  $(document)\n    .on('click.bs.dropdown.data-api', clearMenus)\n    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })\n    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)\n    .on('keydown.bs.dropdown.data-api', toggle, Dropdown.prototype.keydown)\n    .on('keydown.bs.dropdown.data-api', '[role=\"menu\"]', Dropdown.prototype.keydown)\n    .on('keydown.bs.dropdown.data-api', '[role=\"listbox\"]', Dropdown.prototype.keydown)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: modal.js v3.3.2\n * http://getbootstrap.com/javascript/#modals\n * ========================================================================\n * Copyright 2011-2015 Twitter, 
Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // MODAL CLASS DEFINITION\n  // ======================\n\n  var Modal = function (element, options) {\n    this.options        = options\n    this.$body          = $(document.body)\n    this.$element       = $(element)\n    this.$backdrop      =\n    this.isShown        = null\n    this.scrollbarWidth = 0\n\n    if (this.options.remote) {\n      this.$element\n        .find('.modal-content')\n        .load(this.options.remote, $.proxy(function () {\n          this.$element.trigger('loaded.bs.modal')\n        }, this))\n    }\n  }\n\n  Modal.VERSION  = '3.3.2'\n\n  Modal.TRANSITION_DURATION = 300\n  Modal.BACKDROP_TRANSITION_DURATION = 150\n\n  Modal.DEFAULTS = {\n    backdrop: true,\n    keyboard: true,\n    show: true\n  }\n\n  Modal.prototype.toggle = function (_relatedTarget) {\n    return this.isShown ? 
this.hide() : this.show(_relatedTarget)\n  }\n\n  Modal.prototype.show = function (_relatedTarget) {\n    var that = this\n    var e    = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })\n\n    this.$element.trigger(e)\n\n    if (this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = true\n\n    this.checkScrollbar()\n    this.setScrollbar()\n    this.$body.addClass('modal-open')\n\n    this.escape()\n    this.resize()\n\n    this.$element.on('click.dismiss.bs.modal', '[data-dismiss=\"modal\"]', $.proxy(this.hide, this))\n\n    this.backdrop(function () {\n      var transition = $.support.transition && that.$element.hasClass('fade')\n\n      if (!that.$element.parent().length) {\n        that.$element.appendTo(that.$body) // don't move modals dom position\n      }\n\n      that.$element\n        .show()\n        .scrollTop(0)\n\n      if (that.options.backdrop) that.adjustBackdrop()\n      that.adjustDialog()\n\n      if (transition) {\n        that.$element[0].offsetWidth // force reflow\n      }\n\n      that.$element\n        .addClass('in')\n        .attr('aria-hidden', false)\n\n      that.enforceFocus()\n\n      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })\n\n      transition ?\n        that.$element.find('.modal-dialog') // wait for modal to slide in\n          .one('bsTransitionEnd', function () {\n            that.$element.trigger('focus').trigger(e)\n          })\n          .emulateTransitionEnd(Modal.TRANSITION_DURATION) :\n        that.$element.trigger('focus').trigger(e)\n    })\n  }\n\n  Modal.prototype.hide = function (e) {\n    if (e) e.preventDefault()\n\n    e = $.Event('hide.bs.modal')\n\n    this.$element.trigger(e)\n\n    if (!this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = false\n\n    this.escape()\n    this.resize()\n\n    $(document).off('focusin.bs.modal')\n\n    this.$element\n      .removeClass('in')\n      .attr('aria-hidden', true)\n      
.off('click.dismiss.bs.modal')\n\n    $.support.transition && this.$element.hasClass('fade') ?\n      this.$element\n        .one('bsTransitionEnd', $.proxy(this.hideModal, this))\n        .emulateTransitionEnd(Modal.TRANSITION_DURATION) :\n      this.hideModal()\n  }\n\n  Modal.prototype.enforceFocus = function () {\n    $(document)\n      .off('focusin.bs.modal') // guard against infinite focus loop\n      .on('focusin.bs.modal', $.proxy(function (e) {\n        if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {\n          this.$element.trigger('focus')\n        }\n      }, this))\n  }\n\n  Modal.prototype.escape = function () {\n    if (this.isShown && this.options.keyboard) {\n      this.$element.on('keydown.dismiss.bs.modal', $.proxy(function (e) {\n        e.which == 27 && this.hide()\n      }, this))\n    } else if (!this.isShown) {\n      this.$element.off('keydown.dismiss.bs.modal')\n    }\n  }\n\n  Modal.prototype.resize = function () {\n    if (this.isShown) {\n      $(window).on('resize.bs.modal', $.proxy(this.handleUpdate, this))\n    } else {\n      $(window).off('resize.bs.modal')\n    }\n  }\n\n  Modal.prototype.hideModal = function () {\n    var that = this\n    this.$element.hide()\n    this.backdrop(function () {\n      that.$body.removeClass('modal-open')\n      that.resetAdjustments()\n      that.resetScrollbar()\n      that.$element.trigger('hidden.bs.modal')\n    })\n  }\n\n  Modal.prototype.removeBackdrop = function () {\n    this.$backdrop && this.$backdrop.remove()\n    this.$backdrop = null\n  }\n\n  Modal.prototype.backdrop = function (callback) {\n    var that = this\n    var animate = this.$element.hasClass('fade') ? 
'fade' : ''\n\n    if (this.isShown && this.options.backdrop) {\n      var doAnimate = $.support.transition && animate\n\n      this.$backdrop = $('<div class=\"modal-backdrop ' + animate + '\" />')\n        .prependTo(this.$element)\n        .on('click.dismiss.bs.modal', $.proxy(function (e) {\n          if (e.target !== e.currentTarget) return\n          this.options.backdrop == 'static'\n            ? this.$element[0].focus.call(this.$element[0])\n            : this.hide.call(this)\n        }, this))\n\n      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow\n\n      this.$backdrop.addClass('in')\n\n      if (!callback) return\n\n      doAnimate ?\n        this.$backdrop\n          .one('bsTransitionEnd', callback)\n          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :\n        callback()\n\n    } else if (!this.isShown && this.$backdrop) {\n      this.$backdrop.removeClass('in')\n\n      var callbackRemove = function () {\n        that.removeBackdrop()\n        callback && callback()\n      }\n      $.support.transition && this.$element.hasClass('fade') ?\n        this.$backdrop\n          .one('bsTransitionEnd', callbackRemove)\n          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :\n        callbackRemove()\n\n    } else if (callback) {\n      callback()\n    }\n  }\n\n  // these following methods are used to handle overflowing modals\n\n  Modal.prototype.handleUpdate = function () {\n    if (this.options.backdrop) this.adjustBackdrop()\n    this.adjustDialog()\n  }\n\n  Modal.prototype.adjustBackdrop = function () {\n    this.$backdrop\n      .css('height', 0)\n      .css('height', this.$element[0].scrollHeight)\n  }\n\n  Modal.prototype.adjustDialog = function () {\n    var modalIsOverflowing = this.$element[0].scrollHeight > document.documentElement.clientHeight\n\n    this.$element.css({\n      paddingLeft:  !this.bodyIsOverflowing && modalIsOverflowing ? 
this.scrollbarWidth : '',\n      paddingRight: this.bodyIsOverflowing && !modalIsOverflowing ? this.scrollbarWidth : ''\n    })\n  }\n\n  Modal.prototype.resetAdjustments = function () {\n    this.$element.css({\n      paddingLeft: '',\n      paddingRight: ''\n    })\n  }\n\n  Modal.prototype.checkScrollbar = function () {\n    this.bodyIsOverflowing = document.body.scrollHeight > document.documentElement.clientHeight\n    this.scrollbarWidth = this.measureScrollbar()\n  }\n\n  Modal.prototype.setScrollbar = function () {\n    var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10)\n    if (this.bodyIsOverflowing) this.$body.css('padding-right', bodyPad + this.scrollbarWidth)\n  }\n\n  Modal.prototype.resetScrollbar = function () {\n    this.$body.css('padding-right', '')\n  }\n\n  Modal.prototype.measureScrollbar = function () { // thx walsh\n    var scrollDiv = document.createElement('div')\n    scrollDiv.className = 'modal-scrollbar-measure'\n    this.$body.append(scrollDiv)\n    var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth\n    this.$body[0].removeChild(scrollDiv)\n    return scrollbarWidth\n  }\n\n\n  // MODAL PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option, _relatedTarget) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.modal')\n      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))\n      if (typeof option == 'string') data[option](_relatedTarget)\n      else if (options.show) data.show(_relatedTarget)\n    })\n  }\n\n  var old = $.fn.modal\n\n  $.fn.modal             = Plugin\n  $.fn.modal.Constructor = Modal\n\n\n  // MODAL NO CONFLICT\n  // =================\n\n  $.fn.modal.noConflict = function () {\n    $.fn.modal = old\n    return this\n  }\n\n\n  // MODAL DATA-API\n  // ==============\n\n  
$(document).on('click.bs.modal.data-api', '[data-toggle=\"modal\"]', function (e) {\n    var $this   = $(this)\n    var href    = $this.attr('href')\n    var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\\s]+$)/, ''))) // strip for ie7\n    var option  = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())\n\n    if ($this.is('a')) e.preventDefault()\n\n    $target.one('show.bs.modal', function (showEvent) {\n      if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown\n      $target.one('hidden.bs.modal', function () {\n        $this.is(':visible') && $this.trigger('focus')\n      })\n    })\n    Plugin.call($target, option, this)\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tooltip.js v3.3.2\n * http://getbootstrap.com/javascript/#tooltip\n * Inspired by the original jQuery.tipsy by Jason Frame\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // TOOLTIP PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Tooltip = function (element, options) {\n    this.type       =\n    this.options    =\n    this.enabled    =\n    this.timeout    =\n    this.hoverState =\n    this.$element   = null\n\n    this.init('tooltip', element, options)\n  }\n\n  Tooltip.VERSION  = '3.3.2'\n\n  Tooltip.TRANSITION_DURATION = 150\n\n  Tooltip.DEFAULTS = {\n    animation: true,\n    placement: 'top',\n    selector: false,\n    template: '<div class=\"tooltip\" role=\"tooltip\"><div class=\"tooltip-arrow\"></div><div class=\"tooltip-inner\"></div></div>',\n    trigger: 'hover focus',\n    title: '',\n    
delay: 0,\n    html: false,\n    container: false,\n    viewport: {\n      selector: 'body',\n      padding: 0\n    }\n  }\n\n  Tooltip.prototype.init = function (type, element, options) {\n    this.enabled   = true\n    this.type      = type\n    this.$element  = $(element)\n    this.options   = this.getOptions(options)\n    this.$viewport = this.options.viewport && $(this.options.viewport.selector || this.options.viewport)\n\n    var triggers = this.options.trigger.split(' ')\n\n    for (var i = triggers.length; i--;) {\n      var trigger = triggers[i]\n\n      if (trigger == 'click') {\n        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))\n      } else if (trigger != 'manual') {\n        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'\n        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'\n\n        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))\n        this.$element.on(eventOut + '.' 
+ this.type, this.options.selector, $.proxy(this.leave, this))\n      }\n    }\n\n    this.options.selector ?\n      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :\n      this.fixTitle()\n  }\n\n  Tooltip.prototype.getDefaults = function () {\n    return Tooltip.DEFAULTS\n  }\n\n  Tooltip.prototype.getOptions = function (options) {\n    options = $.extend({}, this.getDefaults(), this.$element.data(), options)\n\n    if (options.delay && typeof options.delay == 'number') {\n      options.delay = {\n        show: options.delay,\n        hide: options.delay\n      }\n    }\n\n    return options\n  }\n\n  Tooltip.prototype.getDelegateOptions = function () {\n    var options  = {}\n    var defaults = this.getDefaults()\n\n    this._options && $.each(this._options, function (key, value) {\n      if (defaults[key] != value) options[key] = value\n    })\n\n    return options\n  }\n\n  Tooltip.prototype.enter = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget).data('bs.' + this.type)\n\n    if (self && self.$tip && self.$tip.is(':visible')) {\n      self.hoverState = 'in'\n      return\n    }\n\n    if (!self) {\n      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())\n      $(obj.currentTarget).data('bs.' + this.type, self)\n    }\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'in'\n\n    if (!self.options.delay || !self.options.delay.show) return self.show()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'in') self.show()\n    }, self.options.delay.show)\n  }\n\n  Tooltip.prototype.leave = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget).data('bs.' + this.type)\n\n    if (!self) {\n      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())\n      $(obj.currentTarget).data('bs.' 
+ this.type, self)\n    }\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'out'\n\n    if (!self.options.delay || !self.options.delay.hide) return self.hide()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'out') self.hide()\n    }, self.options.delay.hide)\n  }\n\n  Tooltip.prototype.show = function () {\n    var e = $.Event('show.bs.' + this.type)\n\n    if (this.hasContent() && this.enabled) {\n      this.$element.trigger(e)\n\n      var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])\n      if (e.isDefaultPrevented() || !inDom) return\n      var that = this\n\n      var $tip = this.tip()\n\n      var tipId = this.getUID(this.type)\n\n      this.setContent()\n      $tip.attr('id', tipId)\n      this.$element.attr('aria-describedby', tipId)\n\n      if (this.options.animation) $tip.addClass('fade')\n\n      var placement = typeof this.options.placement == 'function' ?\n        this.options.placement.call(this, $tip[0], this.$element[0]) :\n        this.options.placement\n\n      var autoToken = /\\s?auto?\\s?/i\n      var autoPlace = autoToken.test(placement)\n      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'\n\n      $tip\n        .detach()\n        .css({ top: 0, left: 0, display: 'block' })\n        .addClass(placement)\n        .data('bs.' + this.type, this)\n\n      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)\n\n      var pos          = this.getPosition()\n      var actualWidth  = $tip[0].offsetWidth\n      var actualHeight = $tip[0].offsetHeight\n\n      if (autoPlace) {\n        var orgPlacement = placement\n        var $container   = this.options.container ? $(this.options.container) : this.$element.parent()\n        var containerDim = this.getPosition($container)\n\n        placement = placement == 'bottom' && pos.bottom + actualHeight > containerDim.bottom ? 
'top'    :\n                    placement == 'top'    && pos.top    - actualHeight < containerDim.top    ? 'bottom' :\n                    placement == 'right'  && pos.right  + actualWidth  > containerDim.width  ? 'left'   :\n                    placement == 'left'   && pos.left   - actualWidth  < containerDim.left   ? 'right'  :\n                    placement\n\n        $tip\n          .removeClass(orgPlacement)\n          .addClass(placement)\n      }\n\n      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)\n\n      this.applyPlacement(calculatedOffset, placement)\n\n      var complete = function () {\n        var prevHoverState = that.hoverState\n        that.$element.trigger('shown.bs.' + that.type)\n        that.hoverState = null\n\n        if (prevHoverState == 'out') that.leave(that)\n      }\n\n      $.support.transition && this.$tip.hasClass('fade') ?\n        $tip\n          .one('bsTransitionEnd', complete)\n          .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :\n        complete()\n    }\n  }\n\n  Tooltip.prototype.applyPlacement = function (offset, placement) {\n    var $tip   = this.tip()\n    var width  = $tip[0].offsetWidth\n    var height = $tip[0].offsetHeight\n\n    // manually read margins because getBoundingClientRect includes difference\n    var marginTop = parseInt($tip.css('margin-top'), 10)\n    var marginLeft = parseInt($tip.css('margin-left'), 10)\n\n    // we must check for NaN for ie 8/9\n    if (isNaN(marginTop))  marginTop  = 0\n    if (isNaN(marginLeft)) marginLeft = 0\n\n    offset.top  = offset.top  + marginTop\n    offset.left = offset.left + marginLeft\n\n    // $.fn.offset doesn't round pixel values\n    // so we use setOffset directly with our own function B-0\n    $.offset.setOffset($tip[0], $.extend({\n      using: function (props) {\n        $tip.css({\n          top: Math.round(props.top),\n          left: Math.round(props.left)\n        })\n      }\n    }, offset), 0)\n\n  
  $tip.addClass('in')\n\n    // check to see if placing tip in new offset caused the tip to resize itself\n    var actualWidth  = $tip[0].offsetWidth\n    var actualHeight = $tip[0].offsetHeight\n\n    if (placement == 'top' && actualHeight != height) {\n      offset.top = offset.top + height - actualHeight\n    }\n\n    var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)\n\n    if (delta.left) offset.left += delta.left\n    else offset.top += delta.top\n\n    var isVertical          = /top|bottom/.test(placement)\n    var arrowDelta          = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight\n    var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'\n\n    $tip.offset(offset)\n    this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)\n  }\n\n  Tooltip.prototype.replaceArrow = function (delta, dimension, isHorizontal) {\n    this.arrow()\n      .css(isHorizontal ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')\n      .css(isHorizontal ? 'top' : 'left', '')\n  }\n\n  Tooltip.prototype.setContent = function () {\n    var $tip  = this.tip()\n    var title = this.getTitle()\n\n    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)\n    $tip.removeClass('fade in top bottom left right')\n  }\n\n  Tooltip.prototype.hide = function (callback) {\n    var that = this\n    var $tip = this.tip()\n    var e    = $.Event('hide.bs.' + this.type)\n\n    function complete() {\n      if (that.hoverState != 'in') $tip.detach()\n      that.$element\n        .removeAttr('aria-describedby')\n        .trigger('hidden.bs.' 
+ that.type)\n      callback && callback()\n    }\n\n    this.$element.trigger(e)\n\n    if (e.isDefaultPrevented()) return\n\n    $tip.removeClass('in')\n\n    $.support.transition && this.$tip.hasClass('fade') ?\n      $tip\n        .one('bsTransitionEnd', complete)\n        .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :\n      complete()\n\n    this.hoverState = null\n\n    return this\n  }\n\n  Tooltip.prototype.fixTitle = function () {\n    var $e = this.$element\n    if ($e.attr('title') || typeof ($e.attr('data-original-title')) != 'string') {\n      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')\n    }\n  }\n\n  Tooltip.prototype.hasContent = function () {\n    return this.getTitle()\n  }\n\n  Tooltip.prototype.getPosition = function ($element) {\n    $element   = $element || this.$element\n\n    var el     = $element[0]\n    var isBody = el.tagName == 'BODY'\n\n    var elRect    = el.getBoundingClientRect()\n    if (elRect.width == null) {\n      // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093\n      elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })\n    }\n    var elOffset  = isBody ? { top: 0, left: 0 } : $element.offset()\n    var scroll    = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }\n    var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null\n\n    return $.extend({}, elRect, scroll, outerDims, elOffset)\n  }\n\n  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {\n    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2 } :\n           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :\n           placement == 'left'   ? 
{ top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :\n        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }\n\n  }\n\n  Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {\n    var delta = { top: 0, left: 0 }\n    if (!this.$viewport) return delta\n\n    var viewportPadding = this.options.viewport && this.options.viewport.padding || 0\n    var viewportDimensions = this.getPosition(this.$viewport)\n\n    if (/right|left/.test(placement)) {\n      var topEdgeOffset    = pos.top - viewportPadding - viewportDimensions.scroll\n      var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight\n      if (topEdgeOffset < viewportDimensions.top) { // top overflow\n        delta.top = viewportDimensions.top - topEdgeOffset\n      } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow\n        delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset\n      }\n    } else {\n      var leftEdgeOffset  = pos.left - viewportPadding\n      var rightEdgeOffset = pos.left + viewportPadding + actualWidth\n      if (leftEdgeOffset < viewportDimensions.left) { // left overflow\n        delta.left = viewportDimensions.left - leftEdgeOffset\n      } else if (rightEdgeOffset > viewportDimensions.width) { // right overflow\n        delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset\n      }\n    }\n\n    return delta\n  }\n\n  Tooltip.prototype.getTitle = function () {\n    var title\n    var $e = this.$element\n    var o  = this.options\n\n    title = $e.attr('data-original-title')\n      || (typeof o.title == 'function' ? 
o.title.call($e[0]) :  o.title)\n\n    return title\n  }\n\n  Tooltip.prototype.getUID = function (prefix) {\n    do prefix += ~~(Math.random() * 1000000)\n    while (document.getElementById(prefix))\n    return prefix\n  }\n\n  Tooltip.prototype.tip = function () {\n    return (this.$tip = this.$tip || $(this.options.template))\n  }\n\n  Tooltip.prototype.arrow = function () {\n    return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))\n  }\n\n  Tooltip.prototype.enable = function () {\n    this.enabled = true\n  }\n\n  Tooltip.prototype.disable = function () {\n    this.enabled = false\n  }\n\n  Tooltip.prototype.toggleEnabled = function () {\n    this.enabled = !this.enabled\n  }\n\n  Tooltip.prototype.toggle = function (e) {\n    var self = this\n    if (e) {\n      self = $(e.currentTarget).data('bs.' + this.type)\n      if (!self) {\n        self = new this.constructor(e.currentTarget, this.getDelegateOptions())\n        $(e.currentTarget).data('bs.' + this.type, self)\n      }\n    }\n\n    self.tip().hasClass('in') ? self.leave(self) : self.enter(self)\n  }\n\n  Tooltip.prototype.destroy = function () {\n    var that = this\n    clearTimeout(this.timeout)\n    this.hide(function () {\n      that.$element.off('.' + that.type).removeData('bs.' 
+ that.type)\n    })\n  }\n\n\n  // TOOLTIP PLUGIN DEFINITION\n  // =========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.tooltip')\n      var options = typeof option == 'object' && option\n\n      if (!data && option == 'destroy') return\n      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.tooltip\n\n  $.fn.tooltip             = Plugin\n  $.fn.tooltip.Constructor = Tooltip\n\n\n  // TOOLTIP NO CONFLICT\n  // ===================\n\n  $.fn.tooltip.noConflict = function () {\n    $.fn.tooltip = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: popover.js v3.3.2\n * http://getbootstrap.com/javascript/#popovers\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // POPOVER PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Popover = function (element, options) {\n    this.init('popover', element, options)\n  }\n\n  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')\n\n  Popover.VERSION  = '3.3.2'\n\n  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {\n    placement: 'right',\n    trigger: 'click',\n    content: '',\n    template: '<div class=\"popover\" role=\"tooltip\"><div class=\"arrow\"></div><h3 class=\"popover-title\"></h3><div class=\"popover-content\"></div></div>'\n  })\n\n\n  // NOTE: POPOVER EXTENDS tooltip.js\n  // ================================\n\n  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)\n\n  
Popover.prototype.constructor = Popover\n\n  Popover.prototype.getDefaults = function () {\n    return Popover.DEFAULTS\n  }\n\n  Popover.prototype.setContent = function () {\n    var $tip    = this.tip()\n    var title   = this.getTitle()\n    var content = this.getContent()\n\n    $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)\n    $tip.find('.popover-content').children().detach().end()[ // we use append for html objects to maintain js events\n      this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text'\n    ](content)\n\n    $tip.removeClass('fade top bottom left right in')\n\n    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do\n    // this manually by checking the contents.\n    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()\n  }\n\n  Popover.prototype.hasContent = function () {\n    return this.getTitle() || this.getContent()\n  }\n\n  Popover.prototype.getContent = function () {\n    var $e = this.$element\n    var o  = this.options\n\n    return $e.attr('data-content')\n      || (typeof o.content == 'function' ?\n            o.content.call($e[0]) :\n            o.content)\n  }\n\n  Popover.prototype.arrow = function () {\n    return (this.$arrow = this.$arrow || this.tip().find('.arrow'))\n  }\n\n  Popover.prototype.tip = function () {\n    if (!this.$tip) this.$tip = $(this.options.template)\n    return this.$tip\n  }\n\n\n  // POPOVER PLUGIN DEFINITION\n  // =========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.popover')\n      var options = typeof option == 'object' && option\n\n      if (!data && option == 'destroy') return\n      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.popover\n\n  $.fn.popover             = Plugin\n  
$.fn.popover.Constructor = Popover\n\n\n  // POPOVER NO CONFLICT\n  // ===================\n\n  $.fn.popover.noConflict = function () {\n    $.fn.popover = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: scrollspy.js v3.3.2\n * http://getbootstrap.com/javascript/#scrollspy\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // SCROLLSPY CLASS DEFINITION\n  // ==========================\n\n  function ScrollSpy(element, options) {\n    var process  = $.proxy(this.process, this)\n\n    this.$body          = $('body')\n    this.$scrollElement = $(element).is('body') ? $(window) : $(element)\n    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)\n    this.selector       = (this.options.target || '') + ' .nav li > a'\n    this.offsets        = []\n    this.targets        = []\n    this.activeTarget   = null\n    this.scrollHeight   = 0\n\n    this.$scrollElement.on('scroll.bs.scrollspy', process)\n    this.refresh()\n    this.process()\n  }\n\n  ScrollSpy.VERSION  = '3.3.2'\n\n  ScrollSpy.DEFAULTS = {\n    offset: 10\n  }\n\n  ScrollSpy.prototype.getScrollHeight = function () {\n    return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight)\n  }\n\n  ScrollSpy.prototype.refresh = function () {\n    var offsetMethod = 'offset'\n    var offsetBase   = 0\n\n    if (!$.isWindow(this.$scrollElement[0])) {\n      offsetMethod = 'position'\n      offsetBase   = this.$scrollElement.scrollTop()\n    }\n\n    this.offsets = []\n    this.targets = []\n    this.scrollHeight = this.getScrollHeight()\n\n    var self     = this\n\n    this.$body\n      
.find(this.selector)\n      .map(function () {\n        var $el   = $(this)\n        var href  = $el.data('target') || $el.attr('href')\n        var $href = /^#./.test(href) && $(href)\n\n        return ($href\n          && $href.length\n          && $href.is(':visible')\n          && [[$href[offsetMethod]().top + offsetBase, href]]) || null\n      })\n      .sort(function (a, b) { return a[0] - b[0] })\n      .each(function () {\n        self.offsets.push(this[0])\n        self.targets.push(this[1])\n      })\n  }\n\n  ScrollSpy.prototype.process = function () {\n    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset\n    var scrollHeight = this.getScrollHeight()\n    var maxScroll    = this.options.offset + scrollHeight - this.$scrollElement.height()\n    var offsets      = this.offsets\n    var targets      = this.targets\n    var activeTarget = this.activeTarget\n    var i\n\n    if (this.scrollHeight != scrollHeight) {\n      this.refresh()\n    }\n\n    if (scrollTop >= maxScroll) {\n      return activeTarget != (i = targets[targets.length - 1]) && this.activate(i)\n    }\n\n    if (activeTarget && scrollTop < offsets[0]) {\n      this.activeTarget = null\n      return this.clear()\n    }\n\n    for (i = offsets.length; i--;) {\n      activeTarget != targets[i]\n        && scrollTop >= offsets[i]\n        && (!offsets[i + 1] || scrollTop <= offsets[i + 1])\n        && this.activate(targets[i])\n    }\n  }\n\n  ScrollSpy.prototype.activate = function (target) {\n    this.activeTarget = target\n\n    this.clear()\n\n    var selector = this.selector +\n        '[data-target=\"' + target + '\"],' +\n        this.selector + '[href=\"' + target + '\"]'\n\n    var active = $(selector)\n      .parents('li')\n      .addClass('active')\n\n    if (active.parent('.dropdown-menu').length) {\n      active = active\n        .closest('li.dropdown')\n        .addClass('active')\n    }\n\n    active.trigger('activate.bs.scrollspy')\n  }\n\n  
ScrollSpy.prototype.clear = function () {\n    $(this.selector)\n      .parentsUntil(this.options.target, '.active')\n      .removeClass('active')\n  }\n\n\n  // SCROLLSPY PLUGIN DEFINITION\n  // ===========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.scrollspy')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.scrollspy\n\n  $.fn.scrollspy             = Plugin\n  $.fn.scrollspy.Constructor = ScrollSpy\n\n\n  // SCROLLSPY NO CONFLICT\n  // =====================\n\n  $.fn.scrollspy.noConflict = function () {\n    $.fn.scrollspy = old\n    return this\n  }\n\n\n  // SCROLLSPY DATA-API\n  // ==================\n\n  $(window).on('load.bs.scrollspy.data-api', function () {\n    $('[data-spy=\"scroll\"]').each(function () {\n      var $spy = $(this)\n      Plugin.call($spy, $spy.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tab.js v3.3.2\n * http://getbootstrap.com/javascript/#tabs\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // TAB CLASS DEFINITION\n  // ====================\n\n  var Tab = function (element) {\n    this.element = $(element)\n  }\n\n  Tab.VERSION = '3.3.2'\n\n  Tab.TRANSITION_DURATION = 150\n\n  Tab.prototype.show = function () {\n    var $this    = this.element\n    var $ul      = $this.closest('ul:not(.dropdown-menu)')\n    var selector = $this.data('target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n     
 selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    if ($this.parent('li').hasClass('active')) return\n\n    var $previous = $ul.find('.active:last a')\n    var hideEvent = $.Event('hide.bs.tab', {\n      relatedTarget: $this[0]\n    })\n    var showEvent = $.Event('show.bs.tab', {\n      relatedTarget: $previous[0]\n    })\n\n    $previous.trigger(hideEvent)\n    $this.trigger(showEvent)\n\n    if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return\n\n    var $target = $(selector)\n\n    this.activate($this.closest('li'), $ul)\n    this.activate($target, $target.parent(), function () {\n      $previous.trigger({\n        type: 'hidden.bs.tab',\n        relatedTarget: $this[0]\n      })\n      $this.trigger({\n        type: 'shown.bs.tab',\n        relatedTarget: $previous[0]\n      })\n    })\n  }\n\n  Tab.prototype.activate = function (element, container, callback) {\n    var $active    = container.find('> .active')\n    var transition = callback\n      && $.support.transition\n      && (($active.length && $active.hasClass('fade')) || !!container.find('> .fade').length)\n\n    function next() {\n      $active\n        .removeClass('active')\n        .find('> .dropdown-menu > .active')\n          .removeClass('active')\n        .end()\n        .find('[data-toggle=\"tab\"]')\n          .attr('aria-expanded', false)\n\n      element\n        .addClass('active')\n        .find('[data-toggle=\"tab\"]')\n          .attr('aria-expanded', true)\n\n      if (transition) {\n        element[0].offsetWidth // reflow for transition\n        element.addClass('in')\n      } else {\n        element.removeClass('fade')\n      }\n\n      if (element.parent('.dropdown-menu')) {\n        element\n          .closest('li.dropdown')\n            .addClass('active')\n          .end()\n          .find('[data-toggle=\"tab\"]')\n            .attr('aria-expanded', true)\n      }\n\n      callback && callback()\n    }\n\n    
$active.length && transition ?\n      $active\n        .one('bsTransitionEnd', next)\n        .emulateTransitionEnd(Tab.TRANSITION_DURATION) :\n      next()\n\n    $active.removeClass('in')\n  }\n\n\n  // TAB PLUGIN DEFINITION\n  // =====================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.tab')\n\n      if (!data) $this.data('bs.tab', (data = new Tab(this)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.tab\n\n  $.fn.tab             = Plugin\n  $.fn.tab.Constructor = Tab\n\n\n  // TAB NO CONFLICT\n  // ===============\n\n  $.fn.tab.noConflict = function () {\n    $.fn.tab = old\n    return this\n  }\n\n\n  // TAB DATA-API\n  // ============\n\n  var clickHandler = function (e) {\n    e.preventDefault()\n    Plugin.call($(this), 'show')\n  }\n\n  $(document)\n    .on('click.bs.tab.data-api', '[data-toggle=\"tab\"]', clickHandler)\n    .on('click.bs.tab.data-api', '[data-toggle=\"pill\"]', clickHandler)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: affix.js v3.3.2\n * http://getbootstrap.com/javascript/#affix\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // AFFIX CLASS DEFINITION\n  // ======================\n\n  var Affix = function (element, options) {\n    this.options = $.extend({}, Affix.DEFAULTS, options)\n\n    this.$target = $(this.options.target)\n      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))\n      .on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))\n\n    this.$element     = $(element)\n    this.affixed      =\n    this.unpin        =\n    
this.pinnedOffset = null\n\n    this.checkPosition()\n  }\n\n  Affix.VERSION  = '3.3.2'\n\n  Affix.RESET    = 'affix affix-top affix-bottom'\n\n  Affix.DEFAULTS = {\n    offset: 0,\n    target: window\n  }\n\n  Affix.prototype.getState = function (scrollHeight, height, offsetTop, offsetBottom) {\n    var scrollTop    = this.$target.scrollTop()\n    var position     = this.$element.offset()\n    var targetHeight = this.$target.height()\n\n    if (offsetTop != null && this.affixed == 'top') return scrollTop < offsetTop ? 'top' : false\n\n    if (this.affixed == 'bottom') {\n      if (offsetTop != null) return (scrollTop + this.unpin <= position.top) ? false : 'bottom'\n      return (scrollTop + targetHeight <= scrollHeight - offsetBottom) ? false : 'bottom'\n    }\n\n    var initializing   = this.affixed == null\n    var colliderTop    = initializing ? scrollTop : position.top\n    var colliderHeight = initializing ? targetHeight : height\n\n    if (offsetTop != null && scrollTop <= offsetTop) return 'top'\n    if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom'\n\n    return false\n  }\n\n  Affix.prototype.getPinnedOffset = function () {\n    if (this.pinnedOffset) return this.pinnedOffset\n    this.$element.removeClass(Affix.RESET).addClass('affix')\n    var scrollTop = this.$target.scrollTop()\n    var position  = this.$element.offset()\n    return (this.pinnedOffset = position.top - scrollTop)\n  }\n\n  Affix.prototype.checkPositionWithEventLoop = function () {\n    setTimeout($.proxy(this.checkPosition, this), 1)\n  }\n\n  Affix.prototype.checkPosition = function () {\n    if (!this.$element.is(':visible')) return\n\n    var height       = this.$element.height()\n    var offset       = this.options.offset\n    var offsetTop    = offset.top\n    var offsetBottom = offset.bottom\n    var scrollHeight = $('body').height()\n\n    if (typeof offset != 'object')         offsetBottom = offsetTop = offset\n    if 
(typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)\n    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)\n\n    var affix = this.getState(scrollHeight, height, offsetTop, offsetBottom)\n\n    if (this.affixed != affix) {\n      if (this.unpin != null) this.$element.css('top', '')\n\n      var affixType = 'affix' + (affix ? '-' + affix : '')\n      var e         = $.Event(affixType + '.bs.affix')\n\n      this.$element.trigger(e)\n\n      if (e.isDefaultPrevented()) return\n\n      this.affixed = affix\n      this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null\n\n      this.$element\n        .removeClass(Affix.RESET)\n        .addClass(affixType)\n        .trigger(affixType.replace('affix', 'affixed') + '.bs.affix')\n    }\n\n    if (affix == 'bottom') {\n      this.$element.offset({\n        top: scrollHeight - height - offsetBottom\n      })\n    }\n  }\n\n\n  // AFFIX PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.affix')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.affix\n\n  $.fn.affix             = Plugin\n  $.fn.affix.Constructor = Affix\n\n\n  // AFFIX NO CONFLICT\n  // =================\n\n  $.fn.affix.noConflict = function () {\n    $.fn.affix = old\n    return this\n  }\n\n\n  // AFFIX DATA-API\n  // ==============\n\n  $(window).on('load', function () {\n    $('[data-spy=\"affix\"]').each(function () {\n      var $spy = $(this)\n      var data = $spy.data()\n\n      data.offset = data.offset || {}\n\n      if (data.offsetBottom != null) data.offset.bottom = data.offsetBottom\n      if (data.offsetTop    != null) data.offset.top    = data.offsetTop\n\n      Plugin.call($spy, data)\n 
   })\n  })\n\n}(jQuery);\n"
  },
  {
    "path": "website/js/cbpAnimatedHeader.js",
    "content": "/**\n * cbpAnimatedHeader.js v1.0.0\n * http://www.codrops.com\n *\n * Licensed under the MIT license.\n * http://www.opensource.org/licenses/mit-license.php\n *\n * Copyright 2013, Codrops\n * http://www.codrops.com\n */\nvar cbpAnimatedHeader = (function() {\n\n\tvar docElem = document.documentElement,\n\t\theader = document.querySelector( '.navbar-default' ),\n\t\tdidScroll = false,\n\t\tchangeHeaderOn = 300;\n\n\tfunction init() {\n\t\twindow.addEventListener( 'scroll', function( event ) {\n\t\t\tif( !didScroll ) {\n\t\t\t\tdidScroll = true;\n\t\t\t\tsetTimeout( scrollPage, 250 );\n\t\t\t}\n\t\t}, false );\n\t}\n\n\tfunction scrollPage() {\n\t\tvar sy = scrollY();\n\t\tif ( sy >= changeHeaderOn ) {\n\t\t\tclassie.add( header, 'navbar-shrink' );\n\t\t}\n\t\telse {\n\t\t\tclassie.remove( header, 'navbar-shrink' );\n\t\t}\n\t\tdidScroll = false;\n\t}\n\n\tfunction scrollY() {\n\t\treturn window.pageYOffset || docElem.scrollTop;\n\t}\n\n\tinit();\n\n})();\n"
  },
  {
    "path": "website/js/classie.js",
    "content": "/*!\n * classie - class helper functions\n * from bonzo https://github.com/ded/bonzo\n *\n * classie.has( elem, 'my-class' ) -> true/false\n * classie.add( elem, 'my-new-class' )\n * classie.remove( elem, 'my-unwanted-class' )\n * classie.toggle( elem, 'my-class' )\n */\n\n/*jshint browser: true, strict: true, undef: true */\n/*global define: false */\n\n( function( window ) {\n\n'use strict';\n\n// class helper functions from bonzo https://github.com/ded/bonzo\n\nfunction classReg( className ) {\n  return new RegExp(\"(^|\\\\s+)\" + className + \"(\\\\s+|$)\");\n}\n\n// classList support for class management\n// altho to be fair, the api sucks because it won't accept multiple classes at once\nvar hasClass, addClass, removeClass;\n\nif ( 'classList' in document.documentElement ) {\n  hasClass = function( elem, c ) {\n    return elem.classList.contains( c );\n  };\n  addClass = function( elem, c ) {\n    elem.classList.add( c );\n  };\n  removeClass = function( elem, c ) {\n    elem.classList.remove( c );\n  };\n}\nelse {\n  hasClass = function( elem, c ) {\n    return classReg( c ).test( elem.className );\n  };\n  addClass = function( elem, c ) {\n    if ( !hasClass( elem, c ) ) {\n      elem.className = elem.className + ' ' + c;\n    }\n  };\n  removeClass = function( elem, c ) {\n    elem.className = elem.className.replace( classReg( c ), ' ' );\n  };\n}\n\nfunction toggleClass( elem, c ) {\n  var fn = hasClass( elem, c ) ? removeClass : addClass;\n  fn( elem, c );\n}\n\nvar classie = {\n  // full names\n  hasClass: hasClass,\n  addClass: addClass,\n  removeClass: removeClass,\n  toggleClass: toggleClass,\n  // short names\n  has: hasClass,\n  add: addClass,\n  remove: removeClass,\n  toggle: toggleClass\n};\n\n// transport\nif ( typeof define === 'function' && define.amd ) {\n  // AMD\n  define( classie );\n} else {\n  // browser global\n  window.classie = classie;\n}\n\n})( window );\n"
  },
  {
    "path": "website/js/creative.js",
    "content": "/*!\n * Start Bootstrap - Creative Bootstrap Theme (http://startbootstrap.com)\n * Code licensed under the Apache License v2.0.\n * For details, see http://www.apache.org/licenses/LICENSE-2.0.\n */\n\n(function($) {\n    \"use strict\"; // Start of use strict\n\n    // jQuery for page scrolling feature - requires jQuery Easing plugin\n    $('a.page-scroll').bind('click', function(event) {\n        var $anchor = $(this);\n        $('html, body').stop().animate({\n            scrollTop: ($($anchor.attr('href')).offset().top - 50)\n        }, 1250, 'easeInOutExpo');\n        event.preventDefault();\n    });\n\n    // Highlight the top nav as scrolling occurs\n    $('body').scrollspy({\n        target: '.navbar-fixed-top',\n        offset: 51\n    })\n\n    // Closes the Responsive Menu on Menu Item Click\n    $('.navbar-collapse ul li a').click(function() {\n        $('.navbar-toggle:visible').click();\n    });\n\n    // Fit Text Plugin for Main Header\n    $(\"h1\").fitText(\n        1.2, {\n            minFontSize: '35px',\n            maxFontSize: '65px'\n        }\n    );\n\n    // Offset for Main Navigation\n    $('#mainNav').affix({\n        offset: {\n            top: 100\n        }\n    })\n\n    // Initialize WOW.js Scrolling Animations\n    new WOW().init();\n\n})(jQuery); // End of use strict\n"
  },
  {
    "path": "website/js/jquery.fittext.js",
    "content": "/*global jQuery */\n/*!\n* FitText.js 1.2\n*\n* Copyright 2011, Dave Rupert http://daverupert.com\n* Released under the WTFPL license\n* http://sam.zoy.org/wtfpl/\n*\n* Date: Thu May 05 14:23:00 2011 -0600\n*/\n\n(function( $ ){\n\n  $.fn.fitText = function( kompressor, options ) {\n\n    // Setup options\n    var compressor = kompressor || 1,\n        settings = $.extend({\n          'minFontSize' : Number.NEGATIVE_INFINITY,\n          'maxFontSize' : Number.POSITIVE_INFINITY\n        }, options);\n\n    return this.each(function(){\n\n      // Store the object\n      var $this = $(this);\n\n      // Resizer() resizes items based on the object width divided by the compressor * 10\n      var resizer = function () {\n        $this.css('font-size', Math.max(Math.min($this.width() / (compressor*10), parseFloat(settings.maxFontSize)), parseFloat(settings.minFontSize)));\n      };\n\n      // Call once to set.\n      resizer();\n\n      // Call on resize. Opera debounces their resize by default.\n      $(window).on('resize.fittext orientationchange.fittext', resizer);\n\n    });\n\n  };\n\n})( jQuery );\n"
  },
  {
    "path": "website/js/jquery.js",
    "content": "/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */\n!function(a,b){\"object\"==typeof module&&\"object\"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error(\"jQuery requires a window with a document\");return b(a)}:b(a)}(\"undefined\"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l=\"1.11.1\",m=function(a,b){return new m.fn.init(a,b)},n=/^[\\s\\uFEFF\\xA0]+|[\\s\\uFEFF\\xA0]+$/g,o=/^-ms-/,p=/-([\\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:\"\",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for(\"boolean\"==typeof g&&(j=g,g=arguments[h]||{},h++),\"object\"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:\"jQuery\"+(l+Math.random()).replace(/\\D/g,\"\"),isReady:!0,error:function(a){throw new 
Error(a)},noop:function(){},isFunction:function(a){return\"function\"===m.type(a)},isArray:Array.isArray||function(a){return\"array\"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||\"object\"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,\"constructor\")&&!j.call(a.constructor.prototype,\"isPrototypeOf\"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+\"\":\"object\"==typeof a||\"function\"==typeof a?h[i.call(a)]||\"object\":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,\"ms-\").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?\"\":(a+\"\").replace(n,\"\")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,\"string\"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in 
a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return\"string\"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each(\"Boolean Number String Function Array Date RegExp Object Error\".split(\" \"),function(a,b){h[\"[object \"+b+\"]\"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return\"function\"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:\"array\"===c||0===b||\"number\"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u=\"sizzle\"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C=\"undefined\",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L=\"checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped\",M=\"[\\\\x20\\\\t\\\\r\\\\n\\\\f]\",N=\"(?:\\\\\\\\.|[\\\\w-]|[^\\\\x00-\\\\xa0])+\",O=N.replace(\"w\",\"w#\"),P=\"\\\\[\"+M+\"*(\"+N+\")(?:\"+M+\"*([*^$|!~]?=)\"+M+\"*(?:'((?:\\\\\\\\.|[^\\\\\\\\'])*)'|\\\"((?:\\\\\\\\.|[^\\\\\\\\\\\"])*)\\\"|(\"+O+\"))|)\"+M+\"*\\\\]\",Q=\":(\"+N+\")(?:\\\\((('((?:\\\\\\\\.|[^\\\\\\\\'])*)'|\\\"((?:\\\\\\\\.|[^\\\\\\\\\\\"])*)\\\")|((?:\\\\\\\\.|[^\\\\\\\\()[\\\\]]|\"+P+\")*)|.*)\\\\)|)\",R=new RegExp(\"^\"+M+\"+|((?:^|[^\\\\\\\\])(?:\\\\\\\\.)*)\"+M+\"+$\",\"g\"),S=new RegExp(\"^\"+M+\"*,\"+M+\"*\"),T=new RegExp(\"^\"+M+\"*([>+~]|\"+M+\")\"+M+\"*\"),U=new RegExp(\"=\"+M+\"*([^\\\\]'\\\"]*?)\"+M+\"*\\\\]\",\"g\"),V=new RegExp(Q),W=new RegExp(\"^\"+O+\"$\"),X={ID:new RegExp(\"^#(\"+N+\")\"),CLASS:new RegExp(\"^\\\\.(\"+N+\")\"),TAG:new RegExp(\"^(\"+N.replace(\"w\",\"w*\")+\")\"),ATTR:new RegExp(\"^\"+P),PSEUDO:new RegExp(\"^\"+Q),CHILD:new 
RegExp(\"^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\\\(\"+M+\"*(even|odd|(([+-]|)(\\\\d*)n|)\"+M+\"*(?:([+-]|)\"+M+\"*(\\\\d+)|))\"+M+\"*\\\\)|)\",\"i\"),bool:new RegExp(\"^(?:\"+L+\")$\",\"i\"),needsContext:new RegExp(\"^\"+M+\"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\\\(\"+M+\"*((?:-\\\\d)?\\\\d*)\"+M+\"*\\\\)|)(?=[^-]|$)\",\"i\")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\\d$/i,$=/^[^{]+\\{\\s*\\[native \\w/,_=/^(?:#([\\w-]+)|(\\w+)|\\.([\\w-]+))$/,ab=/[+~]/,bb=/'|\\\\/g,cb=new RegExp(\"\\\\\\\\([\\\\da-f]{1,6}\"+M+\"?|(\"+M+\")|.)\",\"ig\"),db=function(a,b,c){var d=\"0x\"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||\"string\"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&\"object\"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute(\"id\"))?s=r.replace(bb,\"\\\\$&\"):b.setAttribute(\"id\",s),s=\"[id='\"+s+\"'] \",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(\",\")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute(\"id\")}}}return i(a.replace(R,\"$1\"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+\" 
\")>d.cacheLength&&delete b[a.shift()],b[c+\" \"]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement(\"div\");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split(\"|\"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return\"input\"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return(\"input\"===c||\"button\"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?\"HTML\"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener(\"unload\",function(){m()},!1):g.attachEvent&&g.attachEvent(\"onunload\",function(){m()})),c.attributes=ib(function(a){return a.className=\"i\",!a.getAttribute(\"className\")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment(\"\")),!a.getElementsByTagName(\"*\").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML=\"<div class='a'></div><div class='a i'></div>\",a.firstChild.className=\"i\",2===a.getElementsByClassName(\"i\").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return 
c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute(\"id\")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode(\"id\");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if(\"*\"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML=\"<select msallowclip=''><option selected=''></option></select>\",a.querySelectorAll(\"[msallowclip^='']\").length&&q.push(\"[*^$]=\"+M+\"*(?:''|\\\"\\\")\"),a.querySelectorAll(\"[selected]\").length||q.push(\"\\\\[\"+M+\"*(?:value|\"+L+\")\"),a.querySelectorAll(\":checked\").length||q.push(\":checked\")}),ib(function(a){var b=e.createElement(\"input\");b.setAttribute(\"type\",\"hidden\"),a.appendChild(b).setAttribute(\"name\",\"D\"),a.querySelectorAll(\"[name=d]\").length&&q.push(\"name\"+M+\"*[*^$|!~]?=\"),a.querySelectorAll(\":enabled\").length||q.push(\":enabled\",\":disabled\"),a.querySelectorAll(\"*,:x\"),q.push(\",.*:\")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,\"div\"),s.call(a,\"[s!='']:x\"),r.push(\"!=\",Q)}),q=q.length&&new RegExp(q.join(\"|\")),r=r.length&&new RegExp(r.join(\"|\")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return 
a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,\"='$1']\"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error(\"Syntax error, unrecognized expression: \"+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c=\"\",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if(\"string\"==typeof a.textContent)return 
a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{\">\":{dir:\"parentNode\",first:!0},\" \":{dir:\"parentNode\"},\"+\":{dir:\"previousSibling\",first:!0},\"~\":{dir:\"previousSibling\"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||\"\").replace(cb,db),\"~=\"===a[2]&&(a[3]=\" \"+a[3]+\" \"),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),\"nth\"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*(\"even\"===a[3]||\"odd\"===a[3])),a[5]=+(a[7]+a[8]||\"odd\"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||\"\":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(\")\",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return\"*\"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+\" \"];return b||(b=new RegExp(\"(^|\"+M+\")\"+a+\"(\"+M+\"|$)\"))&&y(a,function(a){return b.test(\"string\"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute(\"class\")||\"\")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?\"!=\"===b:b?(e+=\"\",\"=\"===b?e===c:\"!=\"===b?e!==c:\"^=\"===b?c&&0===e.indexOf(c):\"*=\"===b?c&&e.indexOf(c)>-1:\"$=\"===b?c&&e.slice(-c.length)===c:\"~=\"===b?(\" \"+e+\" \").indexOf(c)>-1:\"|=\"===b?e===c||e.slice(0,c.length+1)===c+\"-\":!1):!0}},CHILD:function(a,b,c,d,e){var f=\"nth\"!==a.slice(0,3),g=\"last\"!==a.slice(-4),h=\"of-type\"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?\"nextSibling\":\"previousSibling\",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p=\"only\"===a&&!o&&\"nextSibling\"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error(\"unsupported pseudo: \"+a);return e[u]?e(b):e.length>1?(c=[a,a,\"\",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,\"$1\"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||\"\")||fb.error(\"unsupported lang: \"+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute(\"xml:lang\")||b.getAttribute(\"lang\"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+\"-\");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return 
a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return\"input\"===b&&!!a.checked||\"option\"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return\"input\"===b&&\"button\"===a.type||\"button\"===b},text:function(a){var b;return\"input\"===a.nodeName.toLowerCase()&&\"text\"===a.type&&(null==(b=a.getAttribute(\"type\"))||\"text\"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+\" \"];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R,\" \")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d=\"\";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&\"parentNode\"===d,f=x++;return 
b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||\"*\",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[\" \"],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:\" \"===a[i-2].type?\"*\":\"\"})).replace(R,\"$1\"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q=\"0\",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG(\"*\",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+\" \"];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n=\"function\"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&\"ID\"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split(\"\").sort(B).join(\"\")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement(\"div\"))}),ib(function(a){return a.innerHTML=\"<a href='#'></a>\",\"#\"===a.firstChild.getAttribute(\"href\")})||jb(\"type|href|height|width\",function(a,b,c){return c?void 0:a.getAttribute(b,\"type\"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML=\"<input/>\",a.firstChild.setAttribute(\"value\",\"\"),\"\"===a.firstChild.getAttribute(\"value\")})||jb(\"value\",function(a,b,c){return c||\"input\"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return 
null==a.getAttribute(\"disabled\")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[\":\"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\\w+)\\s*\\/?>(?:<\\/\\1>|)$/,v=/^.[^:#\\[\\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if(\"string\"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=\":not(\"+a+\")\"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if(\"string\"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+\" \"+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,\"string\"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\\s*(<[\\w\\W]+>)[^>]*|#([\\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if(\"string\"==typeof a){if(c=\"<\"===a.charAt(0)&&\">\"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return 
x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?\"undefined\"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||\"string\"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?\"string\"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,\"parentNode\")},parentsUntil:function(a,b,c){return m.dir(a,\"parentNode\",c)},next:function(a){return D(a,\"nextSibling\")},prev:function(a){return D(a,\"previousSibling\")},nextAll:function(a){return m.dir(a,\"nextSibling\")},prevAll:function(a){return m.dir(a,\"previousSibling\")},nextUntil:function(a,b,c){return m.dir(a,\"nextSibling\",c)},prevUntil:function(a,b,c){return 
m.dir(a,\"previousSibling\",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,\"iframe\")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return\"Until\"!==a.slice(-5)&&(d=c),d&&\"string\"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a=\"string\"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);\"function\"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&\"string\"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[[\"resolve\",\"done\",m.Callbacks(\"once memory\"),\"resolved\"],[\"reject\",\"fail\",m.Callbacks(\"once memory\"),\"rejected\"],[\"notify\",\"progress\",m.Callbacks(\"memory\")]],c=\"pending\",d={state:function(){return 
c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+\"With\"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+\"With\"](this===e?d:this,arguments),this},e[f[0]+\"With\"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler(\"ready\"),m(y).off(\"ready\")))}}});function I(){y.addEventListener?(y.removeEventListener(\"DOMContentLoaded\",J,!1),a.removeEventListener(\"load\",J,!1)):(y.detachEvent(\"onreadystatechange\",J),a.detachEvent(\"onload\",J))}function J(){(y.addEventListener||\"load\"===event.type||\"complete\"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),\"complete\"===y.readyState)setTimeout(m.ready);else 
if(y.addEventListener)y.addEventListener(\"DOMContentLoaded\",J,!1),a.addEventListener(\"load\",J,!1);else{y.attachEvent(\"onreadystatechange\",J),a.attachEvent(\"onload\",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll(\"left\")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K=\"undefined\",L;for(L in m(k))break;k.ownLast=\"0\"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName(\"body\")[0],c&&c.style&&(b=y.createElement(\"div\"),d=y.createElement(\"div\"),d.style.cssText=\"position:absolute;border:0;width:0;height:0;top:0;left:-9999px\",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText=\"display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1\",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement(\"div\");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+\" \").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute(\"classid\")===b};var M=/^(?:\\{[\\w\\W]*\\}|\\[[\\w\\W]*\\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d=\"data-\"+b.replace(N,\"-$1\").toLowerCase();if(c=a.getAttribute(d),\"string\"==typeof c){try{c=\"true\"===c?!0:\"false\"===c?!1:\"null\"===c?null:+c+\"\"===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if((\"data\"!==b||!m.isEmptyObject(a[b]))&&\"toJSON\"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;\nif(k&&j[k]&&(e||j[k].data)||void 0!==d||\"string\"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),(\"object\"==typeof b||\"function\"==typeof 
b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),\"string\"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(\" \")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{\"applet \":!0,\"embed \":!0,\"object \":\"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000\"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,\"parsedAttrs\"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf(\"data-\")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,\"parsedAttrs\",!0)}return e}return\"object\"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||\"fx\")+\"queue\",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||\"fx\";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};\"inprogress\"===e&&(e=c.shift(),d--),e&&(\"fx\"===b&&c.unshift(\"inprogress\"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var 
c=b+\"queueHooks\";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks(\"once memory\").add(function(){m._removeData(a,b+\"queue\"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return\"string\"!=typeof a&&(b=a,a=\"fx\",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),\"fx\"===a&&\"inprogress\"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||\"fx\",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};\"string\"!=typeof a&&(b=a,a=void 0),a=a||\"fx\";while(g--)c=m._data(f[g],a+\"queueHooks\"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\\d*\\.|)\\d+(?:[eE][+-]?\\d+|)/.source,T=[\"Top\",\"Right\",\"Bottom\",\"Left\"],U=function(a,b){return a=b||a,\"none\"===m.css(a,\"display\")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if(\"object\"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var a=y.createElement(\"input\"),b=y.createElement(\"div\"),c=y.createDocumentFragment();if(b.innerHTML=\"  <link/><table></table><a href='/a'>a</a><input 
type='checkbox'/>\",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName(\"tbody\").length,k.htmlSerialize=!!b.getElementsByTagName(\"link\").length,k.html5Clone=\"<:nav></:nav>\"!==y.createElement(\"nav\").cloneNode(!0).outerHTML,a.type=\"checkbox\",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML=\"<textarea>x</textarea>\",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML=\"<input type='radio' checked='checked' name='t'/>\",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent(\"onclick\",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement(\"div\");for(b in{submit:!0,change:!0,focusin:!0})c=\"on\"+b,(k[b+\"Bubbles\"]=c in a)||(d.setAttribute(c,\"t\"),k[b+\"Bubbles\"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 
0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||\"\").match(E)||[\"\"],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||\"\").split(\".\").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(\".\")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent(\"on\"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||\"\").match(E)||[\"\"],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||\"\").split(\".\").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp(\"(^|\\\\.)\"+p.join(\"\\\\.(?:.*\\\\.|)\")+\"(\\\\.|$)\"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&(\"**\"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,\"events\"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,\"type\")?b.type:b,q=j.call(b,\"namespace\")?b.namespace.split(\".\"):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(\".\")>=0&&(q=p.split(\".\"),p=q.shift(),q.sort()),g=p.indexOf(\":\")<0&&\"on\"+p,b=b[m.expando]?b:new m.Event(p,\"object\"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join(\".\"),b.namespace_re=b.namespace?new 
RegExp(\"(^|\\\\.)\"+q.join(\"\\\\.(?:.*\\\\.|)\")+\"(\\\\.|$)\"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,\"events\")||{})[b.type]&&m._data(h,\"handle\"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,\"events\")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||\"click\"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||\"click\"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+\" \",void 
0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:\"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which\".split(\" \"),fixHooks:{},keyHooks:{props:\"char charCode key keyCode\".split(\" \"),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:\"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement\".split(\" \"),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:\"focusin\"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 0},delegateType:\"focusout\"},click:{trigger:function(){return m.nodeName(this,\"input\")&&\"checkbox\"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,\"a\")}},beforeunload:{postDispatch:function(a){void 
0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d=\"on\"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:\"mouseover\",mouseleave:\"mouseout\",pointerenter:\"pointerover\",pointerleave:\"pointerout\"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,\"form\")?!1:void m.event.add(this,\"click._submit keypress._submit\",function(a){var b=a.target,c=m.nodeName(b,\"input\")||m.nodeName(b,\"button\")?b.form:void 
0;c&&!m._data(c,\"submitBubbles\")&&(m.event.add(c,\"submit._submit\",function(a){a._submit_bubble=!0}),m._data(c,\"submitBubbles\",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate(\"submit\",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,\"form\")?!1:void m.event.remove(this,\"._submit\")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?((\"checkbox\"===this.type||\"radio\"===this.type)&&(m.event.add(this,\"propertychange._change\",function(a){\"checked\"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,\"click._change\",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate(\"change\",this,a,!0)})),!1):void m.event.add(this,\"beforeactivate._change\",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,\"changeBubbles\")&&(m.event.add(b,\"change._change\",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate(\"change\",this.parentNode,a,!0)}),m._data(b,\"changeBubbles\",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||\"radio\"!==b.type&&\"checkbox\"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return m.event.remove(this,\"._change\"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:\"focusin\",blur:\"focusout\"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if(\"object\"==typeof a){\"string\"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 
0):null==d&&(\"string\"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+\".\"+d.namespace:d.origType,d.selector,d.handler),this;if(\"object\"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||\"function\"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split(\"|\"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb=\"abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video\",fb=/ jQuery\\d+=\"(?:null|\\d+)\"/g,gb=new RegExp(\"<(?:\"+eb+\")[\\\\s/>]\",\"i\"),hb=/^\\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\\w:]+)[^>]*)\\/>/gi,jb=/<([\\w:]+)/,kb=/<tbody/i,lb=/<|&#?\\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\\s*(?:[^=]|=\\s*.checked.)/i,ob=/^$|\\/(?:java|ecma)script/i,pb=/^true\\/(.*)/,qb=/^\\s*<!(?:\\[CDATA\\[|--)|(?:\\]\\]|--)>\\s*$/g,rb={option:[1,\"<select 
multiple='multiple'>\",\"</select>\"],legend:[1,\"<fieldset>\",\"</fieldset>\"],area:[1,\"<map>\",\"</map>\"],param:[1,\"<object>\",\"</object>\"],thead:[1,\"<table>\",\"</table>\"],tr:[2,\"<table><tbody>\",\"</tbody></table>\"],col:[2,\"<table><tbody></tbody><colgroup>\",\"</colgroup></table>\"],td:[3,\"<table><tbody><tr>\",\"</tr></tbody></table>\"],_default:k.htmlSerialize?[0,\"\",\"\"]:[1,\"X<div>\",\"</div>\"]},sb=db(y),tb=sb.appendChild(y.createElement(\"div\"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||\"*\"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||\"*\"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,\"table\")&&m.nodeName(11!==b.nodeType?b:b.firstChild,\"tr\")?a.getElementsByTagName(\"tbody\")[0]||a.appendChild(a.ownerDocument.createElement(\"tbody\")):a}function xb(a){return a.type=(null!==m.find.attr(a,\"type\"))+\"/\"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute(\"type\"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,\"globalEval\",!b||m._data(b[d],\"globalEval\"))}function Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in 
e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}\"script\"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):\"object\"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):\"input\"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):\"option\"===c?b.defaultSelected=b.selected=a.defaultSelected:(\"input\"===c||\"textarea\"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test(\"<\"+a.nodeName+\">\")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,\"script\"),d.length>0&&zb(d,!i&&ub(a,\"script\")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if(\"object\"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement(\"div\")),i=(jb.exec(f)||[\"\",\"\"])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,\"<$1></$2>\")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f=\"table\"!==i||kb.test(f)?\"<table>\"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],\"tbody\")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent=\"\";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else 
p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,\"input\"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),\"script\"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||\"\")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,\"script\")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,\"select\")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return 
V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,\"\"):void 0;if(!(\"string\"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||[\"\",\"\"])[1].toLowerCase()])){a=a.replace(ib,\"<$1></$2>\");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&\"string\"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,\"script\"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,\"script\"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||\"\")&&!m._data(d,\"globalEval\")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||\"\").replace(qb,\"\")));i=c=null}return this}}),m.each({appendTo:\"append\",prependTo:\"prepend\",insertBefore:\"before\",insertAfter:\"after\",replaceAll:\"replaceWith\"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],\"display\");return e.detach(),f}function Fb(a){var 
b=y,c=Db[a];return c||(c=Eb(a,b),\"none\"!==c&&c||(Cb=(Cb||m(\"<iframe frameborder='0' width='0' height='0'/>\")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName(\"body\")[0],c&&c.style?(b=y.createElement(\"div\"),d=y.createElement(\"div\"),d.style.cssText=\"position:absolute;border:0;width:0;height:0;top:0;left:-9999px\",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText=\"-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1\",b.appendChild(y.createElement(\"div\")).style.width=\"5px\",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp(\"^(\"+S+\")(?!px)[a-z%]+$\",\"i\"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(\"\"!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+\"\"}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left=\"fontSize\"===b?\"1em\":g,g=h.pixelLeft+\"px\",h.left=d,f&&(e.left=f)),void 0===g?g:g+\"\"||\"auto\"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement(\"div\"),b.innerHTML=\"  <link/><table></table><a 
href='/a'>a</a><input type='checkbox'/>\",d=b.getElementsByTagName(\"a\")[0],c=d&&d.style){c.cssText=\"float:left;opacity:.5\",k.opacity=\"0.5\"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip=\"content-box\",b.cloneNode(!0).style.backgroundClip=\"\",k.clearCloneStyle=\"content-box\"===b.style.backgroundClip,k.boxSizing=\"\"===c.boxSizing||\"\"===c.MozBoxSizing||\"\"===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName(\"body\")[0],c&&c.style&&(b=y.createElement(\"div\"),d=y.createElement(\"div\"),d.style.cssText=\"position:absolute;border:0;width:0;height:0;top:0;left:-9999px\",c.appendChild(d).appendChild(b),b.style.cssText=\"-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute\",e=f=!1,h=!0,a.getComputedStyle&&(e=\"1%\"!==(a.getComputedStyle(b,null)||{}).top,f=\"4px\"===(a.getComputedStyle(b,null)||{width:\"4px\"}).width,i=b.appendChild(y.createElement(\"div\")),i.style.cssText=b.style.cssText=\"-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0\",i.style.marginRight=i.style.width=\"0\",b.style.width=\"1px\",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML=\"<table><tr><td></td><td>t</td></tr></table>\",i=b.getElementsByTagName(\"td\"),i[0].style.cssText=\"margin:0;border:0;padding:0;display:none\",g=0===i[0].offsetHeight,g&&(i[0].style.display=\"\",i[1].style.display=\"none\",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var 
Mb=/alpha\\([^)]*\\)/i,Nb=/opacity\\s*=\\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp(\"^(\"+S+\")(.*)$\",\"i\"),Qb=new RegExp(\"^([+-])=(\"+S+\")\",\"i\"),Rb={position:\"absolute\",visibility:\"hidden\",display:\"block\"},Sb={letterSpacing:\"0\",fontWeight:\"400\"},Tb=[\"Webkit\",\"O\",\"Moz\",\"ms\"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,\"olddisplay\"),c=d.style.display,b?(f[g]||\"none\"!==c||(d.style.display=\"\"),\"\"===d.style.display&&U(d)&&(f[g]=m._data(d,\"olddisplay\",Fb(d.nodeName)))):(e=U(d),(c&&\"none\"!==c||!e)&&m._data(d,\"olddisplay\",e?c:m.css(d,\"display\"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&\"none\"!==d.style.display&&\"\"!==d.style.display||(d.style.display=b?f[g]||\"\":\"none\"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||\"px\"):b}function Xb(a,b,c,d,e){for(var f=c===(d?\"border\":\"content\")?4:\"width\"===b?1:0,g=0;4>f;f+=2)\"margin\"===c&&(g+=m.css(a,c+T[f],!0,e)),d?(\"content\"===c&&(g-=m.css(a,\"padding\"+T[f],!0,e)),\"margin\"!==c&&(g-=m.css(a,\"border\"+T[f]+\"Width\",!0,e))):(g+=m.css(a,\"padding\"+T[f],!0,e),\"padding\"!==c&&(g+=m.css(a,\"border\"+T[f]+\"Width\",!0,e)));return g}function Yb(a,b,c){var d=!0,e=\"width\"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&\"border-box\"===m.css(a,\"boxSizing\",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?\"border\":\"content\"),d,f)+\"px\"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var 
c=Jb(a,\"opacity\");return\"\"===c?\"1\":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{\"float\":k.cssFloat?\"cssFloat\":\"styleFloat\"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&\"get\"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,\"string\"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f=\"number\"),null!=c&&c===c&&(\"number\"!==f||m.cssNumber[h]||(c+=\"px\"),k.clearCloneStyle||\"\"!==c||0!==b.indexOf(\"background\")||(i[b]=\"inherit\"),!(g&&\"set\"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&\"get\"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),\"normal\"===f&&b in Sb&&(f=Sb[b]),\"\"===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each([\"height\",\"width\"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,\"display\"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&\"border-box\"===m.css(a,\"boxSizing\",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||\"\")?.01*parseFloat(RegExp.$1)+\"\":b?\"1\":\"\"},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?\"alpha(opacity=\"+100*b+\")\":\"\",f=d&&d.filter||c.filter||\"\";c.zoom=1,(b>=1||\"\"===b)&&\"\"===m.trim(f.replace(Mb,\"\"))&&c.removeAttribute&&(c.removeAttribute(\"filter\"),\"\"===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+\" 
\"+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:\"inline-block\"},Jb,[a,\"marginRight\"]):void 0}),m.each({margin:\"\",padding:\"\",border:\"Width\"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f=\"string\"==typeof c?c.split(\" \"):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return\"boolean\"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||\"swing\",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?\"\":\"px\")\n},cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return 
null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,\"\"),b&&\"auto\"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp(\"^(?:([+-])=|)(\"+S+\")([a-z%]*)$\",\"i\"),cc=/queueHooks$/,dc=[ic],ec={\"*\":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?\"\":\"px\"),g=(m.cssNumber[a]||\"px\"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||\".5\",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d[\"margin\"+c]=d[\"padding\"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec[\"*\"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,\"fxshow\");c.queue||(h=m._queueHooks(a,\"fx\"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,\"fx\").length||h.empty.fire()})})),1===a.nodeType&&(\"height\"in b||\"width\"in 
b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,\"display\"),l=\"none\"===j?m._data(a,\"olddisplay\")||Fb(a.nodeName):j,\"inline\"===l&&\"none\"===m.css(a,\"float\")&&(k.inlineBlockNeedsLayout&&\"inline\"!==Fb(a.nodeName)?p.zoom=1:p.display=\"inline-block\")),c.overflow&&(p.overflow=\"hidden\",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||\"toggle\"===e,e===(q?\"hide\":\"show\")){if(\"show\"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))\"inline\"===(\"none\"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?\"hidden\"in r&&(q=r.hidden):r=m._data(a,\"fxshow\",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,\"fxshow\");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start=\"width\"===d||\"height\"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&\"expand\"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return 
b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=[\"*\"]):a=a.split(\" \");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&\"object\"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:\"number\"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue=\"fx\"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css(\"opacity\",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,\"finish\"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return\"string\"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||\"fx\",[]),this.each(function(){var b=!0,e=null!=a&&a+\"queueHooks\",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||\"fx\"),this.each(function(){var 
b,c=m._data(this),d=c[a+\"queue\"],e=c[a+\"queueHooks\"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each([\"toggle\",\"show\",\"hide\"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||\"boolean\"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc(\"show\"),slideUp:gc(\"hide\"),slideToggle:gc(\"toggle\"),fadeIn:{opacity:\"show\"},fadeOut:{opacity:\"hide\"},fadeToggle:{opacity:\"toggle\"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||\"fx\",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement(\"div\"),b.setAttribute(\"className\",\"t\"),b.innerHTML=\"  <link/><table></table><a href='/a'>a</a><input 
type='checkbox'/>\",d=b.getElementsByTagName(\"a\")[0],c=y.createElement(\"select\"),e=c.appendChild(y.createElement(\"option\")),a=b.getElementsByTagName(\"input\")[0],d.style.cssText=\"top:1px\",k.getSetAttribute=\"t\"!==b.className,k.style=/top/.test(d.getAttribute(\"style\")),k.hrefNormalized=\"/a\"===d.getAttribute(\"href\"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement(\"form\").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement(\"input\"),a.setAttribute(\"value\",\"\"),k.input=\"\"===a.getAttribute(\"value\"),a.value=\"t\",a.setAttribute(\"type\",\"radio\"),k.radioValue=\"t\"===a.value}();var lc=/\\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e=\"\":\"number\"==typeof e?e+=\"\":m.isArray(e)&&(e=m.map(e,function(a){return null==a?\"\":a+\"\"})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&\"set\"in b&&void 0!==b.set(this,e,\"value\")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&\"get\"in b&&void 0!==(c=b.get(e,\"value\"))?c:(c=e.value,\"string\"==typeof c?c.replace(lc,\"\"):null==c?\"\":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,\"value\");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f=\"select-one\"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute(\"disabled\"))||c.parentNode.disabled&&m.nodeName(c.parentNode,\"optgroup\"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return 
c||(a.selectedIndex=-1),e}}}}),m.each([\"radio\",\"checkbox\"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute(\"value\")?\"on\":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&\"get\"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&\"set\"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+\"\"),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase(\"default-\"+c)]=a[d]=!1:m.attr(a,c,\"\"),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&\"radio\"===b&&m.nodeName(a,\"input\")){var c=a.value;return a.setAttribute(\"type\",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase(\"default-\"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase(\"default-\"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,\"input\")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return 
d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+=\"\",\"value\"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&\"\"!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,\"\"===b?!1:b,c)}},m.each([\"width\",\"height\"],function(a,b){m.attrHooks[b]={set:function(a,c){return\"\"===c?(a.setAttribute(b,\"auto\"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+\"\"}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{\"for\":\"htmlFor\",\"class\":\"className\"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&\"set\"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&\"get\"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,\"tabindex\");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each([\"href\",\"src\"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each([\"tabIndex\",\"readOnly\",\"maxLength\",\"cellSpacing\",\"cellPadding\",\"rowSpan\",\"colSpan\",\"useMap\",\"frameBorder\",\"contentEditable\"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype=\"encoding\");var 
uc=/[\\t\\r\\n\\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=\"string\"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||\"\").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(\" \"+c.className+\" \").replace(uc,\" \"):\" \")){f=0;while(e=b[f++])d.indexOf(\" \"+e+\" \")<0&&(d+=e+\" \");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||\"string\"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||\"\").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(\" \"+c.className+\" \").replace(uc,\" \"):\"\")){f=0;while(e=b[f++])while(d.indexOf(\" \"+e+\" \")>=0)d=d.replace(\" \"+e+\" \",\" \");g=a?m.trim(d):\"\",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return\"boolean\"==typeof b&&\"string\"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if(\"string\"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||\"boolean\"===c)&&(this.className&&m._data(this,\"__className__\",this.className),this.className=this.className||a===!1?\"\":m._data(this,\"__className__\")||\"\")})},hasClass:function(a){for(var b=\" \"+a+\" \",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(\" \"+this[c].className+\" \").replace(uc,\" \").indexOf(b)>=0)return!0;return!1}}),m.each(\"blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu\".split(\" \"),function(a,b){m.fn[b]=function(a,c){return 
arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,\"**\"):this.off(b,a||\"**\",c)}});var vc=m.now(),wc=/\\?/,xc=/(,)|(\\[|{)|(}|])|\"(?:[^\"\\\\\\r\\n]|\\\\[\"\\\\\\/bfnrt]|\\\\u[\\da-fA-F]{4})*\"\\s*:?|true|false|null|-?(?!0\\d)\\d+(?:\\.\\d+|)(?:[eE][+-]?\\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+\"\");var c,d=null,e=m.trim(b+\"\");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,\"\")}))?Function(\"return \"+e)():m.error(\"Invalid JSON: \"+b)},m.parseXML=function(b){var c,d;if(!b||\"string\"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,\"text/xml\")):(c=new ActiveXObject(\"Microsoft.XMLDOM\"),c.async=\"false\",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName(\"parsererror\").length||m.error(\"Invalid XML: \"+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \\t]*([^\\r\\n]*)\\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\\/\\//,Gc=/^([\\w.+-]+:)(?:\\/\\/(?:[^\\/?#]*@|)([^\\/?#:]*)(?::(\\d+)|)|)/,Hc={},Ic={},Jc=\"*/\".concat(\"*\");try{zc=location.href}catch(Kc){zc=y.createElement(\"a\"),zc.href=\"\",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){\"string\"!=typeof b&&(c=b,b=\"*\");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])\"+\"===d.charAt(0)?(d=d.slice(1)||\"*\",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return\"string\"!=typeof j||f||e[j]?f?!(i=j):void 
0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e[\"*\"]&&g(\"*\")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while(\"*\"===i[0])i.shift(),void 0===e&&(e=a.mimeType||b.getResponseHeader(\"Content-Type\"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+\" \"+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if(\"*\"===f)f=i;else if(\"*\"!==i&&i!==f){if(g=j[i+\" \"+f]||j[\"* \"+f],!g)for(e in j)if(h=e.split(\" \"),h[1]===f&&(g=j[i+\" \"+h[0]]||j[\"* \"+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a[\"throws\"])b=g(b);else try{b=g(b)}catch(l){return{state:\"parsererror\",error:g?l:\"No conversion from \"+i+\" to \"+f}}}return{state:\"success\",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:\"GET\",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:\"application/x-www-form-urlencoded; charset=UTF-8\",accepts:{\"*\":Jc,text:\"text/plain\",html:\"text/html\",xml:\"application/xml, text/xml\",json:\"application/json, text/javascript\"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:\"responseXML\",text:\"responseText\",json:\"responseJSON\"},converters:{\"* text\":String,\"text html\":!0,\"text json\":m.parseJSON,\"text xml\":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){\"object\"==typeof 
a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks(\"once memory\"),q=k.statusCode||{},r={},s={},t=0,u=\"canceled\",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+\"\").replace(Ac,\"\").replace(Fc,yc[1]+\"//\"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||\"*\").toLowerCase().match(E)||[\"\"],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||(\"http:\"===c[1]?\"80\":\"443\"))===(yc[3]||(\"http:\"===yc[1]?\"80\":\"443\")))),k.data&&k.processData&&\"string\"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger(\"ajaxStart\"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?\"&\":\"?\")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,\"$1_=\"+vc++):e+(wc.test(e)?\"&\":\"?\")+\"_=\"+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader(\"If-Modified-Since\",m.lastModified[e]),m.etag[e]&&v.setRequestHeader(\"If-None-Match\",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader(\"Content-Type\",k.contentType),v.setRequestHeader(\"Accept\",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+(\"*\"!==k.dataTypes[0]?\", \"+Jc+\"; q=0.01\":\"\"):k.accepts[\"*\"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u=\"abort\";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger(\"ajaxSend\",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort(\"timeout\")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,\"No Transport\");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||\"\",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader(\"Last-Modified\"),w&&(m.lastModified[e]=w),w=v.getResponseHeader(\"etag\"),w&&(m.etag[e]=w)),204===a||\"HEAD\"===k.type?x=\"nocontent\":304===a?x=\"notmodified\":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x=\"error\",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+\"\",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?\"ajaxSuccess\":\"ajaxError\",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger(\"ajaxComplete\",[v,k]),--m.active||m.event.trigger(\"ajaxStop\")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,\"json\")},getScript:function(a,b){return m.get(a,void 0,b,\"script\")}}),m.each([\"get\",\"post\"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 
0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each([\"ajaxStart\",\"ajaxStop\",\"ajaxComplete\",\"ajaxError\",\"ajaxSuccess\",\"ajaxSend\"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:\"GET\",dataType:\"script\",async:!1,global:!1,\"throws\":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,\"body\")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&\"none\"===(a.style&&a.style.display||m.css(a,\"display\"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\\[\\]$/,Sc=/\\r?\\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+\"[\"+(\"object\"==typeof e?b:\"\")+\"]\",e,c,d)});else if(c||\"object\"!==m.type(b))d(a,b);else for(e in b)Vc(a+\"[\"+e+\"]\",b[e],c,d)}m.param=function(a,b){var c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?\"\":b,d[d.length]=encodeURIComponent(a)+\"=\"+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in 
a)Vc(c,a[c],b,e);return d.join(\"&\").replace(Qc,\"+\")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,\"elements\");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(\":disabled\")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,\"\\r\\n\")}}):{name:b.name,value:c.replace(Sc,\"\\r\\n\")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on(\"unload\",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&\"withCredentials\"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c[\"X-Requested-With\"]||(c[\"X-Requested-With\"]=\"XMLHttpRequest\");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+\"\");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,\"string\"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=\"\"}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new 
a.ActiveXObject(\"Microsoft.XMLHTTP\")}catch(b){}}m.ajaxSetup({accepts:{script:\"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript\"},contents:{script:/(?:java|ecma)script/},converters:{\"text script\":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter(\"script\",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type=\"GET\",a.global=!1)}),m.ajaxTransport(\"script\",function(a){if(a.crossDomain){var b,c=y.head||m(\"head\")[0]||y.documentElement;return{send:function(d,e){b=y.createElement(\"script\"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,\"success\"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\\?(?=&|$)|\\?\\?/;m.ajaxSetup({jsonp:\"callback\",jsonpCallback:function(){var a=_c.pop()||m.expando+\"_\"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter(\"json jsonp\",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?\"url\":\"string\"==typeof b.data&&!(b.contentType||\"\").indexOf(\"application/x-www-form-urlencoded\")&&ad.test(b.data)&&\"data\");return h||\"jsonp\"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,\"$1\"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?\"&\":\"?\")+b.jsonp+\"=\"+e),b.converters[\"script json\"]=function(){return g||m.error(e+\" was not called\"),g[0]},b.dataTypes[0]=\"json\",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),\"script\"):void 0}),m.parseHTML=function(a,b,c){if(!a||\"string\"!=typeof a)return null;\"boolean\"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return 
d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if(\"string\"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(\" \");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&\"object\"==typeof b&&(f=\"POST\"),g.length>0&&m.ajax({url:a,type:f,dataType:\"html\",data:b}).done(function(a){e=arguments,g.html(d?m(\"<div>\").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,\"position\"),l=m(a),n={};\"static\"===k&&(a.style.position=\"relative\"),h=l.offset(),f=m.css(a,\"top\"),i=m.css(a,\"left\"),j=(\"absolute\"===k||\"fixed\"===k)&&m.inArray(\"auto\",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),\"using\"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var 
a,b,c={top:0,left:0},d=this[0];return\"fixed\"===m.css(d,\"position\")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],\"html\")||(c=a.offset()),c.top+=m.css(a[0],\"borderTopWidth\",!0),c.left+=m.css(a[0],\"borderLeftWidth\",!0)),{top:b.top-c.top-m.css(d,\"marginTop\",!0),left:b.left-c.left-m.css(d,\"marginLeft\",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,\"html\")&&\"static\"===m.css(a,\"position\"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:\"pageXOffset\",scrollTop:\"pageYOffset\"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each([\"top\",\"left\"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+\"px\":c):void 0})}),m.each({Height:\"height\",Width:\"width\"},function(a,b){m.each({padding:\"inner\"+a,content:b,\"\":\"outer\"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||\"boolean\"!=typeof d),g=c||(d===!0||e===!0?\"margin\":\"border\");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement[\"client\"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body[\"scroll\"+a],e[\"scroll\"+a],b.body[\"offset\"+a],e[\"offset\"+a],e[\"client\"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,\"function\"==typeof define&&define.amd&&define(\"jquery\",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m});\n"
  },
  {
    "path": "website/platform-kubernetes.md",
    "content": "---\nlayout: page\ntitle: Jupyter Enterprise Gateway and Kubernetes\n---\n\nRecently, we have experienced various advances in AI, in particular around Deep Learning. This has\nincreased the popularity of Deep Learning use cases and also the proliferation of several development\nframeworks that have different runtime and deployment requirements. Containers provide a very flexible\nway to build such heterogeneous environments and Kubernetes provides an easy way to deploy and manage such\ndeployments with the benefit of elasticity and other quality of service.\n\nJupyter Enterprise Gateway extends the Jupyter Notebook platform and enables Jupyter Notebook\nkernels to run as independent pods in a Kubernetes cluster, providing the necessary environment\nisolation to support the development and training of Deep Learning models.\n\nUsing the Kubernetes support in Jupyter Enterprise Gateway, the container image where the kernel will be\nlaunched becomes a choice, where you can easily start a Python kernel on a TensorFlow community image\nto enable working on TensorFlow models, or you can have a Python kernel on a PyTorch community image.\n\nKubernetes also gives the ability to associate/share specialized hardware such as GPUs and TPUs\nto the kernel pod, providing necessary power for training Deep Learning models.\n\n<br/>\n\n<div align=\"center\">\n  <img src=\"./img/platform-kubernetes.png\" height=\"50%\" width=\"50%\">\n</div>\n\n<br/>\n\n### Deployment\n\n<br/>\n\nJupyter Enterprise Gateway can easily be deployed into your Kubernetes cluster:\n\n<div>\n<pre><code>kubectl apply -f https://raw.githubusercontent.com/jupyter/enterprise_gateway/main/etc/kubernetes/enterprise-gateway.yaml</code></pre>\n</div>\n\n#### Deployment Scripts\n\n<br/>\n\nThe Jupyter Enterprise Gateway development team uses some Ansible scripts for provisioning\ntest environments, these scripts might be useful for users trying to get started with the gateway\non a Kubernetes 
environment.\n\n- Ansible Deployment scripts : <a href=\"https://github.com/lresende/ansible-spark-cluster\">ansible-kubernetes-cluster</a>\n"
  },
  {
    "path": "website/platform-spark.md",
    "content": "---\nlayout: page\ntitle: Jupyter Enterprise Gateway and Apache Spark\n---\n\nThe Big Data Analytics use cases require processing large data sets which are not containable by\nthe resources available on a single machine.\n\nJupyter Enterprise Gateway extends the Jupyter Notebook platform and enables Jupyter Notebook\nkernels to run as Apache Spark applications in YARN cluster mode.\n\nBy leveraging the functionality\nof the underlying resource management applications like Hadoop YARN, etc., Jupyter Enterprise Gateway\ndistributes kernels across the compute cluster, dramatically increasing the number of simultaneously\nactive notebooks/kernels.\n\n<br/>\n\n### Deployment\n\n<br/>\n\nJupyter Enterprise Gateway can easily be incorporated into your Analytics Platform.\n\nIf you are using a distribution like HDP, the gateway can be installed in an edge node\n(optionally secured by Knox). Jupyter Notebooks can then connect via the gateway and\nhave access to run the Notebook kernels in the Spark/YARN nodes. 
If you have Kerberos\nsecurity enabled on the cluster, then each notebook kernel will be running as the userid\nof the user that requested the notebook kernel, thus leveraging all configured ACLs\nwhen accessing HDFS, and other secured resources.\n\n<div align=\"center\">\n  <img src=\"./img/platform-spark-hdp.png\" height=\"50%\" width=\"50%\">\n</div>\n\nNote that the use of a distribution is not a requirement, and we also support running\nJupyter Enterprise Gateway in a vanilla deployment of Spark and YARN.\n\n<div align=\"center\">\n  <img src=\"./img/platform-spark-yarn.png\" height=\"50%\" width=\"50%\">\n</div>\n\n#### Deployment Scripts\n\n<br/>\n\nThe Jupyter Enterprise Gateway development team uses some Ansible scripts for provisioning\ntest environments, these scripts might be useful for users trying to get started with the gateway.\n\n- Ansible Deployment scripts : <a href=\"https://github.com/lresende/ansible-spark-cluster\">ansible-spark-cluster</a>\n"
  },
  {
    "path": "website/privacy-policy.md",
    "content": "---\nlayout: page\ntitle: Jupyter Enterprise Gateway Privacy Policy\n---\n\n## Jupyter Enterprise Gateway Privacy Policy\n\nInformation about your use of this website is collected using server access logs and a tracking cookie.\nThe collected information consists of the following:\n\n- The IP address from which you access the website;\n- The type of browser and operating system you use to access our site;\n- The date and time you access our site;\n- The pages you visit; and\n- The addresses of pages from where you followed a link to our site.\n\nPart of this information is gathered using a tracking cookie set by the [Google Analytics](https://www.google.com/analytics/)\nservice and handled by Google as described in their [privacy policy](https://www.google.com/privacy.html).\nSee your browser documentation for instructions on how to disable the cookie if you prefer not to share this data with Google.\n\nWe use the gathered information to help us make our site more useful to visitors and to better understand how and when our site is used. We do not track or collect personally identifiable information or associate gathered data with any personally identifying information from other sources.\n\nBy using this website, you consent to the collection of this data in the manner and for the purpose described above.\n"
  },
  {
    "path": "website/publish.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nset -e\n\nBASE_DIR=$(pwd)\nWORK_DIR=$(pwd)/build\nSOURCE_DIR=$(pwd)/build/enterprise_gateway/website\nHTML_DIR=$(pwd)/build/website\n\n\necho \"  \"\necho \"-------------------------------------------------------------\"\necho \"-------       Build and publish project website       -------\"\necho \"-------------------------------------------------------------\"\necho \"  \"\necho \"Base directory:   $BASE_DIR\"\necho \"Work directory:   $WORK_DIR\"\necho \"Source directory: $SOURCE_DIR\"\necho \"HTML directory:   $HTML_DIR\"\necho \"  \"\n\nset -o xtrace\n\n\nfunction checkout_code {\n    rm -rf $WORK_DIR\n    mkdir -p $WORK_DIR\n    cd $WORK_DIR\n    # Checkout code\n    git clone git@github.com:jupyter/enterprise_gateway.git\n    cd enterprise_gateway\n    git_hash=`git rev-parse --short HEAD`\n    echo \"Checked out Jupyter Enterprise Gateway git hash $git_hash\"\n}\n\nfunction build_website {\n    rm -rf $HTML_DIR\n    mkdir -p $HTML_DIR\n    cd $SOURCE_DIR\n    jekyll clean\n    jekyll build -d $HTML_DIR\n}\n\nfunction publish_website {\n    cd $WORK_DIR/enterprise_gateway\n    git checkout gh-pages\n    git branch --set-upstream-to=origin/gh-pages gh-pages\n    git pull --rebase\n    rm -rf *\n    git checkout .gitignore\n    git checkout README.md\n    cp -r $HTML_DIR/ $WORK_DIR/enterprise_gateway\n    git add *\n    git commit -a -m\"Publishing website using commit $git_hash\"\n    echo \"Publishing website using commit $git_hash\"\n    git push origin gh-pages\n}\n\necho \"Preparing to publish website...\"\n\ncheckout_code\n\nbuild_website\n\npublish_website\n\n\necho \"Website published...\"\necho \"   https://jupyter.org/enterprise_gateway/\"\necho \"   https://jupyter.github.io/enterprise_gateway/\"\n\n\ncd \"$BASE_DIR\" #return to base dir\n"
  }
]