Repository: jupyter-server/enterprise_gateway
Branch: main
Commit: 56a80a112385
Files: 295
Total size: 1.6 MB

Directory structure:
enterprise_gateway/

├── .git-blame-ignore-revs
├── .gitattributes
├── .github/
│   ├── ISSUE_TEMPLATE.md
│   ├── codeql/
│   │   └── codeql-config.yml
│   ├── dependabot.yml
│   └── workflows/
│       ├── build.yml
│       └── codeql-analysis.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── LICENSE.md
├── Makefile
├── README.md
├── codecov.yml
├── conftest.py
├── docs/
│   ├── Makefile
│   ├── doc-requirements.txt
│   ├── environment.yml
│   ├── make.bat
│   └── source/
│       ├── _static/
│       │   └── custom.css
│       ├── conf.py
│       ├── contributors/
│       │   ├── contrib.md
│       │   ├── debug.md
│       │   ├── devinstall.md
│       │   ├── docker.md
│       │   ├── index.rst
│       │   ├── roadmap.md
│       │   ├── sequence-diagrams.md
│       │   └── system-architecture.md
│       ├── developers/
│       │   ├── custom-images.md
│       │   ├── dev-process-proxy.md
│       │   ├── index.rst
│       │   ├── kernel-launcher.md
│       │   ├── kernel-library.md
│       │   ├── kernel-manager.md
│       │   ├── kernel-specification.md
│       │   └── rest-api.rst
│       ├── index.rst
│       ├── operators/
│       │   ├── config-add-env.md
│       │   ├── config-availability.md
│       │   ├── config-cli.md
│       │   ├── config-culling.md
│       │   ├── config-dynamic.md
│       │   ├── config-env-debug.md
│       │   ├── config-file.md
│       │   ├── config-kernel-override.md
│       │   ├── config-security.md
│       │   ├── config-sys-env.md
│       │   ├── deploy-conductor.md
│       │   ├── deploy-distributed.md
│       │   ├── deploy-docker.md
│       │   ├── deploy-kubernetes.md
│       │   ├── deploy-single.md
│       │   ├── deploy-yarn-cluster.md
│       │   ├── index.rst
│       │   ├── installing-eg.md
│       │   ├── installing-kernels.md
│       │   └── launching-eg.md
│       ├── other/
│       │   ├── index.rst
│       │   ├── related-resources.md
│       │   └── troubleshooting.md
│       └── users/
│           ├── client-config.md
│           ├── connecting-to-eg.md
│           ├── index.rst
│           ├── installation.md
│           └── kernel-envs.md
├── enterprise_gateway/
│   ├── __init__.py
│   ├── __main__.py
│   ├── _version.py
│   ├── base/
│   │   ├── __init__.py
│   │   └── handlers.py
│   ├── client/
│   │   ├── __init__.py
│   │   └── gateway_client.py
│   ├── enterprisegatewayapp.py
│   ├── itests/
│   │   ├── __init__.py
│   │   ├── kernels/
│   │   │   └── authorization_test/
│   │   │       └── kernel.json
│   │   ├── test_authorization.py
│   │   ├── test_base.py
│   │   ├── test_python_kernel.py
│   │   ├── test_r_kernel.py
│   │   └── test_scala_kernel.py
│   ├── mixins.py
│   ├── services/
│   │   ├── __init__.py
│   │   ├── api/
│   │   │   ├── __init__.py
│   │   │   ├── handlers.py
│   │   │   ├── swagger.json
│   │   │   └── swagger.yaml
│   │   ├── kernels/
│   │   │   ├── __init__.py
│   │   │   ├── handlers.py
│   │   │   └── remotemanager.py
│   │   ├── kernelspecs/
│   │   │   ├── __init__.py
│   │   │   ├── handlers.py
│   │   │   └── kernelspec_cache.py
│   │   ├── processproxies/
│   │   │   ├── __init__.py
│   │   │   ├── conductor.py
│   │   │   ├── container.py
│   │   │   ├── crd.py
│   │   │   ├── distributed.py
│   │   │   ├── docker_swarm.py
│   │   │   ├── k8s.py
│   │   │   ├── processproxy.py
│   │   │   ├── spark_operator.py
│   │   │   └── yarn.py
│   │   └── sessions/
│   │       ├── __init__.py
│   │       ├── handlers.py
│   │       ├── kernelsessionmanager.py
│   │       └── sessionmanager.py
│   └── tests/
│       ├── __init__.py
│       ├── resources/
│       │   ├── failing_code2.ipynb
│       │   ├── failing_code3.ipynb
│       │   ├── kernel_api2.ipynb
│       │   ├── kernel_api3.ipynb
│       │   ├── kernels/
│       │   │   └── kernel_defaults_test/
│       │   │       └── kernel.json
│       │   ├── public/
│       │   │   └── index.html
│       │   ├── responses_2.ipynb
│       │   ├── responses_3.ipynb
│       │   ├── simple_api2.ipynb
│       │   ├── simple_api3.ipynb
│       │   ├── unknown_kernel.ipynb
│       │   ├── zen2.ipynb
│       │   └── zen3.ipynb
│       ├── test_enterprise_gateway.py
│       ├── test_gatewayapp.py
│       ├── test_handlers.py
│       ├── test_kernelspec_cache.py
│       ├── test_mixins.py
│       ├── test_process_proxy.py
│       └── test_yaml_injection.py
├── etc/
│   ├── Makefile
│   ├── docker/
│   │   ├── demo-base/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── bootstrap-yarn-spark.sh
│   │   │   ├── core-site.xml.template
│   │   │   ├── fix-permissions
│   │   │   ├── hdfs-site.xml
│   │   │   ├── mapred-site.xml
│   │   │   ├── ssh_config
│   │   │   └── yarn-site.xml.template
│   │   ├── docker-compose.yml
│   │   ├── enterprise-gateway/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   └── start-enterprise-gateway.sh
│   │   ├── enterprise-gateway-demo/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── bootstrap-enterprise-gateway.sh
│   │   │   └── start-enterprise-gateway.sh.template
│   │   ├── kernel-image-puller/
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── image_fetcher.py
│   │   │   ├── kernel_image_puller.py
│   │   │   └── requirements.txt
│   │   ├── kernel-py/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── kernel-r/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── kernel-scala/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── kernel-spark-py/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── kernel-spark-r/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── kernel-tf-gpu-py/
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   └── kernel-tf-py/
│   │       ├── Dockerfile
│   │       └── README.md
│   ├── kernel-launchers/
│   │   ├── R/
│   │   │   └── scripts/
│   │   │       ├── launch_IRkernel.R
│   │   │       └── server_listener.py
│   │   ├── bootstrap/
│   │   │   └── bootstrap-kernel.sh
│   │   ├── docker/
│   │   │   └── scripts/
│   │   │       └── launch_docker.py
│   │   ├── kubernetes/
│   │   │   └── scripts/
│   │   │       ├── kernel-pod.yaml.j2
│   │   │       └── launch_kubernetes.py
│   │   ├── operators/
│   │   │   └── scripts/
│   │   │       ├── launch_custom_resource.py
│   │   │       └── sparkoperator.k8s.io-v1beta2.yaml.j2
│   │   ├── python/
│   │   │   └── scripts/
│   │   │       └── launch_ipykernel.py
│   │   └── scala/
│   │       └── toree-launcher/
│   │           ├── build.sbt
│   │           ├── project/
│   │           │   ├── build.properties
│   │           │   ├── plugins.sbt
│   │           │   └── scalastyle-config.xml
│   │           └── src/
│   │               └── main/
│   │                   └── scala/
│   │                       └── launcher/
│   │                           ├── KernelProfile.scala
│   │                           ├── ToreeLauncher.scala
│   │                           └── utils/
│   │                               ├── SecurityUtils.scala
│   │                               └── SocketUtils.scala
│   ├── kernel-resources/
│   │   └── ir/
│   │       └── kernel.js
│   ├── kernelspecs/
│   │   ├── R_docker/
│   │   │   └── kernel.json
│   │   ├── R_kubernetes/
│   │   │   └── kernel.json
│   │   ├── dask_python_yarn_remote/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── python_distributed/
│   │   │   └── kernel.json
│   │   ├── python_docker/
│   │   │   └── kernel.json
│   │   ├── python_kubernetes/
│   │   │   └── kernel.json
│   │   ├── python_tf_docker/
│   │   │   └── kernel.json
│   │   ├── python_tf_gpu_docker/
│   │   │   └── kernel.json
│   │   ├── python_tf_gpu_kubernetes/
│   │   │   └── kernel.json
│   │   ├── python_tf_kubernetes/
│   │   │   └── kernel.json
│   │   ├── scala_docker/
│   │   │   └── kernel.json
│   │   ├── scala_kubernetes/
│   │   │   └── kernel.json
│   │   ├── spark_R_conductor_cluster/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_R_kubernetes/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_R_yarn_client/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_R_yarn_cluster/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_python_conductor_cluster/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_python_kubernetes/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_python_operator/
│   │   │   └── kernel.json
│   │   ├── spark_python_yarn_client/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_python_yarn_cluster/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_scala_conductor_cluster/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_scala_kubernetes/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   ├── spark_scala_yarn_client/
│   │   │   ├── bin/
│   │   │   │   └── run.sh
│   │   │   └── kernel.json
│   │   └── spark_scala_yarn_cluster/
│   │       ├── bin/
│   │       │   └── run.sh
│   │       └── kernel.json
│   └── kubernetes/
│       └── helm/
│           └── enterprise-gateway/
│               ├── Chart.yaml
│               ├── templates/
│               │   ├── daemonset.yaml
│               │   ├── deployment.yaml
│               │   ├── eg-clusterrole.yaml
│               │   ├── eg-clusterrolebinding.yaml
│               │   ├── eg-serviceaccount.yaml
│               │   ├── imagepullSecret.yaml
│               │   ├── ingress.yaml
│               │   ├── kip-clusterrole.yaml
│               │   ├── kip-clusterrolebinding.yaml
│               │   ├── kip-serviceaccount.yaml
│               │   ├── psp.yaml
│               │   └── service.yaml
│               └── values.yaml
├── pyproject.toml
├── release.sh
├── requirements.yml
└── website/
    ├── .gitignore
    ├── README.md
    ├── _config.yml
    ├── _data/
    │   └── navigation.yml
    ├── _includes/
    │   ├── call-to-action.html
    │   ├── contact.html
    │   ├── features.html
    │   ├── head.html
    │   ├── header.html
    │   ├── nav.html
    │   ├── platforms.html
    │   └── scripts.html
    ├── _layouts/
    │   ├── home.html
    │   └── page.html
    ├── _sass/
    │   ├── _base.scss
    │   └── _mixins.scss
    ├── css/
    │   ├── bootstrap.css
    │   └── main.scss
    ├── font-awesome/
    │   ├── css/
    │   │   └── font-awesome.css
    │   ├── fonts/
    │   │   └── FontAwesome.otf
    │   ├── less/
    │   │   ├── animated.less
    │   │   ├── bordered-pulled.less
    │   │   ├── core.less
    │   │   ├── fixed-width.less
    │   │   ├── font-awesome.less
    │   │   ├── icons.less
    │   │   ├── larger.less
    │   │   ├── list.less
    │   │   ├── mixins.less
    │   │   ├── path.less
    │   │   ├── rotated-flipped.less
    │   │   ├── stacked.less
    │   │   └── variables.less
    │   └── scss/
    │       ├── _animated.scss
    │       ├── _bordered-pulled.scss
    │       ├── _core.scss
    │       ├── _fixed-width.scss
    │       ├── _icons.scss
    │       ├── _larger.scss
    │       ├── _list.scss
    │       ├── _mixins.scss
    │       ├── _path.scss
    │       ├── _rotated-flipped.scss
    │       ├── _stacked.scss
    │       ├── _variables.scss
    │       └── font-awesome.scss
    ├── index.md
    ├── js/
    │   ├── bootstrap.js
    │   ├── cbpAnimatedHeader.js
    │   ├── classie.js
    │   ├── creative.js
    │   ├── jquery.fittext.js
    │   └── jquery.js
    ├── platform-kubernetes.md
    ├── platform-spark.md
    ├── privacy-policy.md
    └── publish.sh

================================================
FILE CONTENTS
================================================

================================================
FILE: .git-blame-ignore-revs
================================================
# Initial pre-commit reformat
df811d0deacebfd6cc77e8bf501d9b87ff006fb5


================================================
FILE: .gitattributes
================================================
# Set the default behavior to have all files normalized to Unix-style
# line endings upon check-in.
* text=auto

# Declare files that will always have CRLF line endings on checkout.
*.bat text eol=crlf

# Denote all files that are truly binary and should not be modified.
*.dll binary
*.exp binary
*.lib binary
*.pdb binary
*.exe binary


================================================
FILE: .github/ISSUE_TEMPLATE.md
================================================
Help us improve the Jupyter Enterprise Gateway project by reporting issues
or asking questions.

## Description

## Screenshots / Logs

If applicable, add screenshots and/or logs to help explain your problem.
To generate better logs, please run the gateway with the `--debug` command line parameter.

## Environment

- Enterprise Gateway Version \[e.g. 1.x, 2.x, ...\]
- Platform: \[e.g. YARN, Kubernetes ...\]
- Others \[e.g. Jupyter Server 5.7, JupyterHub 1.0, etc\]


================================================
FILE: .github/codeql/codeql-config.yml
================================================
name: "Enterprise Gateway CodeQL config"

queries:
  - uses: security-and-quality

paths-ignore:
  - enterprise_gateway/tests


================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
  # Set update schedule for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions once a week (Mondays by default)
      interval: "weekly"
  # Set update schedule for pip
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      # Check for updates to Python deps once a week (Mondays by default)
      interval: "weekly"


================================================
FILE: .github/workflows/build.yml
================================================
name: Builds
on:
  push:
  pull_request:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    env:
      ASYNC_TEST_TIMEOUT: 60
      KERNEL_LAUNCH_TIMEOUT: 120
      CONDA_HOME: /usr/share/miniconda
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.10", "3.11"]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          clean: true
      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Display dependency info
        run: |
          python --version
          pip --version
          conda --version
      - name: Add SBT launcher
        uses: sbt/setup-sbt@v1
      - name: Install Python dependencies
        run: |
          pip install ".[test]"
      - name: Build and install Jupyter Enterprise Gateway
        uses: nick-invision/retry@v3.0.0
        with:
          timeout_minutes: 10
          max_attempts: 2
          command: |
            make clean dist enterprise-gateway-demo test-install-wheel
      - name: Log current Python dependencies version
        run: |
          pip freeze
      - name: Run unit tests
        uses: nick-invision/retry@v3.0.0
        with:
          timeout_minutes: 3
          max_attempts: 1
          command: |
            make test
      - name: Run integration tests
        run: |
          # Run integration tests with debug output
          make itest-yarn-debug
      - name: Collect logs
        if: success() || failure()
        run: |
          python --version
          pip --version
          pip list
          echo "==== Docker Container Logs ===="
          docker logs itest-yarn
          echo "==== Docker Container Status ===="
          docker ps -a
          echo "==== Enterprise Gateway Log ===="
          docker exec -it itest-yarn cat /usr/local/share/jupyter/enterprise-gateway.log || true
      - name: Run linters
        run: |
          make lint
      - name: Bump versions
        run: |
          pipx run tbump --dry-run --no-tag --no-push 100.100.100rc0

  link_check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
        with:
          python_version: "3.11"
      - name: Install Python dependencies
        run: |
          pip install ".[test]"
      - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1
        with:
          ignore_links: |-
            http://my-gateway-server\.com:8888|https://docs\.openshift\.com/.*|https://docs\.redhat\.com/.*

  build_docs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
        with:
          python_version: "3.11"
      - name: Build Docs
        run: make docs

  test_minimum_versions:
    name: Test Minimum Versions
    timeout-minutes: 20
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
        with:
          python_version: "3.11"
      - name: Install dependencies with minimum versions
        run: |
          pip install ".[test]"
      - name: Run the unit tests
        run: |
          pytest -vv -W default || pytest -vv -W default --lf

  make_sdist:
    name: Make SDist
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
        with:
          python_version: "3.11"
      - uses: jupyterlab/maintainer-tools/.github/actions/make-sdist@v1

  test_sdist:
    runs-on: ubuntu-latest
    needs: [make_sdist]
    name: Install from SDist and Test
    timeout-minutes: 20
    steps:
      - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
        with:
          python_version: "3.11"
      - uses: jupyterlab/maintainer-tools/.github/actions/test-sdist@v1

  python_tests_check: # This job does nothing and is only used for the branch protection
    if: always()
    needs:
      - build
      - link_check
      - test_minimum_versions
      - build_docs
      - test_sdist
    runs-on: ubuntu-latest
    steps:
      - name: Decide whether the needed jobs succeeded or failed
        uses: re-actors/alls-green@release/v1
        with:
          jobs: ${{ toJSON(needs) }}


================================================
FILE: .github/workflows/codeql-analysis.yml
================================================
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Checks"

on:
  push:
    branches: [main]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [main]
  schedule:
    - cron: "24 7 * * 1"

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: ["python"]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
          fetch-depth: 2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          config-file: ./.github/codeql/codeql-config.yml
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      #   If the Autobuild fails above, remove it and uncomment the following three lines.
      #   Modify them (or add more) to build your code if your project requires it; please refer to the EXAMPLE below for guidance.

      # - run: |
      #   echo "Run, Build Application using script"
      #   ./location_of_script_within_repo/buildscript.sh

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3


================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

.DS_Store
.ipynb_checkpoints/

# PyCharm
.idea/
*.iml

# Build-related
.image-*

# Jekyll
_site/
.sass-cache/

# Debug-related
.kube/

# vscode ide stuff
*.code-workspace
.history/
.vscode/



================================================
FILE: .pre-commit-config.yaml
================================================
ci:
  autoupdate_schedule: monthly

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-case-conflict
      - id: check-ast
      - id: check-docstring-first
      - id: check-executables-have-shebangs
      - id: check-added-large-files
      - id: check-merge-conflict
      - id: check-json
      - id: check-toml
      - id: check-yaml
        exclude: etc/kubernetes/.*.yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace

  - repo: https://github.com/python-jsonschema/check-jsonschema
    rev: 0.27.4
    hooks:
      - id: check-github-workflows

  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.17
    hooks:
      - id: mdformat
        additional_dependencies:
          [mdformat-gfm, mdformat-frontmatter, mdformat-footnote]

  - repo: https://github.com/psf/black
    rev: 24.2.0
    hooks:
      - id: black

  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.3.0
    hooks:
      - id: ruff
        args: ["--fix"]


================================================
FILE: .readthedocs.yaml
================================================
version: 2
build:
  os: "ubuntu-22.04"
  tools:
    python: "mambaforge-22.9"
sphinx:
  configuration: docs/source/conf.py
conda:
  environment: docs/environment.yml


================================================
FILE: LICENSE.md
================================================
# Licensing terms

This project is licensed under the terms of the Modified BSD License
(also known as New or Revised or 3-Clause BSD), as follows:

- Copyright (c) 2001-2015, IPython Development Team
- Copyright (c) 2015-, Jupyter Development Team

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

Neither the name of the Jupyter Development Team nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

## About the Jupyter Development Team

The Jupyter Development Team is the set of all contributors to the Jupyter project.
This includes all of the Jupyter Subprojects, which are the different repositories
under the [jupyter](https://github.com/jupyter/) GitHub organization.

The core team that coordinates development on GitHub can be found here:
https://github.com/jupyter/.

## Our copyright policy

Jupyter uses a shared copyright model. Each contributor maintains copyright
over their contributions to Jupyter. But, it is important to note that these
contributions are typically only changes to the repositories. Thus, the Jupyter
source code, in its entirety is not the copyright of any single person or
institution. Instead, it is the collective copyright of the entire Jupyter
Development Team. If individual contributors want to maintain a record of what
changes/contributions they have specific copyright on, they should indicate
their copyright in the commit message of the change, when they commit the
change to one of the Jupyter repositories.

With this in mind, the following banner should be used in any source code file
to indicate the copyright and license terms:

```
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
```


================================================
FILE: Makefile
================================================
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

.PHONY: help clean clean-env dev dev-http docs install bdist sdist test release check_dists \
    clean-images clean-enterprise-gateway-demo clean-demo-base clean-kernel-images clean-enterprise-gateway \
    clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py \
    clean-kernel-tf-gpu-py clean-kernel-image-puller push-images push-enterprise-gateway-demo push-demo-base \
    push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r \
    push-kernel-scala push-kernel-tf-py push-kernel-tf-gpu-py push-kernel-image-puller publish helm-chart

SA?=source activate
ENV:=enterprise-gateway-dev
SHELL:=/bin/bash
MULTIARCH_BUILD?=
TARGET_ARCH?=undefined

VERSION?=3.3.0.dev0
SPARK_VERSION?=3.2.1

ifeq (dev, $(findstring dev, $(VERSION)))
    TAG:=dev
else
    TAG:=$(VERSION)
endif


WHEEL_FILES:=$(shell find . -type f ! -path "./build/*" ! -path "./etc/*" ! -path "./docs/*" ! -path "./.git/*" ! -path "./.idea/*" ! -path "./dist/*" ! -path "./.image-*" ! -path "*/__pycache__/*" )
WHEEL_FILE:=dist/jupyter_enterprise_gateway-$(VERSION)-py3-none-any.whl
SDIST_FILE:=dist/jupyter_enterprise_gateway-$(VERSION).tar.gz
DIST_FILES=$(WHEEL_FILE) $(SDIST_FILE)

HELM_DESIRED_VERSION:=v3.18.3  # Pin the version of helm to use (v3.18.3 is latest as of 6/21/25)
HELM_CHART_VERSION:=$(shell grep version: etc/kubernetes/helm/enterprise-gateway/Chart.yaml | sed 's/version: //')
HELM_CHART_PACKAGE:=dist/enterprise-gateway-$(HELM_CHART_VERSION).tgz
HELM_CHART:=dist/jupyter_enterprise_gateway_helm-$(VERSION).tar.gz
HELM_CHART_DIR:=etc/kubernetes/helm/enterprise-gateway
HELM_CHART_FILES:=$(shell find $(HELM_CHART_DIR) -type f ! -name .DS_Store)
HELM_INSTALL_DIR?=/usr/local/bin

help:
# http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

env: ## Make a dev environment
	-conda env create --file requirements.yml --name $(ENV)
	-conda env config vars set PYTHONPATH=$(PWD) --name $(ENV)

activate: ## Print instructions to activate the virtualenv (default: enterprise-gateway-dev)
	@echo "Run \`$(SA) $(ENV)\` to activate the environment."

clean: ## Make a clean source tree
	-rm -rf dist
	-rm -rf build
	-rm -rf *.egg-info
	-find . -name target -type d -exec rm -fr {} +
	-find . -name __pycache__  -type d -exec rm -fr {} +
	-find enterprise_gateway -name '*.pyc' -exec rm -fr {} +
	-find website -name '.sass-cache' -type d -exec rm -fr {} +
	-find website -name '_site' -type d -exec rm -fr {} +
	-find website -name 'build' -type d -exec rm -fr {} +
	-make -C docs clean
	-make -C etc clean

clean-env: ## Remove conda env
	-conda env remove -n $(ENV) -y

lint: ## Check code style
	@pip install -q -e ".[lint]"
	@pip install -q pipx
	ruff check .
	black --check --diff --color .
	mdformat --check *.md
	pipx run 'validate-pyproject[all]' pyproject.toml
	pipx run interrogate -v .

run-dev: test-install-wheel ## Make a server in jupyter_websocket mode
	python enterprise_gateway

docs: ## Make HTML documentation
	make -C docs requirements html SPHINXOPTS="-W"

kernelspecs:  kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files ## Create archives with sample kernelspecs
kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files:
	make VERSION=$(VERSION) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) -C  etc $@

test-install: dist test-install-wheel test-install-tar ## Install and minimally run EG with the wheel and tar distributions

test-install-wheel:
	pip uninstall -y jupyter_enterprise_gateway
	pip install dist/jupyter_enterprise_gateway-*.whl && \
		jupyter enterprisegateway --help

test-install-tar:
	pip uninstall -y jupyter_enterprise_gateway
	pip install dist/jupyter_enterprise_gateway-*.tar.gz && \
		jupyter enterprisegateway --help

bdist: $(WHEEL_FILE)

$(WHEEL_FILE): $(WHEEL_FILES)
	pip install build && python -m build --wheel . \
		&& rm -rf *.egg-info && chmod 0755 dist/*.*

sdist: $(SDIST_FILE)

$(SDIST_FILE): $(WHEEL_FILES)
	pip install build && python -m build --sdist . \
		&& rm -rf *.egg-info && chmod 0755 dist/*.*

helm-chart: helm-install $(HELM_CHART) ## Make helm chart distribution

helm-install: $(HELM_INSTALL_DIR)/helm

$(HELM_INSTALL_DIR)/helm: # Download and install helm
	curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -o /tmp/get_helm.sh \
	&& chmod +x /tmp/get_helm.sh \
	&& DESIRED_VERSION=$(HELM_DESIRED_VERSION) /tmp/get_helm.sh \
	&& rm -f /tmp/get_helm.sh

helm-lint: helm-clean
	helm lint $(HELM_CHART_DIR)

helm-clean: # Remove any .DS_Store files that might wind up in the package
	$(shell find etc/kubernetes/helm -type f -name '.DS_Store' -exec rm -f {} \;)

$(HELM_CHART): $(HELM_CHART_FILES)
	make helm-lint
	helm package $(HELM_CHART_DIR) -d dist
	mv $(HELM_CHART_PACKAGE) $(HELM_CHART)  # Rename output to match other assets

dist: lint bdist sdist kernelspecs helm-chart ## Make source, binary, kernelspecs and helm chart distributions to dist folder

TEST_DEBUG_OPTS:=

test-debug:
	make TEST_DEBUG_OPTS="-s --log-level=10" test

test: TEST?=
test: ## Run unit tests
ifeq ($(TEST),)
	pytest -vv $(TEST_DEBUG_OPTS)
else
# e.g., make test TEST="test_gatewayapp.py::TestGatewayAppConfig"
	pytest -vv $(TEST_DEBUG_OPTS) enterprise_gateway/tests/$(TEST)
endif

release: dist check_dists ## Make a wheel + source release on PyPI
	twine upload $(DIST_FILES)

check_dists:
	pip install twine && twine check --strict $(DIST_FILES)

# Here for doc purposes
docker-images:  ## Build docker images (includes kernel-based images)
kernel-images: ## Build kernel-based docker images

# Actual working targets...
docker-images: demo-base enterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller

enterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@

demo-base:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(SPARK_VERSION) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@

# Here for doc purposes
clean-images: clean-demo-base ## Remove docker images (includes kernel-based images)
clean-kernel-images: ## Remove kernel-based images

clean-images clean-enterprise-gateway-demo clean-kernel-images clean-enterprise-gateway clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py clean-kernel-tf-gpu-py clean-kernel-image-puller:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@

clean-demo-base:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@

push-images: push-demo-base
push-images push-enterprise-gateway-demo push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r push-kernel-scala push-kernel-tf-py push-kernel-tf-gpu-py push-kernel-image-puller:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@

push-demo-base:
	make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@

publish: NO_CACHE=--no-cache
publish: clean clean-images dist docker-images push-images

# itest should have these targets up to date: bdist kernelspecs docker-enterprise-gateway

itest: itest-docker itest-yarn

# itest configurable settings
# indicates two things:
# this prefix is used by itest to determine the hostname to test against; in addition,
# if itests are run locally with the docker-prep target, it will set the hostname within that container as well
ITEST_HOSTNAME_PREFIX?=itest

# indicates the user to emulate.  This equates to 'KERNEL_USERNAME'...
ITEST_USER?=bob
# indicates the set of options (besides host and user) to use. At this time, only the Python notebooks succeed, so we're skipping R and Scala.
ITEST_OPTIONS?=

# here's an example of the options (besides host and user) with their expected values ...
# ITEST_OPTIONS=--impersonation < True | False >

ITEST_YARN_PORT?=8888
ITEST_YARN_HOST?=localhost:$(ITEST_YARN_PORT)
ITEST_YARN_TESTS?=enterprise_gateway/itests

ITEST_KERNEL_LAUNCH_TIMEOUT=120

LOG_LEVEL=INFO

itest-yarn-debug: ## Run integration tests (optionally) against docker demo (YARN) container with print statements
	make LOG_LEVEL=DEBUG TEST_DEBUG_OPTS="--log-level=10" itest-yarn

PREP_ITEST_YARN?=1
itest-yarn: ## Run integration tests (optionally) against docker demo (YARN) container
ifeq (1, $(PREP_ITEST_YARN))
	make itest-yarn-prep
endif
	(GATEWAY_HOST=$(ITEST_YARN_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) SPARK_VERSION=$(SPARK_VERSION) ITEST_HOSTNAME_PREFIX=$(ITEST_HOSTNAME_PREFIX) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_YARN_TESTS))
	@echo "Run \`docker logs itest-yarn\` to see enterprise-gateway log."

PREP_TIMEOUT?=60
itest-yarn-prep:
	@-docker rm -f itest-yarn >> /dev/null
	@echo "Starting enterprise-gateway container (run \`docker logs itest-yarn\` to see container log)..."
	@-docker run -itd -p $(ITEST_YARN_PORT):$(ITEST_YARN_PORT) -p 8088:8088 -p 8042:8042 -h itest-yarn --name itest-yarn -v `pwd`/enterprise_gateway/itests:/tmp/byok elyra/enterprise-gateway-demo:$(TAG) --gateway
	@(r="1"; attempts=0; while [ "$$r" == "1" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo "Waiting for enterprise-gateway to start..."; sleep 2; ((attempts++)); docker logs itest-yarn |grep --regexp "Jupyter Enterprise Gateway .* is available at http"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo "Wait for startup timed out!"; exit 1; fi;)


# This should get cleaned up once docker support is more mature
ITEST_DOCKER_PORT?=8889
ITEST_DOCKER_HOST?=localhost:$(ITEST_DOCKER_PORT)
ITEST_DOCKER_TESTS?=enterprise_gateway/itests/test_r_kernel.py::TestRKernelLocal enterprise_gateway/itests/test_python_kernel.py::TestPythonKernelLocal enterprise_gateway/itests/test_scala_kernel.py::TestScalaKernelLocal
ITEST_DOCKER_KERNELS=PYTHON_KERNEL_LOCAL_NAME=python_docker SCALA_KERNEL_LOCAL_NAME=scala_docker R_KERNEL_LOCAL_NAME=R_docker

itest-docker-debug: ## Run integration tests (optionally) against docker container with print statements
	make LOG_LEVEL=DEBUG TEST_DEBUG_OPTS="-s --log-level=10" itest-docker

PREP_ITEST_DOCKER?=1
itest-docker: ## Run integration tests (optionally) against docker swarm
ifeq (1, $(PREP_ITEST_DOCKER))
	make itest-docker-prep
endif
	(GATEWAY_HOST=$(ITEST_DOCKER_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) $(ITEST_DOCKER_KERNELS) ITEST_HOSTNAME_PREFIX=$(ITEST_USER) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_DOCKER_TESTS))
	@echo "Run \`docker service logs itest-docker\` to see enterprise-gateway log."

PREP_TIMEOUT?=180
itest-docker-prep:
	@-docker service rm enterprise-gateway_enterprise-gateway enterprise-gateway_enterprise-gateway-proxy
	@-docker swarm leave --force
	# Check if swarm mode is active; if not, attempt to create the swarm
	@(docker info | grep -q 'Swarm: active'; if [ $$? -eq 1 ]; then docker swarm init; fi;)
	@echo "Starting enterprise-gateway swarm service (run \`docker service logs enterprise-gateway_enterprise-gateway\` to see service log)..."
	@KG_PORT=${ITEST_DOCKER_PORT} EG_DOCKER_NETWORK=enterprise-gateway docker stack deploy -c etc/docker/docker-compose.yml enterprise-gateway
	@(r="1"; attempts=0; while [ "$$r" == "1" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo "Waiting for enterprise-gateway to start..."; sleep 2; ((attempts++)); docker service logs enterprise-gateway_enterprise-gateway 2>&1 |grep --regexp "Jupyter Enterprise Gateway .* is available at http"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo "Wait for startup timed out!"; exit 1; fi;)


================================================
FILE: README.md
================================================
**[Website](https://jupyter-enterprise-gateway.readthedocs.io/)** |
**[Technical Overview](#technical-overview)** |
**[Installation](#installation)** |
**[System Architecture](#system-architecture)** |
**[Contributing](#contributing)**

# Jupyter Enterprise Gateway

[![Actions Status](https://github.com/jupyter-server/enterprise_gateway/workflows/Builds/badge.svg)](https://github.com/jupyter-server/enterprise_gateway/actions)
[![PyPI version](https://badge.fury.io/py/jupyter-enterprise-gateway.svg)](https://badge.fury.io/py/jupyter-enterprise-gateway)
[![Downloads](https://pepy.tech/badge/jupyter-enterprise-gateway/month)](https://pepy.tech/project/jupyter-enterprise-gateway)
[![Documentation Status](https://readthedocs.org/projects/jupyter-enterprise-gateway/badge/?version=latest)](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/?badge=latest)
[![Google Group](https://img.shields.io/badge/google-group-blue.svg)](https://groups.google.com/forum/#!forum/jupyter)

Jupyter Enterprise Gateway enables Jupyter Notebook to launch remote kernels in a distributed cluster,
including Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm.

It provides out-of-the-box support for the following kernels:

- Python using IPython kernel
- R using IRkernel
- Scala using Apache Toree kernel

Full documentation for Jupyter Enterprise Gateway can be found [here](https://jupyter-enterprise-gateway.readthedocs.io/en/latest).

Jupyter Enterprise Gateway does not manage multiple Jupyter Notebook deployments; for that,
you should use [JupyterHub](https://github.com/jupyterhub/jupyterhub).

## Technical Overview

Jupyter Enterprise Gateway is a web server that provides headless access to Jupyter kernels within
an enterprise. Inspired by Jupyter Kernel Gateway, Jupyter Enterprise Gateway provides feature parity with Kernel Gateway's [jupyter-websocket mode](https://jupyter-kernel-gateway.readthedocs.io/en/latest/websocket-mode.html) in addition to the following:

- Adds support for remote kernels hosted throughout the enterprise where kernels can be launched in
  the following ways:
  - Local to the Enterprise Gateway server (today's Kernel Gateway behavior)
  - On specific nodes of the cluster utilizing a round-robin algorithm
  - On nodes identified by an associated resource manager
- Provides support for Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm out of the box. Others can be configured via Enterprise Gateway's extensible framework.
- Secure communication from the client, through the Enterprise Gateway server, to the kernels
- Multi-tenant capabilities
- Persistent kernel sessions
- Ability to associate profiles consisting of configuration settings to a kernel for a given user (see [Project Roadmap](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/roadmap.html))

![Deployment Diagram](https://github.com/jupyter-server/enterprise_gateway/blob/main/docs/source/images/deployment.png?raw=true)
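
For illustration, here is a minimal sketch of starting a remote kernel through the gateway's
REST API, using only the Python standard library. It assumes a gateway listening on
`localhost:8888` with the sample `python_distributed` kernelspec (from `etc/kernelspecs/`)
installed; `KERNEL_`-prefixed variables in the request body are passed through to the kernel
launcher:

```python
# Minimal sketch (not an official client): start a remote kernel via the REST API.
# Assumes EG runs at localhost:8888 and the python_distributed kernelspec is installed.
import json
from urllib.request import Request, urlopen

body = json.dumps(
    {
        "name": "python_distributed",  # kernelspec to launch
        "env": {"KERNEL_USERNAME": "alice"},  # KERNEL_-prefixed vars reach the launcher
    }
).encode("utf-8")

request = Request(
    "http://localhost:8888/api/kernels",
    data=body,
    headers={"Content-Type": "application/json"},
)
with urlopen(request) as response:
    kernel = json.load(response)

print(kernel["id"], kernel["execution_state"])
```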

## Installation

Detailed installation instructions are located in the
[Users Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/users/index.html)
of the project docs. Here's a quick start using `pip`:

```bash
# install from pypi
pip install --upgrade jupyter_enterprise_gateway

# show all config options
jupyter enterprisegateway --help-all

# run it with default options
jupyter enterprisegateway
```

Please check the [configuration options within the Operators Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/index.html#configuring-enterprise-gateway)
for information about the supported options.
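
As a quick smoke test (an illustrative snippet, not taken from the project docs), you can
confirm the gateway is up by listing its kernelspecs through the REST API described in
`enterprise_gateway/services/api/swagger.json`:

```python
# Illustrative smoke test: list the kernelspecs the gateway advertises.
# Assumes the default host/port of localhost:8888.
import json
from urllib.request import urlopen

with urlopen("http://localhost:8888/api/kernelspecs") as response:
    specs = json.load(response)

print("default kernel:", specs["default"])
print("available:", ", ".join(sorted(specs["kernelspecs"])))
```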

## System Architecture

The [System Architecture page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/system-architecture.html)
includes information about Enterprise Gateway's remote kernel, process proxy, and launcher frameworks.
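
As a rough sketch of how these pieces connect: a kernelspec opts into remote launching via a
`process_proxy` stanza in its `kernel.json` metadata. The class path below mirrors this
repository's layout (`enterprise_gateway/services/processproxies/distributed.py`), but the
`argv` shown is a simplified stand-in for the shipped specs under `etc/kernelspecs/`:

```python
# Illustrative only: the general shape of a kernel.json bound to a process proxy.
# The argv here is simplified; see etc/kernelspecs/ for authoritative examples.
import json

kernel_json = {
    "display_name": "Python (distributed)",
    "language": "python",
    "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"],
    "metadata": {
        "process_proxy": {
            "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
        }
    },
}
print(json.dumps(kernel_json, indent=2))
```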

## Contributing

The [Contribution page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/contrib.html) includes
information about how to contribute to Enterprise Gateway along with our roadmap. While there, you'll want to
[set up a development environment](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/devinstall.html) and check out typical developer tasks.


================================================
FILE: codecov.yml
================================================
codecov:
  notify:
    require_ci_to_pass: yes

coverage:
  precision: 2
  round: down
  range: "70...100"

  status:
    project: no
    patch: no
    changes: no

parsers:
  gcov:
    branch_detection:
      conditional: yes
      loop: yes
      method: no
      macro: no

comment: off


================================================
FILE: conftest.py
================================================
def pytest_addoption(parser):
    parser.addoption("--host", action="store", default="localhost:8888")
    parser.addoption("--username", action="store", default="elyra")
    parser.addoption("--impersonation", action="store", default="false")


def pytest_generate_tests(metafunc):
    # This is called for every test. Only get/set command line arguments
    # if the argument is specified in the list of test "fixturenames".
    if "host" in metafunc.fixturenames:
        metafunc.parametrize("host", [metafunc.config.option.host])
    if "username" in metafunc.fixturenames:
        metafunc.parametrize("username", [metafunc.config.option.username])
    if "impersonation" in metafunc.fixturenames:
        metafunc.parametrize("impersonation", [metafunc.config.option.impersonation])


================================================
FILE: docs/Makefile
================================================
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    = -n
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

DOC_REQUIREMENTS = doc-requirements.txt

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext requirements

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  requirements to install required packages"
	@echo "  html         to make standalone HTML files"
	@echo "  dirhtml      to make HTML files named index.html in directories"
	@echo "  singlehtml   to make a single large HTML file"
	@echo "  pickle       to make pickle files"
	@echo "  json         to make JSON files"
	@echo "  htmlhelp     to make HTML files and a HTML help project"
	@echo "  qthelp       to make HTML files and a qthelp project"
	@echo "  applehelp    to make an Apple Help Book"
	@echo "  devhelp      to make HTML files and a Devhelp project"
	@echo "  epub         to make an epub"
	@echo "  latex        to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf     to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja   to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text         to make text files"
	@echo "  man          to make manual pages"
	@echo "  texinfo      to make Texinfo files"
	@echo "  info         to make Texinfo files and run them through makeinfo"
	@echo "  gettext      to make PO message catalogs"
	@echo "  changes      to make an overview of all changed/added/deprecated items"
	@echo "  xml          to make Docutils-native XML files"
	@echo "  pseudoxml    to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck    to check all external links for integrity"
	@echo "  doctest      to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage     to run coverage check of the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

requirements:
	pip install -q -r $(DOC_REQUIREMENTS)

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/JupyterHub.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/JupyterHub.qhc"

applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/JupyterHub"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/JupyterHub"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."


================================================
FILE: docs/doc-requirements.txt
================================================
# https://github.com/miyakogi/m2r/issues/66
mistune<4
myst-parser
pydata_sphinx_theme
sphinx
sphinx-markdown-tables
sphinx_book_theme
sphinxcontrib-mermaid
sphinxcontrib-openapi
sphinxcontrib_github_alt
sphinxcontrib_spelling
sphinxemoji
tornado


================================================
FILE: docs/environment.yml
================================================
name: enterprise_gateway_docs
channels:
  - conda-forge
  - defaults
  - free
dependencies:
  - pip
  - python=3.11
  - pip:
      - -r doc-requirements.txt


================================================
FILE: docs/make.bat
================================================
@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  texinfo    to make Texinfo files
	echo.  gettext    to make PO message catalogs
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  xml        to make Docutils-native XML files
	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	echo.  coverage   to run coverage check of the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)


REM Check if sphinx-build is available and fall back to the Python module if it is not
%SPHINXBUILD% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok

:sphinx_python

set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://sphinx-doc.org/
	exit /b 1
)

:sphinx_ok


if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\EnterpriseGatewaydoc.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\EnterpriseGatewaydoc.qhc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdf" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdfja" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf-ja
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

if "%1" == "coverage" (
	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
	goto end
)

if "%1" == "xml" (
	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The XML files are in %BUILDDIR%/xml.
	goto end
)

if "%1" == "pseudoxml" (
	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
	goto end
)

:end


================================================
FILE: docs/source/_static/custom.css
================================================
body div.sphinxsidebarwrapper p.logo {
  text-align: left;
}
.mermaid svg {
  height: 100%;
}


================================================
FILE: docs/source/conf.py
================================================
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.0"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "myst_parser",
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.autosummary",
    "sphinx.ext.mathjax",
    "sphinxcontrib_github_alt",
    "sphinxcontrib.mermaid",
    "sphinxcontrib.openapi",
    "sphinxemoji.sphinxemoji",
]

try:
    import enchant  # noqa

    extensions += ["sphinxcontrib.spelling"]
except ImportError:
    pass

myst_enable_extensions = ["html_image"]
myst_heading_anchors = 4  # Needs to be 4 or higher

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
source_suffix = {
    ".rst": "restructuredtext",
    ".txt": "markdown",
    ".md": "markdown",
}

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Jupyter Enterprise Gateway"
copyright = "2022, Project Jupyter"  # noqa
author = "Jupyter Server Team"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
_version_py = os.path.join("..", "..", "enterprise_gateway", "_version.py")
version_ns = {}

with open(_version_py) as version_file:
    exec(version_file.read(), version_ns)  # noqa

# The full version, including alpha/beta/rc tags.
release = version_ns["__version__"]
# The short X.Y version.
version = ".".join(release.split(".")[:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# html_theme = "sphinx_book_theme"
html_logo = "_static/jupyter-logo.png"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
#     'logo_only': html_logo,
#     'description': "Enterprise Gateway",
#     'fixed_sidebar': False,
#     'show_relbars': True,
#     'github_user': 'jupyter',
#     'github_repo': 'enterprise_gateway',
#     'github_type': 'star',
#     'logo': 'jupyter-logo.png',
#     'logo_text_align': 'left',
#     'analytics_id': 'UA-130853690-1',
# }


# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
    "custom.css",
]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "EnterpriseGatewaydoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "EnterpriseGateway.tex",
        "Enterprise Gateway Documentation",
        "https://jupyter.org",
        "manual",
    ),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "enterprise_gateway", "Enterprise Gateway Documentation", [author], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "enterprise_gateway",
        "Enterprise Gateway Documentation",
        author,
        "EnterpriseGateway",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False


# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = project

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the Pillow.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    "ipython": ("https://ipython.readthedocs.io/en/stable/", None),
    "jupyter": ("https://jupyter.readthedocs.io/en/latest/", None),
}

spelling_lang = "en_US"
spelling_word_list_filename = "spelling_wordlist.txt"

# Read The Docs
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

# if not on_rtd:  # only import and set the theme if we're building docs locally
#    import sphinx_rtd_theme
#    html_theme = 'alabaster'
#    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# otherwise, readthedocs.org uses their theme by default, so no need to specify it


================================================
FILE: docs/source/contributors/contrib.md
================================================
# Contributing to Jupyter Enterprise Gateway

Thank you for your interest in Jupyter Enterprise Gateway! If you would like to contribute to the
project please first take a look at the
[Project Jupyter Contributor Documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html).

Enterprise Gateway has recently joined the [Jupyter Server organization](https://github.com/jupyter-server). Please check out our [team compass page](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) and try to attend our weekly dev meeting as we have a common goal of making all Jupyter server-side applications better!

Prior to your contribution, we strongly recommend getting acquainted with Enterprise Gateway by checking
out the [System Architecture](system-architecture.md) and [Development Workflow](devinstall.md) pages.


================================================
FILE: docs/source/contributors/debug.md
================================================
# Debugging Jupyter Enterprise Gateway

This page discusses how to go about debugging Enterprise Gateway. We also provide troubleshooting information
in our [Troubleshooting Guide](../other/troubleshooting.md).

## Configuring your IDE

While your mileage may vary depending on which IDE you are using, the steps below (using PyCharm as an example) should be useful for configuring a debugging session for Enterprise Gateway, with minimal adjustments needed for other IDEs.

### Creating a new Debug Configuration

Go to Run->Edit Configuration and create a new python configuration with the following settings:

![Enterprise Gateway debug configuration](../images/debug_configuration.png)

**Script Path:**

```bash
/Users/jovyan/opensource/jupyter/elyra/scripts/jupyter-enterprisegateway
```

**Parameters:**

```bash
--ip=0.0.0.0
--log-level=DEBUG
--EnterpriseGatewayApp.yarn_endpoint="http://elyra-fyi-node-1.fyre.ibm.com:8088/ws/v1/cluster"
--EnterpriseGatewayApp.remote_hosts=['localhost']
```

**Environment Variables:**

```bash
EG_ENABLE_TUNNELING=False
```

**Working Directory:**

```bash
/Users/jovyan/opensource/jupyter/elyra/scripts
```
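
If you want to sanity-check these settings outside the IDE first, the equivalent invocation can be run from a terminal. This is a sketch using the example paths and endpoint shown above; adjust them for your environment:

```bash
cd /Users/jovyan/opensource/jupyter/elyra/scripts
EG_ENABLE_TUNNELING=False ./jupyter-enterprisegateway \
  --ip=0.0.0.0 \
  --log-level=DEBUG \
  --EnterpriseGatewayApp.yarn_endpoint="http://elyra-fyi-node-1.fyre.ibm.com:8088/ws/v1/cluster" \
  --EnterpriseGatewayApp.remote_hosts="['localhost']"
```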

### Running in debug mode

Now that the necessary configuration is in place, go to Run->Debug, select the debug configuration
you just created, and happy debugging!


================================================
FILE: docs/source/contributors/devinstall.md
================================================
# Development Workflow

This page provides instructions for setting up a development environment for the [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway)
server. It also includes common steps in the developer workflow, such as building Enterprise Gateway,
running tests, building docs, and packaging kernel specifications.

## Prerequisites

Install GNU make on your system.

## Clone the repo

Clone this repository into a local directory.

```bash
# make a directory under your HOME directory to put the source code
mkdir -p ~/projects
cd !$

# clone this repo
git clone https://github.com/jupyter-server/enterprise_gateway.git
```

## Make

Enterprise Gateway's build environment is centered around `make` and the corresponding [`Makefile`](https://github.com/jupyter-server/enterprise_gateway/blob/main/Makefile).

Entering `make` with no parameters yields the following:

```
activate                       Print instructions to activate the virtualenv (default: enterprise-gateway-dev)
clean-env                      Remove conda env
clean-images                   Remove docker images (includes kernel-based images)
clean-kernel-images            Remove kernel-based images
clean                          Make a clean source tree
dist                           Make source, binary, kernelspecs and helm chart distributions to dist folder
docker-images                  Build docker images (includes kernel-based images)
docs                           Make HTML documentation
env                            Make a dev environment
helm-chart                     Make helm chart distribution
itest-docker-debug             Run integration tests (optionally) against docker container with print statements
itest-docker                   Run integration tests (optionally) against docker swarm
itest-yarn-debug               Run integration tests (optionally) against docker demo (YARN) container with print statements
itest-yarn                     Run integration tests (optionally) against docker demo (YARN) container
kernel-images                  Build kernel-based docker images
kernelspecs                    Create archives with sample kernelspecs
lint                           Check code style
release                        Make a wheel + source release on PyPI
run-dev                        Make a server in jupyter_websocket mode
test-install                   Install and minimally run EG with the wheel and tar distributions
test                           Run unit tests
```

Some of the more useful commands are listed below.

## Build the conda environment

Build a Python 3 conda environment that can be used to run
the Enterprise Gateway server within an IDE. This may be necessary prior
to [debugging Enterprise Gateway](./debug.md), depending on your local Python environment.
See [Conda's Managing environments](https://docs.conda.io/projects/conda/en/stable/user-guide/tasks/manage-environments.html#managing-environments)
for background on environments and why you may find them useful as you develop on Enterprise Gateway.

```bash
make env
```

By default, the env built will be named `enterprise-gateway-dev`. To produce a different conda env,
you can specify the name via the `ENV=` parameter.

```bash
make ENV=my-conda-env env
```

To delete your existing environment, use the `clean-env` task.

```bash
make clean-env
```

## Build the wheel file

Build a wheel file that can then be installed via `pip install`.

```bash
make bdist
```

The wheel file will reside in the `dist` directory.

## Build the kernelspec tar file

Enterprise Gateway includes several sets of kernel specifications for each of the three primary kernels: `IPython Kernel`, `IRkernel`,
and `Apache Toree` to demonstrate remote kernels and their corresponding launchers. These sets of files are then added to tar files corresponding to their target resource managers. In addition, a _combined_ tar file is also built containing all kernel specifications. Like the wheel file, these tar files will reside in the `dist` directory.

```bash
make kernelspecs
```

```{note}
Because the scala launcher requires a jar file, `make kernelspecs` requires the use of `sbt` to build the
scala launcher jar. Please consult the [sbt site](https://www.scala-sbt.org/) for directions to
install/upgrade `sbt` on your platform. We currently use version 1.3.12.
```

## Build distribution files

Builds the files necessary for a given release: the wheel file, the source tar file, and the kernel specification tar
files. This is essentially a helper target consisting of the `bdist`, `sdist`, and `kernelspecs` targets.

```bash
make dist
```

## Run the Enterprise Gateway server

Run an instance of the Enterprise Gateway server.

```bash
make run-dev
```

Then access the running server at the URL printed in the console.
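
To verify that the server is responding, you can query its kernelspecs endpoint. The sketch below assumes the default port of `8888`; use the URL printed in the console:

```bash
curl http://localhost:8888/api/kernelspecs
```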

## Build the docs

Run Sphinx to build the HTML documentation.

```bash
make docs
```

This command actually issues `make requirements html` from the `docs` sub-directory.

## Run the unit tests

Run the unit test suite.

```bash
make test
```

To run a subset of tests, pass the `TEST` argument to the make command as shown below:

```bash
make test TEST="test_gatewayapp.py"
make test TEST="test_gatewayapp.py::TestGatewayAppConfig"
make test TEST="test_gatewayapp.py::TestGatewayAppConfig::test_config_env_vars_bc"
```

## Run the integration tests

Run the integration test suite.

These tests will bootstrap the [`elyra/enterprise-gateway-demo`](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) docker image, which runs Jupyter Enterprise Gateway
with Apache Spark under the YARN resource manager, and perform various tests for each kernel in local, YARN client, and YARN cluster modes.

```bash
make itest-yarn
```

## Build the docker images

The following can be used to build all docker images used within the project. See [docker images](docker.md) for specific details.

```bash
make docker-images
```

If you only want to build the kernel images, use

```bash
make kernel-images
```


================================================
FILE: docs/source/contributors/docker.md
================================================
# Docker Images

All docker images can be pulled from docker hub's [elyra organization](https://hub.docker.com/u/elyra/) and their docker files can be found in the github repository in the appropriate directory of [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker).

Local images can also be built via `make docker-images`.

```{note}
Base images and versions change over time. Check the Dockerfiles in [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker) for the current base images used in each build.
```

The following sections describe the docker images used within Kubernetes and Docker Swarm environments.

## elyra/enterprise-gateway

The primary image for Kubernetes and Docker Swarm support, [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) contains the Enterprise Gateway server software and default kernel specifications. For Kubernetes it is deployed using the [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway). For Docker Swarm, deployment can be accomplished using [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml).

We recommend that a persistent/mounted volume be used so that the kernel specifications can be accessed outside the container since we've found those to require post-deployment modifications from time to time.

## elyra/kernel-py

Image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/) contains the IPython kernel. It is currently built on the [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) image with additional support necessary for remote operation.

## elyra/kernel-spark-py

Image [elyra/kernel-spark-py](https://hub.docker.com/r/elyra/kernel-spark-py/) is built on [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py) and includes the Spark 2.4 distribution for use in Kubernetes clusters. Please note that using the kernel with Spark in a Docker Swarm configuration probably won't yield the expected results.

## elyra/kernel-tf-py

Image [elyra/kernel-tf-py](https://hub.docker.com/r/elyra/kernel-tf-py/) contains the IPython kernel. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image with additional support necessary for remote operation.

## elyra/kernel-scala

Image [elyra/kernel-scala](https://hub.docker.com/r/elyra/kernel-scala/) contains the Scala (Apache Toree) kernel and is built on [elyra/spark](https://hub.docker.com/r/elyra/spark) which is, itself, built using the scripts provided by the Spark 2.4 distribution for use in Kubernetes clusters. As a result, using the kernel with Spark in a Docker Swarm configuration probably won't yield the expected results.

Since Apache Toree is currently tied to Spark, creation of a _vanilla_ mode Scala kernel is not high on our current set of priorities.

## elyra/kernel-r

Image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/) contains the IRKernel and is currently built on the [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/) image.

## elyra/kernel-spark-r

Image [elyra/kernel-spark-r](https://hub.docker.com/r/elyra/kernel-spark-r/) also contains the IRKernel but is built on [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r) and includes the Spark 2.4 distribution for use in Kubernetes clusters.

## Ancillary Docker Images

The project produces two docker images to make testing easier: `elyra/demo-base` and `elyra/enterprise-gateway-demo`.

### elyra/demo-base

The [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) image is considered the base image upon which [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) is built. It consists of a Hadoop YARN installation that includes Spark, Java, miniconda, and various kernel installations.

The primary use of this image is to quickly build elyra/enterprise-gateway images for testing and development purposes. To build a local image, run `make demo-base`.

This image can be used to start a separate Hadoop YARN cluster that, when combined with another instance of elyra/enterprise-gateway, can better demonstrate remote kernel functionality.

### elyra/enterprise-gateway-demo

Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) also includes the various example kernel specifications contained in the repository.

By default, this container will start with enterprise gateway running as a service user named `jovyan`. This user is enabled for `sudo` so that it can emulate other users where necessary. Other users included in this image are `elyra`, `bob` and `alice` (names commonly used in security-based examples).

We plan on producing one image per release to the [enterprise-gateway-demo docker repo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) where the image's tag reflects the corresponding release.

To build a local image, run `make enterprise-gateway-demo`. Because this is a development build, the tag for this image will not reflect the value of the VERSION variable in the root `Makefile` but will be 'dev'.


================================================
FILE: docs/source/contributors/index.rst
================================================
Contributors Guide
==================

These pages target people who are interested in contributing directly to the Jupyter Enterprise Gateway Project.

.. admonition:: Use cases

    - *As a contributor, I want to learn more about kernel management within the Jupyter ecosystem.*
    - *As a contributor, I want to make Enterprise Gateway a more stable service for my organization and the community as a whole.*
    - *As a contributor, I'm interested in adding the ability for Enterprise Gateway to be highly available and fault tolerant.*

.. note::
   As a *contributor*, we encourage you to be familiar with all of the guides (Users, Developers, Operators) to best support Enterprise Gateway.  This guide provides an overview of Enterprise Gateway along with instructions on how to get set up.


.. toctree::
   :maxdepth: 1
   :name: contributors

   contrib
   system-architecture
   docker
   devinstall
   sequence-diagrams
   debug
   roadmap


================================================
FILE: docs/source/contributors/roadmap.md
================================================
# Project Roadmap

We have plenty to do, now and in the future. Here's where we're headed:

## Completed in 3.x

- Spark 3.0 support (including pod template files)
- Spark Operator support via `SparkOperatorProcessProxy`
- Custom Resource Definition support via `CustomResourceProcessProxy`
- Session persistence (file-based and webhook-based)
- `KERNEL_VOLUMES` and `KERNEL_VOLUME_MOUNTS` for Kubernetes and Spark Operator kernels
- Authorizer class override support (`EG_AUTHORIZER_CLASS`)
- SSTI prevention in `KERNEL_POD_NAME` template substitution
- Python 3.9 and below dropped; Python 3.10+ required

## Planned for 4.0

- Kernel Provisioners
  - Provisioners will replace process proxies and enable Enterprise Gateway to remove its cap on `jupyter_client < 7` and `jupyter_server < 2`.
- Parameterized Kernels
  - Enable the ability to prompt for parameters
  - These will likely be based on kernel provisioners

## Wish list

- High Availability
  - Session persistence using a shared location (NoSQL DB) (file-based persistence has been implemented)
  - Active/active support
- Multi-gateway support on client-side
  - Enables the ability for a single Jupyter Server to be configured against multiple Gateway servers simultaneously. This work will primarily be in Jupyter Server.
- Pluggable load-balancers into `DistributedProcessProxy` (currently uses simple round-robin)
- Support for other resource managers
  - Slurm?
  - Mesos?
- User Environments
  - Improve the way user files are made available to remote kernels
- Administration UI
  - Dashboard with running kernels
  - Lifecycle management
  - Time running, stop/kill, Profile Management, etc

We'd love to hear any other use cases you might have and look forward to your contributions to Jupyter Enterprise Gateway!


================================================
FILE: docs/source/contributors/sequence-diagrams.md
================================================
# Sequence Diagrams

The following sections contain various sequence diagrams you might find helpful. We plan to add
diagrams based on demand and contributions.

## Kernel launch: Jupyter Lab to Enterprise Gateway

This diagram depicts the interactions between components when a kernel start request
is submitted from Jupyter Lab running against [Jupyter Server configured to use
Enterprise Gateway](../users/connecting-to-eg.md). The diagram also includes the
retrieval of kernel specifications (kernelspecs) prior to the kernel's initialization.

```{mermaid}
    sequenceDiagram
        participant JupyterLab
        participant JupyterServer
        participant EnterpriseGateway
        participant ProcessProxy
        participant Kernel
        participant ResourceManager
        Note left of JupyterLab: fetch kernelspecs
        JupyterLab->>JupyterServer: https GET api/kernelspecs
        JupyterServer->>EnterpriseGateway: https GET api/kernelspecs
        EnterpriseGateway-->>JupyterServer: api/kernelspecs response
        JupyterServer-->>JupyterLab: api/kernelspecs response

        Note left of JupyterLab: kernel initialization
        JupyterLab->>JupyterServer: https POST api/sessions
        JupyterServer->>EnterpriseGateway: https POST api/kernels
        EnterpriseGateway->>ProcessProxy: launch_process()
        ProcessProxy->>Kernel: launch kernel
        ProcessProxy->>ResourceManager: confirm startup
        Kernel-->>ProcessProxy: connection info
        ResourceManager-->>ProcessProxy: state & host info
        ProcessProxy-->>EnterpriseGateway: complete connection info
        EnterpriseGateway->>Kernel: TCP socket requests
        Kernel-->>EnterpriseGateway: TCP socket handshakes
        EnterpriseGateway-->>JupyterServer: api/kernels response
        JupyterServer-->>JupyterLab: api/sessions response

        JupyterLab->>JupyterServer: ws GET api/kernels
        JupyterServer->>EnterpriseGateway: ws GET api/kernels
        EnterpriseGateway->>Kernel: kernel_info_request message
        Kernel-->>EnterpriseGateway: kernel_info_reply message
        EnterpriseGateway-->>JupyterServer: websocket upgrade response
        JupyterServer-->>JupyterLab: websocket upgrade response
```


================================================
FILE: docs/source/contributors/system-architecture.md
================================================
# System Architecture

Below are sections presenting details of the Enterprise Gateway internals and other related items. While we will attempt to keep this information consistent with the code, the ultimate answers are in the code itself.

## Enterprise Gateway Process Proxy Extensions

Enterprise Gateway is a follow-on project to Jupyter Kernel Gateway with additional abilities to support remote kernel sessions on behalf of multiple users within resource-managed frameworks such as [Apache Hadoop YARN](https://apache.github.io/hadoop/hadoop-yarn/hadoop-yarn-site/YARN.html) or [Kubernetes](https://kubernetes.io/). Enterprise Gateway introduces these capabilities by extending the existing class hierarchies for the `AsyncKernelManager` and `AsyncMultiKernelManager` classes, along with an additional abstraction known as a _process proxy_.

### Overview

At its basic level, a running kernel consists of two components for its communication - a set of ports and a process.

### Kernel Ports

The first component is a set of five ZeroMQ ports used to convey the Jupyter protocol between the Notebook
and the underlying kernel. In addition to the five ports, there are an IP address, a key, and a signature scheme
indicator used to interpret the key. These eight pieces of information are conveyed to the kernel via a
JSON file known as the connection file.

Within the base framework, the IP address must be a local IP address, meaning that the kernel cannot be
remote from the library launching the kernel. The enforcement of this restriction lies in the `jupyter_client` module - two levels below Enterprise Gateway.

This component is the core communication mechanism between the Notebook and the kernel. All aspects, including
lifecycle management, can occur via this component. The kernel process (below) comes into play only when
port-based communication becomes unreliable or additional information is required.
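
For reference, a typical connection file looks like the following. The port numbers and key are illustrative; in practice the file also carries a `transport` entry:

```json
{
  "shell_port": 53794,
  "iopub_port": 53795,
  "stdin_port": 53796,
  "control_port": 53797,
  "hb_port": 53798,
  "ip": "127.0.0.1",
  "key": "6bacde72-0c1a4d6f8e2b3c4d5e6f7a8b",
  "transport": "tcp",
  "signature_scheme": "hmac-sha256"
}
```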

### Kernel Process

When a kernel is launched, one of the fields of the kernel's associated kernel specification is used to
identify a command to invoke. In today's implementation, this command information, along with other
environment variables (also described in the kernel specification), is passed to `popen()`, which returns
a process instance. This instance supports four basic methods following its creation:

1. `poll()` to determine if the process is still running
1. `wait()` to block the caller until the process has terminated
1. `send_signal(signum)` to send a signal to the process
1. `kill()` to terminate the process

As you can see, other forms of process communication can be achieved by abstracting the launch mechanism.
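
To make that contract concrete, here is a minimal sketch (illustrative only, not the actual class definition) of the interface any such process abstraction must satisfy:

```python
from abc import ABC, abstractmethod


class KernelProcessSketch(ABC):
    """Illustrative sketch of the four-method contract of the object returned by popen()."""

    @abstractmethod
    def poll(self):
        """Return None while the process is still running."""

    @abstractmethod
    def wait(self):
        """Block the caller until the process has terminated."""

    @abstractmethod
    def send_signal(self, signum):
        """Send the signal `signum` to the process."""

    @abstractmethod
    def kill(self):
        """Forcibly terminate the process."""
```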

### Kernel Specifications

The primary vehicle for indicating that a given kernel should be handled in a different manner is the kernel
specification, otherwise known as the _kernel spec_. Enterprise Gateway leverages the natively extensible `metadata` stanza within the kernel specification to introduce a new stanza named `process_proxy`.

The `process_proxy` stanza identifies the class that provides the kernel's process abstraction
(while allowing for future extensions). This class then provides the kernel's lifecycle management operations relative to the managed resource or functional equivalent.

Here's an example of a kernel specification that uses the `DistributedProcessProxy` class for its abstraction:

```json
{
  "language": "scala",
  "display_name": "Spark - Scala (YARN Client Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID}",
    "__TOREE_OPTS__": "",
    "LAUNCH_OPTS": "",
    "DEFAULT_INTERPRETER": "Scala"
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

See the [Process Proxy](#process-proxy) section for more details on process proxies and those provided as part of the Enterprise Gateway release.

## Remote Mapping Kernel Manager

`RemoteMappingKernelManager` is a subclass of Jupyter Server's [`AsyncMappingKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/services/kernels/kernelmanager.py#L633) and provides two functions.

1. It provides the vehicle for making the `RemoteKernelManager` class known and available.
1. It overrides `start_kernel` to look at the target kernel's kernel spec to see if it contains a remote process proxy class entry. If so, it records the name of the class in its member variable to be made available to the kernel start logic.

## Remote Kernel Manager

`RemoteKernelManager` is a subclass of jupyter_client's [`AsyncIOLoopKernelManager` class](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/ioloop/manager.py#L62) and provides the
primary integration points for remote process proxy invocations. It implements a number of methods which allow
Enterprise Gateway to circumvent functionality that might otherwise be prevented. As a result, some of these overrides may
not be necessary if lower layers of the Jupyter framework were modified. For example, some methods are required
because Jupyter makes assumptions that the kernel process is local.

Its primary functionality, however, is to override the `_launch_kernel` method (which is the method closest to
the process invocation) and instantiate the appropriate process proxy instance - which is then returned in
place of the process instance used in today's implementation. Any interaction with the process then takes
place via the process proxy.

Both the `RemoteMappingKernelManager` and `RemoteKernelManager` class definitions can be found in
[remotemanager.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/kernels/remotemanager.py).
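
Conceptually, the override looks something like the following sketch. Attribute names such as `process_proxy_class` and `proxy_config` are illustrative here; see [remotemanager.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/kernels/remotemanager.py) for the real logic:

```python
class RemoteKernelManagerSketch:
    """Illustrative sketch, not the actual implementation."""

    async def _launch_kernel(self, kernel_cmd, **kwargs):
        # Instantiate the process proxy class named in the kernelspec's
        # process_proxy stanza (both attribute names are hypothetical).
        proxy = self.process_proxy_class(kernel_manager=self, proxy_config=self.proxy_config)
        # The process proxy instance stands in for the Popen instance.
        return await proxy.launch_process(kernel_cmd, **kwargs)
```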

## Process Proxy

Process proxy classes derive from the abstract base class `BaseProcessProxyABC` - which defines the four basic
process methods. There are two immediate subclasses of `BaseProcessProxyABC` - `LocalProcessProxy`
and `RemoteProcessProxy`.

`LocalProcessProxy` is essentially a pass-through to the current implementation. Kernel specifications that do not contain
a `process_proxy` stanza will use `LocalProcessProxy`.

`RemoteProcessProxy` is an abstract base class representing remote kernel processes. Currently, there are seven
built-in subclasses of `RemoteProcessProxy` ...

- `DistributedProcessProxy` - largely a proof of concept class, `DistributedProcessProxy` is responsible for the launch
  and management of kernels distributed across an explicitly defined set of hosts using ssh. Hosts are determined
  via a round-robin algorithm (that we should make pluggable someday).
- `YarnClusterProcessProxy` - is responsible for the discovery and management of kernels hosted as Hadoop YARN applications
  within a managed cluster.
- `KubernetesProcessProxy` - is responsible for the discovery and management of kernels hosted
  within a Kubernetes cluster.
- `DockerSwarmProcessProxy` - is responsible for the discovery and management of kernels hosted
  within a Docker Swarm cluster.
- `DockerProcessProxy` - is responsible for the discovery and management of kernels hosted
  within a Docker configuration. Note: because these kernels will always run local to the corresponding Enterprise Gateway instance, these process proxies are of limited use.
- `ConductorClusterProcessProxy` - is responsible for the discovery and management of kernels hosted
  within an IBM Spectrum Conductor cluster.
- `SparkOperatorProcessProxy` - is responsible for the discovery and management of kernels hosted
  within a Kubernetes cluster but created as a `SparkApplication` instead of a Pod. The `SparkApplication` is a Kubernetes custom resource
  defined by the [spark-on-k8s-operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator) project, which
  organizes the various Spark-on-Kubernetes components and makes them easier to configure.

```{note}
Before you run a kernel associated with `SparkOperatorProcessProxy`, ensure that the [Kubernetes Operator for Apache Spark is installed](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation) in your Kubernetes cluster.
```

You might notice that the last six process proxies do not necessarily control the _launch_ of the kernel. This is
because the native jupyter framework is utilized such that the script that is invoked by the framework is what
launches the kernel against that particular resource manager. As a result, the _startup time_ actions of these process
proxies are more about discovering where the kernel _landed_ within the cluster in order to establish a mechanism for
determining lifetime. _Discovery_ typically consists of using the resource manager's API to locate the kernel whose name includes its kernel ID
in some fashion.

On the other hand, the `DistributedProcessProxy` essentially wraps the kernel specification's argument vector (i.e., invocation
string) in a remote shell since the host is determined by Enterprise Gateway, eliminating the discovery step from
its implementation.

These class definitions can be found in the
[processproxies package](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies). However,
Enterprise Gateway is architected such that additional process proxy implementations can be provided and are not
required to be located within the Enterprise Gateway hierarchy - i.e., we embrace a _bring your own process proxy_ model.

![Process Class Hierarchy](../images/process_proxy_hierarchy.png)

The complete process proxy class hierarchy is:

```text
BaseProcessProxyABC
├── LocalProcessProxy
└── RemoteProcessProxy
    ├── DistributedProcessProxy
    ├── YarnClusterProcessProxy
    ├── ConductorClusterProcessProxy
    └── ContainerProcessProxy
        ├── DockerSwarmProcessProxy
        ├── DockerProcessProxy
        └── KubernetesProcessProxy
            └── CustomResourceProcessProxy
                └── SparkOperatorProcessProxy
```

The process proxy constructor looks as follows:

```python
def __init__(self, kernel_manager, proxy_config):
```

where

- `kernel_manager` is an instance of a `RemoteKernelManager` class.
- `proxy_config` is a dictionary of configuration values present in the `kernel.json` file. These
  values can be used to override or amend various global configuration values on a per-kernel basis. See
  [Process Proxy Configuration](#process-proxy-configuration) for more information.

```python
@abstractmethod
def launch_process(self, kernel_cmd, **kw):
```

where

- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is
  an artifact of the kernel manager `_launch_kernel()` method.
- `**kw` is a set of keyword arguments that includes an `env` dictionary element consisting of the names
  and values of the environment variables to set at launch time.

The `launch_process()` method is the primary method exposed on the Process Proxy classes. It's responsible for
performing the appropriate actions relative to the target type. The process must be in a running state prior
to returning from this method - otherwise attempts to use the connections will not be successful since the
(remote) kernel needs to have created the sockets.

All process proxy subclasses should ensure `BaseProcessProxyABC.launch_process()` is called - which will automatically
place a variable named `KERNEL_ID` (consisting of the kernel's unique ID) into the corresponding kernel's environment
variable list since `KERNEL_ID` is a primary mechanism for associating remote applications to a specific kernel instance.
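
For example, a custom subclass would typically delegate to the base class first and then perform its resource-specific launch. This is a sketch only; `_submit_to_resource_manager` is a hypothetical helper:

```python
class MyResourceManagerProcessProxy(BaseProcessProxyABC):  # illustrative subclass
    def launch_process(self, kernel_cmd, **kwargs):
        # Delegating to the base class places KERNEL_ID into the kernel's
        # environment variable list.
        super().launch_process(kernel_cmd, **kwargs)
        # Hypothetical: hand the command and env off to the target resource manager.
        self._submit_to_resource_manager(kernel_cmd, kwargs.get("env", {}))
        return self
```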

```python
def poll(self):
```

The `poll()` method is used by the Jupyter framework to determine if the process is still alive. By default, the
framework's heartbeat mechanism calls `poll()` every 3 seconds. This method returns `None` if the process is still running, `False` otherwise (mirroring the `popen()` contract, where a non-`None` return indicates the process has terminated).

```python
def wait(self):
```

The `wait()` method is used by the Jupyter framework when terminating a kernel. Its purpose is to block return
to the caller until the process has terminated. Since this could be a while, it's best to return control in a
reasonable amount of time since the kernel instance is destroyed anyway. This method does not return a value.

```python
def send_signal(self, signum):
```

The `send_signal()` method is used by the Jupyter framework to send a signal to the process. Currently, `SIGINT (2)`
(to interrupt the kernel) is the signal sent.

It should be noted that for normal processes - both local and remote - `poll()` and `kill()` functionality can
be implemented via `send_signal` with `signum` values of `0` and `9`, respectively.

This method returns `None` if the process is still running, `False` otherwise.

```python
def kill(self):
```

The `kill()` method is used by the Jupyter framework to terminate the kernel process. This method is only necessary when the request to shut down the kernel - sent via the control port of the ZeroMQ ports - does not respond in an appropriate amount of time.

This method returns `None` if the process is killed successfully, `False` otherwise.

### RemoteProcessProxy

As noted above, `RemoteProcessProxy` is an abstract base class that derives from `BaseProcessProxyABC`. Subclasses
of `RemoteProcessProxy` must implement two methods - `confirm_remote_startup()` and `handle_timeout()`:

```python
@abstractmethod
def confirm_remote_startup(self, kernel_cmd, **kw):
```

where

- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an
  artifact of the kernel manager `_launch_kernel()` method.
- `**kw` is a set of keyword arguments.

`confirm_remote_startup()` is responsible for detecting that the remote kernel has been appropriately launched and is ready to receive requests. This can include gathering application status from the remote resource manager but is really a function of having received the connection information from the remote kernel launcher. (See [Kernel Launchers](#kernel-launchers))

```python
@abstractmethod
def handle_timeout(self):
```

`handle_timeout()` is responsible for detecting that the remote kernel has failed to start up within an acceptable time frame. It
should be called from `confirm_remote_startup()`. If the timeout expires, `handle_timeout()` should raise HTTP
Error 500 (`Internal Server Error`).
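
Taken together, a subclass's startup-confirmation logic typically follows the shape below. This is a sketch; the two polling helpers are hypothetical names, not part of the actual API:

```python
def confirm_remote_startup(self, kernel_cmd, **kw):
    ready = False
    while not ready:
        # Raises HTTP Error 500 once the launch deadline passes.
        self.handle_timeout()
        # Hypothetical: refresh application state from the resource manager.
        self._query_resource_manager_state()
        # Hypothetical: True once the launcher has returned connection info.
        ready = self._connection_info_received()
```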

Kernel launch timeout expiration is expressed via the environment variable `KERNEL_LAUNCH_TIMEOUT`. If this
value does not exist, it defaults to the Enterprise Gateway process environment variable `EG_KERNEL_LAUNCH_TIMEOUT` - which
defaults to 30 seconds if unspecified. Since all `KERNEL_` environment variables "flow" from the Notebook server, the launch
timeout can be specified as a client attribute of the Notebook session.
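
For example, a client can carry the timeout in the kernel start request itself. The sketch below shows a possible JSON payload for `POST /api/kernels`; the kernel name is illustrative:

```json
{
  "name": "spark_python_yarn_cluster",
  "env": {
    "KERNEL_LAUNCH_TIMEOUT": "60"
  }
}
```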

#### YarnClusterProcessProxy

As part of its base offering, Enterprise Gateway provides an implementation of a process proxy that communicates with the Hadoop YARN resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources.

Derived from `RemoteProcessProxy`, `YarnClusterProcessProxy` uses the `yarn-api-client` library to locate the kernel and monitor its lifecycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports.

This process proxy is reliant on the `--EnterpriseGatewayApp.yarn_endpoint` command line option or the `EG_YARN_ENDPOINT` environment variable to determine where the YARN resource manager is located. To accommodate increased flexibility, the endpoint definition can be defined within the process proxy stanza of the kernel specification, enabling the ability to direct specific kernels to different YARN clusters.

In cases where the YARN cluster is configured for high availability, then the `--EnterpriseGatewayApp.alt_yarn_endpoint` command line option or the `EG_ALT_YARN_ENDPOINT` environment variable should also be defined. When set, the underlying `yarn-api-client` library will choose the active Resource Manager between the two.

```{note}
If Enterprise Gateway is running on an edge node of the cluster and has a valid `yarn-site.xml` file in HADOOP_CONF_DIR, neither of these values are required (default = None).  In such cases, the `yarn-api-client` library will choose the active Resource Manager from the configuration files.
```

```{seealso}
[Hadoop YARN deployments](../operators/deploy-yarn-cluster.md) in the Operators Guide for details.
```

#### DistributedProcessProxy

Like `YarnClusterProcessProxy`, Enterprise Gateway also provides an implementation of a basic
round-robin remoting mechanism that is part of the `DistributedProcessProxy` class. This class
uses the `--EnterpriseGatewayApp.remote_hosts` command line option (or `EG_REMOTE_HOSTS`
environment variable) to determine on which hosts a given kernel should be launched. It uses
a basic round-robin algorithm to index into the list of remote hosts for selecting the target
host. It then uses ssh to launch the kernel on the target host. As a result, all kernel specification
files must reside on the remote hosts in the same directory structure as on the Enterprise
Gateway server.

It should be noted that kernels launched with this process proxy run in YARN _client_ mode - so their
resources (within the kernel process itself) are not managed by the Hadoop YARN resource manager.

Like the YARN endpoint parameter, the `remote_hosts` parameter can be specified within the
process proxy configuration to override the global value, enabling finer-grained kernel distributions.

```{seealso}
[Distributed deployments](../operators/deploy-distributed.md) in the Operators Guide for details.
```

#### KubernetesProcessProxy

With the popularity of Kubernetes within the enterprise, Enterprise Gateway provides an implementation
of a process proxy that communicates with the Kubernetes resource manager via the Kubernetes API. Unlike
the other offerings, in the case of Kubernetes, Enterprise Gateway is itself deployed within the Kubernetes
cluster as a _Service_ and _Deployment_. The primary vehicle by which this is accomplished is via [Helm](https://helm.sh/) and Enterprise Gateway provides a set of [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway) files to simplify deployment.

```{seealso}
[Kubernetes deployments](../operators/deploy-kubernetes.md) in the Operators Guide for details.
```

#### DockerSwarmProcessProxy

Enterprise Gateway provides an implementation of a process proxy that communicates with the Docker Swarm resource manager via the Docker API. When used, the kernels are launched as swarm services and can reside anywhere in the managed cluster. To leverage kernels configured in this manner, Enterprise Gateway can be deployed
either as a Docker Swarm _service_ or a traditional Docker container.

A similar `DockerProcessProxy` implementation has also been provided. When used, the corresponding kernel will be launched as a traditional docker container that runs local to the launching Enterprise Gateway instance. As a result, its use has limited value.

```{seealso}
[Docker and Docker Swarm deployments](../operators/deploy-docker.md) in the Operators Guide for details.
```

#### ConductorClusterProcessProxy

Enterprise Gateway also provides an implementation of a process proxy
that communicates with an IBM Spectrum Conductor resource manager that has been instructed to launch a kernel
on one of its worker nodes. The node on which the kernel is launched is up to the resource
manager - which enables an optimized distribution of kernel resources.

Derived from `RemoteProcessProxy`, `ConductorClusterProcessProxy` uses Conductor's RESTful API
to locate the kernel and monitor its lifecycle. However, once the kernel has returned its
connection information, the primary kernel operations naturally take place over the ZeroMQ ports.

This process proxy is reliant on the `--EnterpriseGatewayApp.conductor_endpoint` command line
option or the `EG_CONDUCTOR_ENDPOINT` environment variable to determine where the Conductor resource manager is
located.

```{seealso}
[IBM Spectrum Conductor deployments](../operators/deploy-conductor.md) in the Operators Guide for details.
```

#### CustomResourceProcessProxy

Enterprise Gateway also provides an implementation of a process proxy derived from `KubernetesProcessProxy`
called `CustomResourceProcessProxy`.

Instead of creating kernels based on a Kubernetes pod, `CustomResourceProcessProxy`
manages kernels via a custom resource definition (CRD). For example, `SparkApplication` is a CRD that includes
many components of a Spark-on-Kubernetes application.

If you are going to extend `CustomResourceProcessProxy`, follow the steps below (a skeleton sketch follows the list):

- override the custom-resource-related variables (i.e., `group`, `version`, and `plural`)
  and the `get_container_status` method, with reference to [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py).

- define a jinja template like
  [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2).
  As a generic design, the template file should be named `{crd_group}-{crd_version}` so that you can reuse
  [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py) in the kernelspec.

- define a kernel specification like [spark_python_operator/kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_operator/kernel.json).
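
As a loose illustration of these steps, a subclass might start out like the following sketch (the class name, CRD coordinates, and method body are hypothetical; see [crd.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/crd.py) for the actual base class):

```python
from enterprise_gateway.services.processproxies.crd import CustomResourceProcessProxy


class SparkApplicationProcessProxy(CustomResourceProcessProxy):
    """Hypothetical proxy that manages SparkApplication custom resources."""

    # Coordinates identifying the custom resource to the Kubernetes API;
    # these also determine the jinja template name ({group}-{version}).
    group = "sparkoperator.k8s.io"
    version = "v1beta2"
    plural = "sparkapplications"

    def get_container_status(self, iteration):
        # Query the custom resource's status via the Kubernetes API and
        # return its current state (e.g., "running") once the kernel is up.
        ...
```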

### Process Proxy Configuration

Each `kernel.json`'s `process_proxy` stanza can specify an optional `config` stanza that is converted
into a dictionary of name/value pairs and passed as an argument to the constructor of the process proxy
class identified by the `class_name` entry.

How each dictionary entry is interpreted is completely a function of the constructor relative to that process-proxy
class or its superclass. For example, an alternate list of remote hosts has meaning to the `DistributedProcessProxy` but
not to its superclasses. As a result, the superclass constructors will not attempt to interpret that value.
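
As a rough sketch of that flow, a constructor might consume its entries as follows (the `config` handling shown here is illustrative; see the actual constructors in [processproxy.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/processproxy.py) for details):

```python
from enterprise_gateway.services.processproxies.processproxy import RemoteProcessProxy


class MyProcessProxy(RemoteProcessProxy):
    """Hypothetical proxy illustrating per-kernel `config` interpretation."""

    def __init__(self, kernel_manager, proxy_config):
        # Entries meaningful to superclasses are consumed by super().__init__();
        # entries this class understands are consumed here, others are ignored.
        super().__init__(kernel_manager, proxy_config)
        if proxy_config.get("remote_hosts"):
            self.hosts = proxy_config.get("remote_hosts").split(",")
```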

In addition, certain dictionary entries can override or amend system-level configuration values set on the command-line, thereby
allowing administrators to tune behaviors down to the kernel level. For example, an administrator might want to
constrain Python kernels configured to use specific resources to an entirely different set of hosts (and ports) that other
remote kernels might be targeting in order to isolate valuable resources. Similarly, an administrator might want to
only authorize specific users to a given kernel.

In such situations, one might find the following `process_proxy` stanza:

```json
{
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy",
      "config": {
        "remote_hosts": "priv_host1,priv_host2",
        "port_range": "40000..41000",
        "authorized_users": "bob,alice"
      }
    }
  }
}
```

In this example, the kernel associated with this `kernel.json` file is relegated to the hosts `priv_host1` and `priv_host2`
where kernel ports will be restricted to a range between `40000` and `41000` and only users `bob` and `alice` can
launch such kernels (provided neither appear in the global set of `unauthorized_users` since denial takes precedence).

For a current enumeration of which system-level configuration values can be overridden or amended on a per-kernel basis
see [Per-kernel overrides](../operators/config-kernel-override.md).

## Kernel Launchers

As noted above, a kernel is considered started once the `launch_process()` method has conveyed its connection information back to the Enterprise Gateway server process. Conveyance of connection information from a remote kernel is the responsibility of the remote kernel _launcher_.

Kernel launchers provide a means of normalizing behaviors across kernels while avoiding kernel modifications. Besides providing a location where connection file creation can occur, they also provide a 'hook' for other kinds of behaviors - like establishing virtual environments or sandboxes, providing collaboration behavior, adhering to port range restrictions, etc.

There are four primary tasks of a kernel launcher:

1. Creation of the connection file and ZMQ ports on the remote (target) system, along with a _gateway listener_ socket
1. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process
1. Invocation of the target kernel
1. Listening for interrupt and shutdown requests from Enterprise Gateway and carrying out the action when appropriate

Kernel launchers are minimally invoked with three parameters, all of which are conveyed via the `argv` stanza of the corresponding `kernel.json` file: the kernel's ID, created by the server and conveyed via the placeholder `{kernel_id}`; a response address, consisting of the Enterprise Gateway server IP and the port on which to return the connection information, represented by the placeholder `{response_address}`; and a public key, used by the launcher to encrypt the AES key that in turn encrypts the kernel's connection information, represented by the placeholder `{public_key}`.

The kernel's ID is identified by the parameter `--RemoteProcessProxy.kernel-id`. Its value (`{kernel_id}`) is essentially used to build the connection file to pass to the to-be-launched kernel, as well as other artifacts like log files.

The response address is identified by the parameter `--RemoteProcessProxy.response-address`. Its value (`{response_address}`) consists of a string of the form `<IPV4:port>` where the IPV4 address points back to the Enterprise Gateway server - which is listening for a response on the provided port. The port's default value is `8877`, but can be specified via the environment variable `EG_RESPONSE_PORT`.

The public key is identified by the parameter `--RemoteProcessProxy.public-key`. Its value (`{public_key}`) is used to encrypt an AES key created by the launcher, which, in turn, encrypts the kernel's connection information. The server, upon receipt of the response, uses the corresponding private key to decrypt the AES key, which it then uses to decrypt the connection information. Both the public and private keys are ephemeral, created upon Enterprise Gateway's startup; they can be ephemeral because they are only needed during a kernel's startup and never again.
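
To make the protocol concrete, here's a simplified sketch of the server-side decryption just described, using the pycrypto API (the field names `"key"` and `"conn_info"` are illustrative rather than the exact wire format):

```python
import base64
import json

from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA


def decrypt_connection_info(raw_response: bytes, private_key_pem: bytes) -> dict:
    # The launcher returns a base64-encoded JSON structure.
    response = json.loads(base64.b64decode(raw_response))

    # Recover the AES key using the server's ephemeral private key.
    rsa_cipher = PKCS1_v1_5.new(RSA.importKey(private_key_pem))
    aes_key = rsa_cipher.decrypt(base64.b64decode(response["key"]), None)

    # Use the AES key to decrypt the connection information payload.
    payload = AES.new(aes_key, AES.MODE_ECB).decrypt(base64.b64decode(response["conn_info"]))
    return json.loads(payload.decode().rstrip())
```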

Here's a [kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) file illustrating these parameters...

```json
{
  "language": "python",
  "display_name": "Spark - Python (YARN Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

Other options supported by launchers include:

- `--RemoteProcessProxy.port-range {port_range}` - passes the configured port range to the launcher, which applies that range to the kernel ports. The port range may be configured globally or on a per-kernel specification basis, as previously described.

- `--RemoteProcessProxy.spark-context-initialization-mode [lazy|eager|none]` - indicates the _timeframe_ in which the spark context will be created.

  - `lazy` (default) attempts to defer initialization as late as possible - although this can vary depending on the
    underlying kernel and launcher implementation.
  - `eager` attempts to create the spark context as soon as possible.
  - `none` skips spark context creation altogether.

  Note that some launchers may not be able to support all modes. For example, the scala launcher uses the Apache Toree
  kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply.
  Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context
  creation occurring immediately, but in the background thereby minimizing the kernel's startup time.

`kernel.json` files also include a `LAUNCH_OPTS` entry in the `env` stanza to allow custom
parameters to be conveyed in the launcher's environment. `LAUNCH_OPTS` is then referenced in
the [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh)
script as the initial arguments to the launcher
(see [launch_ipykernel.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py)) ...

```bash
eval exec \
     "${SPARK_HOME}/bin/spark-submit" \
     "${SPARK_OPTS}" \
     "${PROG_HOME}/scripts/launch_ipykernel.py" \
     "${LAUNCH_OPTS}" \
     "$@"
```

## Extending Enterprise Gateway

Theoretically speaking, enabling a kernel for use in other frameworks amounts to the following:

1. Build a kernel specification file that identifies the process proxy class to be used.
1. Implement the process proxy class such that it supports the four primitive functions of
   `poll()`, `wait()`, `send_signal(signum)` and `kill()` along with `launch_process()`.
1. If the process proxy corresponds to a remote process, derive the process proxy class from
   `RemoteProcessProxy` and implement `confirm_remote_startup()` and `handle_timeout()`.
1. Insert invocation of a launcher (if necessary) which builds the connection file and
   returns its contents on the `{response_address}` socket, following the encryption protocol set forth in the other launchers.

```{seealso}
This topic is covered in the [Developers Guide](../developers/index.rst).
```


================================================
FILE: docs/source/developers/custom-images.md
================================================
# Custom Kernel Images

This section presents the information needed to build a custom kernel image for your own uses with Enterprise Gateway. This is typically necessary if you want to extend an existing image with additional supporting libraries or build an image that encapsulates a different set of functionality altogether.

## Extending Existing Kernel Images

A common form of customization occurs when the existing kernel image serves the fundamentals but the user wishes to extend it with additional libraries to avoid having to install them during Notebook interactions. Since the image already meets the [basic requirements](#requirements-for-custom-kernel-images), this is really just a matter of referencing the existing image in the `FROM` statement and installing the additional libraries. Because the EG kernel images do not run as the `root` user, you may need to switch users to perform the update.

```dockerfile
FROM elyra/kernel-py:VERSION

# Switch to the root user to perform the installation (if necessary).
# Note that Dockerfile comments must begin the line; they cannot trail an instruction.
USER root

RUN pip install my-libraries

# Switch back to the jovyan user
USER $NB_UID
```

## Bringing Your Own Kernel Image

Users that do not wish to extend an existing kernel image must be cognizant of a couple of things.

1. Requirements of a kernel-based image to be used by Enterprise Gateway.
1. Is the base image one from [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks)?

### Requirements for Custom Kernel Images

Custom kernel images require some support files from the Enterprise Gateway repository. These are packaged into a tar file for each release starting in `2.5.0`. This tar file (named `jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz`) is composed of a few files - one bootstrap script and a kernel launcher (one per kernel type).

#### bootstrap-kernel.sh

Enterprise Gateway provides a single [bootstrap-kernel.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh) script that handles the three kernel languages supported out of the box - Python, R, and Scala. When a kernel image is started by Enterprise Gateway, parameters used within the bootstrap-kernel.sh script are conveyed via environment variables. The bootstrap script is then responsible for validating and converting those parameters to meaningful arguments to the appropriate launcher.

#### Kernel Launcher

The kernel launcher, as discussed [here](kernel-launcher.md), does a number of things. In particular, it creates the connection ports and conveys that connection information back to Enterprise Gateway via the socket identified by the response address parameter. Although not a requirement for container-based usage, it is recommended that the launcher be written in the same language as the kernel. (This is more of a requirement when used in applications like Hadoop YARN.)

### About Jupyter Docker-stacks Images

Most of what is presented assumes the base image for your custom image is derived from the [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks) repository. As a result, it's good to cover what makes up those assumptions so you can build your own image independently of the docker-stacks repository.

All images produced from the docker-stacks repository come with a certain user configured. This user is named `jovyan` and is mapped to a user id (UID) of `1000` and a group id (GID) of `100` - named `users`.

The various startup scripts and commands typically reside in `/usr/local/bin` and we recommend trying to adhere to that policy.

The base jupyter image, upon which nearly all images from docker-stacks are built, also contains a `fix-permissions` script that is responsible for _gracefully_ adjusting permissions based on its given parameters. By changing only the necessary permissions, use of this script minimizes the size of the docker layer in which that command is invoked during the build of the docker image.

### Sample Dockerfiles for Custom Kernel Images

Below we provide two working Dockerfiles that produce custom kernel images. One based on an existing image from Jupyter docker-stacks, the other from an independent base image.

#### Custom Kernel Image Built on Jupyter Image

Here's an example Dockerfile that installs the minimally necessary items for a Python-based kernel image built on the docker-stack image `jupyter/scipy-notebook`. Note: the string `VERSION` must be replaced with the appropriate value.

```dockerfile
# Choose a base image.  Preferably one from https://github.com/jupyter/docker-stacks
FROM jupyter/scipy-notebook:61d8aaedaeaf

# Switch user to root since, if from docker-stacks, it's probably jovyan
USER root

# Install any packages required for the kernel-wrapper.  If the image
# does not contain the target kernel (i.e., IPython, IRkernel, etc.),
# it should be installed as well.
RUN pip install pycrypto
RUN pip install pycrypto

# Download and extract the enterprise gateway kernel launchers and bootstrap
# files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID.
RUN wget https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\
        tar -xvf jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz -C /usr/local/bin &&\
        rm -f jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\
        fix-permissions /usr/local/bin

# Switch user back to jovyan and setup language and default CMD
USER $NB_UID
ENV KERNEL_LANGUAGE python
CMD /usr/local/bin/bootstrap-kernel.sh
```

#### Independent Custom Kernel Image

If your base image is not from docker-stacks, it is recommended that you NOT run the image as USER `root` and create an _image user_ that is not UID 0. For this example, we will create the `jovyan` user with UID `1000` and a primary group of `users`, GID `100`. Note that Enterprise Gateway makes no assumption relative to the user in which the kernel image is running.

Aside from configuring the image user, all other aspects of customization are the same. In this case, we'll use the tensorflow-gpu image and convert it to be usable via Enterprise Gateway as a custom kernel image. Note that because this image didn't have `wget` we used `curl` to download the supporting kernel-image files.

```dockerfile
FROM tensorflow/tensorflow:2.5.0-gpu-jupyter

USER root

# Install OS dependencies required for the kernel-wrapper. Missing
# packages can be installed later only if container is running as
# privileged user.
RUN apt-get update && apt-get install -yq --no-install-recommends \
    build-essential \
    libsm6 \
    libxext-dev \
    libxrender1 \
    netcat \
    python3-dev \
    tzdata \
    unzip \
    && rm -rf /var/lib/apt/lists/*

# Install any packages required for the kernel-wrapper.  If the image
# does not contain the target kernel (i.e., IPython, IRkernel, etc.),
# it should be installed as well.
RUN pip install pycrypto

# Download and extract the enterprise gateway kernel launchers and bootstrap
# files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID.
RUN curl -L https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz | \
    tar -xz -C /usr/local/bin

RUN adduser --system --uid 1000 --gid 100 jovyan && \
    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \
    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \
    chown -R jovyan:users /usr/local/bin/kernel-launchers

ENV NB_UID 1000
ENV NB_GID 100
USER jovyan
ENV KERNEL_LANGUAGE python
CMD /usr/local/bin/bootstrap-kernel.sh
```

## Deploying Your Custom Kernel Image

The final step in deploying a custom kernel image is creating a corresponding kernel specifications directory that is available to Enterprise Gateway. Since Enterprise Gateway is also running in a container, it's important that its kernel specifications directory either be mounted externally or that a new Enterprise Gateway image is created with the appropriate directory in place. For the purposes of this discussion, we'll assume the kernel specifications directory, `/usr/local/share/jupyter/kernels`, is externally mounted.

- Find a similar kernel specification directory from which to create your custom kernel specification. The most important aspect to this is matching the language of your kernel since it will use the same [kernel launcher](#kernel-launcher). Another important question is whether your custom kernel uses Spark, because those kernel specifications will vary significantly since many of the spark options reside in the `kernel.json`'s `env` stanza. Since our examples use _vanilla_ (non-Spark) python kernels we'll use the `python_kubernetes` kernel specification as our basis.

```bash
cd /usr/local/share/jupyter/kernels
cp -r python_kubernetes python_myCustomKernel
```

- Edit the `kernel.json` file and change the `display_name`, the `image_name`, and the path to the `launch_kubernetes.py` script.

```json
{
  "language": "python",
  "display_name": "My Custom Kernel",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "myDockerHub/myCustomKernelImage:myTag"
      }
    }
  },
  "env": {},
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_myCustomKernel/scripts/launch_kubernetes.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

- If using kernel filtering (`EG_ALLOWED_KERNELS`), be sure to update it with the new kernel specification directory name (e.g., `python_myCustomKernel`) and restart/redeploy Enterprise Gateway.
- Launch or refresh your Notebook session and confirm `My Custom Kernel` appears in the _new kernel_ drop-down.
- Create a new notebook using `My Custom Kernel`.


================================================
FILE: docs/source/developers/dev-process-proxy.md
================================================
# Implementing a process proxy

A process proxy implementation is necessary if you want to interact with a resource manager that is not currently supported or extend some existing behaviors. For example, recently, we've had [contributions](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/crd.py#L18) that interact with [Kubernetes Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions), which is an example of _extending_ the `KubernetesProcessProxy` to accomplish a slightly different task.

Resource managers in which there has been some interest include the [Slurm Workload Manager](https://slurm.schedmd.com/documentation.html) and [Apache Mesos](https://mesos.apache.org/). In the end, it's really a matter of having access to an API and the ability to apply "tags" or "labels" in order to _discover_ where the kernel is running within the managed cluster. Once you have that information, it becomes a matter of implementing the appropriate methods to control the kernel's lifecycle.

```{admonition} Important!
:class: error

Before continuing, it is important to consider timeframes here.  You may instead want to implement a [_Kernel Provisioner_](https://jupyter-client.readthedocs.io/en/latest/provisioning.html) rather than a Process Proxy since _provisioners_ are available to the general framework!

The [Enterprise Gateway 4.0 release is slated to adopt Kernel Provisioners](../contributors/roadmap.md) but must remain on a down-level `jupyter_client` release (< 7.x) until that time as Enterprise Gateway (and process proxies) are currently incompatible.

That said, if you and your organization plan to stay on Enterprise Gateway 2.x or 3.x for the next couple years, then implementing a process proxy may be in your best interest.  Fortunately, the two constructs are nearly identical since Kernel Provisioners are essentially Process Proxies _properly_ integrated into the Jupyter framework thereby eliminating the need for various `KernelManager` hooks.
```

## General approach

Please refer to the [Process Proxy section](../contributors/system-architecture.md#process-proxy) in the System Architecture pages for descriptions and structure of existing process proxies. Here is the general guideline for the process of implementing a process proxy.

1. Identify and understand how to _decorate_ your "job" within the resource manager. In Hadoop YARN, this is done by using the kernel's ID as the _application name_ by setting the [`--name` parameter to `${KERNEL_ID}`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json). In Kubernetes, we apply the kernel's ID to the [`kernel-id` label on the POD](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2).
1. Today, all invocations of kernels into resource managers use a shell or python script mechanism configured into the `argv` stanza of the kernelspec. If you take this approach, you need to apply the necessary changes to integrate with your resource manager.
1. Determine how to interact with the resource manager's API to _discover_ the kernel and determine on which host it's running. This interaction should occur immediately following Enterprise Gateway's receipt of the kernel's connection information in its response from the kernel launcher. This extra step, performed within `confirm_remote_startup()`, is necessary to get the appropriate host name as reflected in the resource manager's API.
1. Determine how to monitor the "job" using the resource manager API. This will become part of the `poll()` implementation to determine if the kernel is still running. This should be as quick as possible since it occurs every 3 seconds. If this is an expensive call, you may need to make some adjustments, like skipping the call every so often.
1. Determine how to terminate "jobs" using the resource manager API. This will become part of the termination sequence, but probably only necessary if the message-based shutdown does not work (i.e., a last resort).

```{tip}
Because kernel IDs are globally unique, they serve as ideal identifiers for discovering where in the cluster the kernel is running.
```

You will likely need to provide implementations for `launch_process()`, `poll()`, `wait()`, `send_signal()`, and `kill()`, although, depending on where your process proxy resides in the class hierarchy, some implementations may be reused.

For example, if your process proxy is going to service remote kernels, you should consider deriving your implementation from the [`RemoteProcessProxy` class](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/processproxy.py#L1070). If this is the case, then you'll need to implement `confirm_remote_startup()`.

Likewise, if your process proxy is based on containers, you should consider deriving your implementation from the [`ContainerProcessProxy`](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/container.py#L39). If this is the case, then you'll need to implement `get_container_status()` and `terminate_container_resources()` rather than `confirm_remote_startup()`, etc.
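
Pulling these pieces together, a new remote process proxy might start out like the following skeleton (the class name and method bodies are hypothetical, and signatures are abbreviated; consult `processproxy.py` for the exact definitions):

```python
from enterprise_gateway.services.processproxies.processproxy import RemoteProcessProxy


class SlurmProcessProxy(RemoteProcessProxy):
    """Hypothetical proxy that tracks each kernel as a Slurm job."""

    def launch_process(self, kernel_cmd, **kwargs):
        # Submit the kernel (tagged with its kernel ID) via the kernelspec's
        # argv, then wait for the launcher's connection information.
        ...

    def confirm_remote_startup(self):
        # Query the resource manager's API, using the kernel ID "tag", to
        # discover the host on which the kernel was scheduled.
        ...

    def poll(self):
        # Called every 3 seconds: return None while the job is running,
        # something other than None once it has terminated.
        ...

    def kill(self):
        # Last resort: terminate the job via the resource manager's API.
        ...
```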

Once the process proxy has been implemented, construct an appropriate kernel specification that references your process proxy and iterate until you are satisfied with how your remote kernels behave.


================================================
FILE: docs/source/developers/index.rst
================================================
Developers Guide
================

These pages target *developers* writing applications against the REST API, authoring process proxies for other resource managers, or integrating applications with remote kernel functionality.

.. admonition:: Use cases

    - *As a developer, I want to explore supporting a different resource manager with Enterprise Gateway, by implementing a new `ProcessProxy` class such that I can easily take advantage of specific functionality provided by the resource manager.*
    - *As a developer, I want to extend the `nbclient` application to use a `KernelManager` that can leverage remote kernels spawned from Enterprise Gateway.*
    - *As a developer, I want to easily integrate the ability to launch remote kernels with existing platforms, so I can leverage my compute cluster in a customizable way.*
    - *As a developer, I am currently using Golang and need to implement a kernel launcher to allow the Go kernel I use to run remotely in my Kubernetes cluster.*
    - *As a developer, I'd like to extend some of the kernel container images and, eventually, create my own to better enable the data scientists I support.*
    - *As a developer, I want to author my own Kernel-as-a-Service application.*

.. toctree::
   :maxdepth: 1
   :name: developers

   dev-process-proxy
   kernel-launcher
   kernel-specification
   custom-images
   kernel-library
   kernel-manager
   rest-api


================================================
FILE: docs/source/developers/kernel-launcher.md
================================================
# Implementing a kernel launcher

A new implementation for a [_kernel launcher_](../contributors/system-architecture.md#kernel-launchers) becomes necessary when you want to introduce another kind of kernel to an existing configuration. Out of the box, Enterprise Gateway provides [kernel launchers](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers) that support the IPython kernel, the Apache Toree scala kernel, and the R kernel - IRKernel. There are other "language-agnostic kernel launchers" provided by Enterprise Gateway, but those are used in container environments to start the container or pod, where the "kernel image" relies on one of the three _language-based_ launchers to start the kernel within the container.

It's generally recommended that the launcher be written in the language of the kernel, but that is not a requirement so long as the launcher can start and manage the kernel's lifecycle and issue interrupts (if the kernel does not support message-based interrupts itself).

To reiterate, the four tasks of a kernel launcher are:

1. Creation of the necessary connection information based on the five ZeroMQ ports, a signature key, and an algorithm specifier, along with a _gateway listener_ socket.
1. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process after encrypting the information using AES, then encrypting the AES key using the provided public key.
1. Invocation of the target kernel.
1. Listening for interrupt and shutdown requests from Enterprise Gateway on the communication socket and carrying out the action when appropriate.

## Creating the connection information

If your target kernel exists, then there is probably support for creating ZeroMQ ports. If this proves difficult, you may be able to take a _hybrid approach_ in which the connection information, encryption, and listener portions are implemented in Python, while invocation takes place in the native language. This is how the [R kernel-launcher](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers/R/scripts) support is implemented.

When creating the connection information, your kernel launcher should handle the possibility that the `--port-range` option has been specified such that each port should reside within the specified range.

The port used between Enterprise Gateway and the launcher, known as the _communication port_, should also adhere to the port range. This port is not required to be ZeroMQ (and is not a ZMQ port in existing implementations).
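
Here's a minimal sketch of creating the five kernel ports under an optional port-range constraint using `pyzmq` (the function and its defaults are illustrative):

```python
import zmq


def create_kernel_ports(ip: str = "0.0.0.0", lower_port: int = 0, upper_port: int = 0):
    """Bind the five kernel sockets, honoring --port-range when provided."""
    context = zmq.Context()
    socket_types = {
        "shell_port": zmq.ROUTER,
        "iopub_port": zmq.PUB,
        "stdin_port": zmq.ROUTER,
        "control_port": zmq.ROUTER,
        "hb_port": zmq.REP,
    }
    connection_info = {"ip": ip, "transport": "tcp"}
    for name, socket_type in socket_types.items():
        sock = context.socket(socket_type)
        if lower_port == 0 and upper_port == 0:
            # No range specified: let ZeroMQ pick any ephemeral port.
            connection_info[name] = sock.bind_to_random_port(f"tcp://{ip}")
        else:
            connection_info[name] = sock.bind_to_random_port(
                f"tcp://{ip}", min_port=lower_port, max_port=upper_port
            )
    return context, connection_info
```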

## Encrypting the connection information

The next task of the kernel launcher is sending the connection information back to the Enterprise Gateway server. Prior to doing this, the connection information, including the communication port, is encrypted using AES encryption and a 16-byte key. The AES key is then encrypted using the public key specified in the `public_key` parameter. These two fields (the AES-encrypted payload and the public-key-encrypted AES key) are then included in a JSON structure that also includes the launcher's version information, and the whole structure is base64-encoded. Here's an example from the [Python kernel launcher](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L207).

The payload is then [sent back on a socket](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L235) identified by the `--response-address` option.
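
The following sketch approximates that sequence using the pycrypto API (the response field names are illustrative; see the linked launcher for the authoritative format):

```python
import base64
import json

from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes


def encrypt_connection_info(connection_info: dict, public_key_pem: bytes) -> bytes:
    # Serialize the connection info and pad it to AES's 16-byte block size.
    payload = json.dumps(connection_info).encode()
    payload += b" " * (16 - len(payload) % 16)

    # Encrypt the payload with a freshly generated 16-byte AES key.
    aes_key = get_random_bytes(16)
    encrypted_payload = AES.new(aes_key, AES.MODE_ECB).encrypt(payload)

    # Encrypt the AES key with the server-provided RSA public key.
    rsa_cipher = PKCS1_v1_5.new(RSA.importKey(public_key_pem))
    encrypted_key = rsa_cipher.encrypt(aes_key)

    # Bundle both fields (plus version info) and base64-encode the result.
    response = {
        "version": 1,
        "key": base64.b64encode(encrypted_key).decode(),
        "conn_info": base64.b64encode(encrypted_payload).decode(),
    }
    return base64.b64encode(json.dumps(response).encode())
```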

## Invoking the target kernel

For the R kernel launcher, the kernel is started using [`IRKernel::main()`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/R/scripts/launch_IRkernel.R#L256) after the `SparkContext` is initialized based on the `spark-context-initialization-mode` parameter.

The scala kernel launcher works similarly in that the Apache Toree kernel provides an ["entrypoint" to start the kernel](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/ToreeLauncher.scala#L315); however, because the Toree kernel initializes the `SparkContext` itself, the need to do so is conveyed directly to the kernel.

The Python kernel launcher creates a namespace instance that contains the `SparkContext` information (if requested to do so via the `spark-context-initialization-mode` parameter), instantiates an `IPKernelApp` instance using the configured namespace, then calls the [`start()`](https://github.com/ipython/ipykernel/blob/6f448d280dadbff7245f4b28b5e210c899d79342/ipykernel/kernelapp.py#L694) method.

### Invoking subclasses of `ipykernel.kernelbase.Kernel`

Because the Python kernel launcher uses `IPKernelApp`, any subclass of `ipykernel.kernelbase.Kernel` can be launched by EG's Python kernel launcher.

To specify an alternate subclass, add `--kernel-class-name` (along with the specified dotted class string) to the `kernel.json` file's `argv` stanza. EG's Python launcher will import that class and pass it as a parameter to `IPKernelApp.initialize()`.

Here's an example `kernel.json` file that launches the "echo" kernel using the `DistributedProcessProxy`:

```JSON
{
  "display_name": "Echo",
  "language": "text",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/echo/scripts/launch_ipykernel.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "none",
    "--kernel-class-name",
    "echo_kernel.kernel.EchoKernel"
  ]
}
```

```{admonition} Important!
The referenced `kernel-class-name` package must first be properly installed on all nodes where the associated process-proxy will run.
```

## Listening for interrupt and shutdown requests

The last task that must be performed by a kernel launcher is to listen on the communication port for work. There are currently two requests sent on this port: a signal event and a shutdown request.

The signal event is of the form `{"signum": n}`, where the string `signum` indicates a signal event and `n` is an integer specifying the signal number to send to the kernel. Typically, the value of `n` is `2`, representing `SIGINT`, and is used to interrupt any current processing. As more kernels adopt a message-based interrupt approach, this will become less common. Enterprise Gateway also uses this event to perform its `poll()` implementation by sending `{"signum": 0}`; raising a signal of 0 to a process is a common way to determine whether the process is still alive.

The other request is the shutdown request, which is sent when the process proxy has (typically) already terminated the kernel and is performing its final cleanup. The form of this request is `{"shutdown": 1}`; it instructs the launcher to stop listening on the communication socket and exit.
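
A simplified listener loop implementing both requests might look like this (socket setup and error handling are omitted, and `kernel_pid` is assumed to be the local process ID of the launched kernel):

```python
import json
import os
import socket


def gateway_listener(comm_socket: socket.socket, kernel_pid: int) -> None:
    """Service signal and shutdown requests arriving on the communication port."""
    shutdown = False
    while not shutdown:
        conn, _ = comm_socket.accept()
        data = conn.recv(1024)
        conn.close()
        if not data:
            continue
        request = json.loads(data.decode())
        if "signum" in request:
            # Forward the signal to the kernel; signum 0 implements poll().
            os.kill(kernel_pid, request["signum"])
        elif "shutdown" in request:
            # Stop listening and allow the launcher to exit.
            shutdown = True
```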

## Other parameters

Besides `--port-range`, `--public-key`, and `--response-address`, the kernel launcher needs to support `--kernel-id`, which indicates the kernel's ID as known to the Gateway server. It should also tolerate the existence of `--spark-context-initialization-mode` but, unless applicable to Spark environments, should only support a value of `"none"` for this option.


================================================
FILE: docs/source/developers/kernel-library.md
================================================
# Standalone Remote Kernel Execution

Remote kernels can be executed by using the `RemoteKernelManager` class directly. This enables running kernels using `ProcessProxy`s without requiring deployment of the Enterprise Gateway web application. This approach is also known as _Library Mode_.

This can be useful in niche situations, for example, using [nbconvert](https://nbconvert.readthedocs.io/) or [nbclient](https://nbclient.readthedocs.io/) to execute a kernel on a remote cluster.

Sample code using nbclient 0.2.0:

```python
import nbformat
from nbclient import NotebookClient
from enterprise_gateway.services.kernels.remotemanager import RemoteKernelManager

with open("my_notebook.ipynb") as fp:
    test_notebook = nbformat.read(fp, as_version=4)

client = NotebookClient(nb=test_notebook, kernel_manager_class=RemoteKernelManager)
client.execute(kernel_name='my_remote_kernel')
```

The above code will execute the notebook on a kernel named `my_remote_kernel` using its configured `ProcessProxy`.

Depending on the process proxy, the _hosting application_ (e.g., `nbclient`) will likely need to be configured to run on the same network as the remote kernel. So, for example, with Kubernetes, `nbclient` would need to be configured as a Kubernetes POD.


================================================
FILE: docs/source/developers/kernel-manager.md
================================================
# Using Jupyter Server's `GatewayKernelManager`

Another way to expose other Jupyter applications like `nbclient` or `papermill` to remote kernels is to use the [`GatewayKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L317) (and, implicitly, [`GatewayKernelClient`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L562)) classes that are embedded in Jupyter Server.

These classes essentially emulate the lower level [`KernelManager`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/manager.py#L84) and [`KernelClient`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/client.py#L75) classes but _forward_ their requests to/from a configured gateway server. Their necessary configuration for interacting with the gateway server is set on the [`GatewayClient` configurable](../users/client-config.md#gateway-client-configuration).

This allows for the _hosting application_ to remain **outside** the resource-managed cluster since the kernel is actually being managed by the target gateway server.

So, using the previous example, one may have...

```python
import nbformat
from nbclient import NotebookClient
from jupyter_server.gateway.gateway_client import GatewayClient
from jupyter_server.gateway.managers import GatewayKernelManager

with open("my_notebook.ipynb") as fp:
    test_notebook = nbformat.read(fp, as_version=4)

# Set any other gateway-specific parameters on the GatewayClient (singleton) instance
gw_client = GatewayClient.instance()
gw_client.url = "http://my-gateway-server.com:8888"

client = NotebookClient(nb=test_notebook, kernel_manager_class=GatewayKernelManager)
client.execute(kernel_name='my_remote_kernel')
```

In this case, `my_remote_kernel`'s kernel specification file actually resides on the Gateway server. `NotebookClient` will _think_ it's talking to local `KernelManager` and `KernelClient` instances when, in actuality, they are forwarding requests to (and getting responses from) the Gateway server at `http://my-gateway-server.com:8888`.


================================================
FILE: docs/source/developers/kernel-specification.md
================================================
# Implementing a kernel specification

If you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll need a way to make that kernel and kernel launcher available to applications. This is accomplished via the _kernel specification_ or _kernelspec_.

Kernelspecs reside in well-known directories. For Enterprise Gateway, we generally recommend they reside in `/usr/local/share/jupyter/kernels` where each entry in this directory is a directory representing the name of the kernel. The kernel specification is represented by the file `kernel.json`, the contents of which essentially indicate what environment variables should be present in the kernel process (via the `env` _stanza_) and which command (and arguments) should be issued to start the kernel process (via the `argv` _stanza_). The JSON also includes a `metadata` stanza that contains the process_proxy configuration, along with which process proxy class to instantiate to help manage the kernel process's lifecycle.

One approach the sample Enterprise Gateway kernel specifications take is to include a shell script that actually issues the `spark-submit` request. It is this shell script (typically named `run.sh`) that is referenced in the `argv` stanza.

Here's an example from the [`spark_python_yarn_cluster`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) kernel specification:

```JSON
{
  "language": "python",
  "display_name": "Spark - Python (YARN Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
    },
    "debugger": true
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "PYSPARK_PYTHON": "/opt/conda/bin/python",
    "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
    "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "lazy"
  ]
}
```

where [`run.sh`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) issues `spark-submit` specifying the kernel launcher as the "application":

```bash
eval exec \
     "${SPARK_HOME}/bin/spark-submit" \
     "${SPARK_OPTS}" \
     "${IMPERSONATION_OPTS}" \
     "${PROG_HOME}/scripts/launch_ipykernel.py" \
     "${LAUNCH_OPTS}" \
     "$@"
```

For container-based environments, the `argv` may instead reference a script that is meant to create the container pod (for Kubernetes). For these, we use a [template file](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) that operators can adjust to meet the needs of their environment. Here's how that `kernel.json` looks:

```json
{
  "language": "python",
  "display_name": "Python on Kubernetes",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "elyra/kernel-py:VERSION"
      }
    },
    "debugger": true
  },
  "env": {},
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

When using the `launch_ipykernel` launcher (aka the Python kernel launcher), subclasses of `ipykernel.kernelbase.Kernel` can be launched. By default, this launcher uses the classname `"ipykernel.ipkernel.IPythonKernel"`, but other subclasses of `ipykernel.kernelbase.Kernel` can be specified by adding a `--kernel-class-name` parameter to the `argv` stanza. See [Invoking subclasses of `ipykernel.kernelbase.Kernel`](kernel-launcher.md#invoking-subclasses-of-ipykernelkernelbasekernel) for more information.

As should be evident, kernel specifications are highly tuned to the runtime environment, so your specifications may differ, but they _should_ resemble the approaches we've taken so far.


================================================
FILE: docs/source/developers/rest-api.rst
================================================
Using the REST API
===============================

The REST API is used to author new applications that need to interact with
Enterprise Gateway.  Generally speaking, only the ``/api/kernels`` and
``/api/kernelspecs`` endpoints are used.  The ``/api/sessions`` endpoint *can*
be used to manage a kernel's lifecycle, but it is not necessary.  For example,
while the Jupyter Notebook and JupyterLab applications start kernels using
``/api/sessions``, the only interaction they perform with Enterprise Gateway is
via the ``/api/kernelspecs`` to retrieve a list of available kernel
specifications, and ``/api/kernels`` to start, stop, interrupt and restart a
kernel.  The "session" remains on the client.

General sequence
----------------
Here's the general sequence of events to implement a REST-based application to *discover*, *start*, *execute code*, *interrupt*, and *shutdown* a kernel.  To demonstrate each call, we'll use ``curl`` against a running Enterprise Gateway server at ``http://my-gateway-server.com:8888``.

Kernel discovery
~~~~~~~~~~~~~~~~
Issue a `GET` request against the ``/api/kernelspecs`` endpoint to discover
available kernel specifications. Each entry corresponds to a ``kernel.json``
file located in a directory matching the kernel's name.  This *name*
is what will be used in the subsequent start request.

The response is a JSON object where the ``default`` is a string specifying the
name of the default kernel.  This kernel specification will be used if the
start request (e.g., ``POST /api/kernels``) does not specify a kernel name in
its JSON body.

The other key in the response is ``kernelspecs``, a JSON object indexed by
kernel name whose values contain the corresponding ``kernel.json`` contents,
in addition to any *resources* associated with the kernel.  These are typically
the icon filenames to be used by the front-end application.

.. code-block:: console

    curl http://my-gateway-server.com:8888/api/kernelspecs

.. raw:: html

   <details>
   <summary><a><span style="font-family:'Courier New'">GET /api/kernelspecs</span> response</a></summary>

.. code-block:: json

    {
      "default": "python3",
      "kernelspecs": {
        "python3": {
          "name": "python3",
          "spec": {
            "argv": [
              "/usr/bin/env",
              "/opt/anaconda2/envs/py3/bin/python",
              "-m",
              "ipykernel_launcher",
              "-f",
              "{connection_file}"
            ],
            "display_name": "Python 3",
            "language": "python",
            "interrupt_mode": "signal",
            "metadata": {}
          },
          "resources": {
            "logo-32x32": "/kernelspecs/python3/logo-32x32.png",
            "logo-64x64": "/kernelspecs/python3/logo-64x64.png"
          }
        },
        "ir": {
          "name": "ir",
          "spec": {
            "argv": [
              "R",
              "--slave",
              "-e",
              "IRkernel::main()",
              "--args",
              "{connection_file}"
            ],
            "env": {},
            "display_name": "R",
            "language": "R",
            "interrupt_mode": "signal",
            "metadata": {}
          },
          "resources": {
            "kernel.js": "/kernelspecs/ir/kernel.js",
            "logo-64x64": "/kernelspecs/ir/logo-64x64.png"
          }
        },
        "spark_r_yarn_client": {
          "name": "spark_r_yarn_client",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_R_yarn_client/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "lazy"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}",
              "LAUNCH_OPTS": ""
            },
            "display_name": "Spark - R (YARN Client Mode)",
            "language": "R",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
              }
            }
          },
          "resources": {
            "kernel.js": "/kernelspecs/spark_r_yarn_client/kernel.js",
            "logo-64x64": "/kernelspecs/spark_r_yarn_client/logo-64x64.png"
          }
        },
        "spark_r_yarn_cluster": {
          "name": "spark_r_yarn_cluster",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "eager"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}",
              "LAUNCH_OPTS": ""
            },
            "display_name": "Spark - R (YARN Cluster Mode)",
            "language": "R",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
              }
            }
          },
          "resources": {
            "kernel.js": "/kernelspecs/spark_r_yarn_cluster/kernel.js",
            "logo-64x64": "/kernelspecs/spark_r_yarn_cluster/logo-64x64.png"
          }
        },
        "spark_python_yarn_client": {
          "name": "spark_python_yarn_client",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "lazy"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "PYSPARK_PYTHON": "/opt/conda/bin/python",
              "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
              "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}",
              "LAUNCH_OPTS": ""
            },
            "display_name": "Spark - Python (YARN Client Mode)",
            "language": "python",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
              },
              "debugger": true
            }
          },
          "resources": {
            "logo-64x64": "/kernelspecs/spark_python_yarn_client/logo-64x64.png"
          }
        },
        "spark_python_yarn_cluster": {
          "name": "spark_python_yarn_cluster",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "lazy"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "PYSPARK_PYTHON": "/opt/conda/bin/python",
              "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
              "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}",
              "LAUNCH_OPTS": ""
            },
            "display_name": "Spark - Python (YARN Cluster Mode)",
            "language": "python",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
              },
              "debugger": true
            }
          },
          "resources": {
            "logo-64x64": "/kernelspecs/spark_python_yarn_cluster/logo-64x64.png"
          }
        },
        "spark_scala_yarn_client": {
          "name": "spark_scala_yarn_client",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "lazy"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}",
              "__TOREE_OPTS__": "--alternate-sigint USR2",
              "LAUNCH_OPTS": "",
              "DEFAULT_INTERPRETER": "Scala"
            },
            "display_name": "Spark - Scala (YARN Client Mode)",
            "language": "scala",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
              }
            }
          },
          "resources": {
            "logo-64x64": "/kernelspecs/spark_scala_yarn_client/logo-64x64.png"
          }
        },
        "spark_scala_yarn_cluster": {
          "name": "spark_scala_yarn_cluster",
          "spec": {
            "argv": [
              "/usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/bin/run.sh",
              "--RemoteProcessProxy.kernel-id",
              "{kernel_id}",
              "--RemoteProcessProxy.response-address",
              "{response_address}",
              "--RemoteProcessProxy.public-key",
              "{public_key}",
              "--RemoteProcessProxy.port-range",
              "{port_range}",
              "--RemoteProcessProxy.spark-context-initialization-mode",
              "lazy"
            ],
            "env": {
              "SPARK_HOME": "/usr/hdp/current/spark2-client",
              "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d ${KERNEL_EXTRA_SPARK_OPTS}",
              "__TOREE_OPTS__": "--alternate-sigint USR2",
              "LAUNCH_OPTS": "",
              "DEFAULT_INTERPRETER": "Scala"
            },
            "display_name": "Spark - Scala (YARN Cluster Mode)",
            "language": "scala",
            "interrupt_mode": "signal",
            "metadata": {
              "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
              }
            }
          },
          "resources": {
            "logo-64x64": "/kernelspecs/spark_scala_yarn_cluster/logo-64x64.png"
          }
        }
      }
    }

.. raw:: html

   </details>

Kernel start
~~~~~~~~~~~~~~~~
A kernel is started by issuing a ``POST`` request against the ``/api/kernels``
endpoint.  The JSON body can take a ``name``, indicating the kernel to start,
and an ``env`` object containing environment variables to set in the
kernel's environment.

In this example, we will start the ``spark_python_yarn_cluster`` kernel with a ``KERNEL_USERNAME`` environment variable of ``jovyan``.

.. code-block:: console

    curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels' --data '{ "name": "spark_python_yarn_cluster", "env": { "KERNEL_USERNAME": "jovyan" }}'

.. raw:: html

   <details>
   <summary><a><span style="font-family:'Courier New'">POST /api/kernels</span> response</a></summary>

.. code-block:: json

    {
      "id": "f88bdc84-04c6-4021-963d-6811a61eca18",
      "name": "spark_python_yarn_cluster",
      "last_activity": "2022-02-12T00:40:45.080107Z",
      "execution_state": "starting",
      "connections": 0
    }

.. raw:: html

   </details>
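
The same request can be issued programmatically.  Here's a minimal sketch using
the third-party ``requests`` package (the host name is the same placeholder used
throughout these examples):

.. code-block:: python

    import requests

    # Start a kernel, mirroring the curl example above.
    response = requests.post(
        "http://my-gateway-server.com:8888/api/kernels",
        json={"name": "spark_python_yarn_cluster", "env": {"KERNEL_USERNAME": "jovyan"}},
    )
    response.raise_for_status()
    kernel = response.json()
    print(kernel["id"], kernel["execution_state"])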

Kernel code execution
~~~~~~~~~~~~~~~~~~~~~
Upgrading the connection to a websocket and issuing code against that websocket is beyond the scope of this document.  For this aspect of the discussion, we refer you to our Python `GatewayClient class <https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/client/gateway_client.py#L20>`_ that we use in our integration tests.

.. note::

   The name ``GatewayClient`` in our ``enterprise_gateway/client`` subdirectory is not to be confused with the ``GatewayClient`` class defined in the Jupyter Server and Notebook client applications.  In addition, the internal test class ``KernelClient`` is not to be confused with the ``KernelClient`` class that lives in the ``jupyter_client`` package.
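
For orientation, here's a minimal sketch of what such a client does, using the
third-party ``websocket-client`` package.  The kernel ID and host are the
placeholder values from the earlier examples, and the message fields follow the
Jupyter kernel messaging protocol:

.. code-block:: python

    import json
    import uuid

    from websocket import create_connection  # pip install websocket-client

    kernel_id = "f88bdc84-04c6-4021-963d-6811a61eca18"
    ws = create_connection(
        f"ws://my-gateway-server.com:8888/api/kernels/{kernel_id}/channels"
    )

    # Send an execute_request on the shell channel.
    msg_id = uuid.uuid4().hex
    ws.send(
        json.dumps(
            {
                "header": {
                    "username": "jovyan",
                    "session": uuid.uuid4().hex,
                    "msg_id": msg_id,
                    "msg_type": "execute_request",
                    "version": "5.0",
                },
                "parent_header": {},
                "channel": "shell",
                "content": {
                    "code": "print(6 * 7)",
                    "silent": False,
                    "store_history": False,
                    "user_expressions": {},
                    "allow_stdin": False,
                },
                "metadata": {},
                "buffers": {},
            }
        )
    )

    # Read messages until this request's stdout arrives on the iopub channel.
    while True:
        msg = json.loads(ws.recv())
        if msg["parent_header"].get("msg_id") == msg_id and msg["msg_type"] == "stream":
            print(msg["content"]["text"])  # -> 42
            break
    ws.close()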

Kernel interrupt
~~~~~~~~~~~~~~~~
A kernel is interrupted by issuing a ``POST`` request against the ``/api/kernels/<kernel_id>/interrupt`` endpoint.

In this example, we will interrupt the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.

.. note::

   Restarting a kernel is nearly identical to interrupting a kernel; just replace ``interrupt`` in the endpoint with ``restart``.

.. code-block:: console

    curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18/interrupt'

A successful request returns a status code of ``204`` (No Content).  (The expected response for ``restart`` is ``200`` (OK).)
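
For completeness, here's the corresponding ``restart`` request:

.. code-block:: console

    curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18/restart'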


Kernel shutdown
~~~~~~~~~~~~~~~~
A kernel is shut down by issuing a ``DELETE`` request against the ``/api/kernels/<kernel_id>`` endpoint.

In this example, we will shut down the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.

.. code-block:: console

    curl -X DELETE -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18'

A successful request returns a status code of ``204`` (No Content).

OpenAPI Specification
~~~~~~~~~~~~~~~~~~~~~
Here's the current `OpenAPI <https://www.openapis.org/>`_ specification available from Enterprise Gateway.  An interactive version is available `here <https://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyter-server/enterprise_gateway/main/enterprise_gateway/services/api/swagger.yaml>`_.

.. openapi:: ../../../enterprise_gateway/services/api/swagger.yaml


================================================
FILE: docs/source/index.rst
================================================
Welcome to Jupyter Enterprise Gateway!
======================================
Jupyter Enterprise Gateway is a headless web server with a pluggable framework
for anyone supporting multiple notebook users in a managed-cluster environment.
Some of the core functionality it provides is better optimization of compute
resources, improved multi-user support, and more granular security for your
Jupyter notebook environment - making it suitable for enterprise, scientific,
and academic implementations.

From a technical perspective, Jupyter Enterprise Gateway is a web server that
launches kernels on behalf of remote notebooks. This leads to better resource
management, as the web server is no longer the single location for kernel activity.  It essentially exposes a *Kernel as a Service* model.

By default, the Jupyter framework runs kernels locally - potentially exhausting the server's resources. By leveraging the functionality of the
underlying resource management applications like Hadoop YARN, Kubernetes, and others, Jupyter Enterprise Gateway
distributes kernels across the compute cluster, dramatically increasing the number of simultaneously active kernels while leveraging the available compute resources.

.. figure:: images/Scalability-After-JEG.gif
   :align: center

Kernel Gateway vs. Enterprise Gateway
-------------------------------------
Jupyter Enterprise Gateway was formerly built directly on Jupyter Kernel
Gateway.  At that time, it had complete feature parity with Kernel Gateway.
However, in order to address various roadmap items, Enterprise Gateway removed
its dependency on Kernel Gateway, so now the question arises, when does one
choose Enterprise Gateway over Kernel Gateway?

Use Enterprise Gateway if...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. You have a large compute cluster consisting of limited resources (GPUs, large memory, etc.) and users require those resources from notebooks
2. You have large numbers of users requiring access to a shared compute cluster
3. You require some amount of High Availability/Disaster Recovery such that another Gateway server can be spun up to service existing (and remote) kernels

Use Kernel Gateway if...
~~~~~~~~~~~~~~~~~~~~~~~~
1. You have a small user pool where the resources of the Gateway server can be shared amongst those users (no remote kernel support)
2. You wish to configure the `notebook-http mode <https://jupyter-kernel-gateway.readthedocs.io/en/latest/http-mode.html>`_ functionality where a specific Notebook provides HTTP endpoints

Who's this for?
---------------
Jupyter Enterprise Gateway is a highly technical piece of the Jupyter Stack, so we've separated documentation to help specific personas:

1. `Users <users/index.html>`_: people using Jupyter web applications who wish to connect to an Enterprise Gateway instance.
2. `Operators <operators/index.html>`_: people deploying or serving Jupyter Enterprise Gateway to others.
3. `Developers <developers/index.html>`_: people writing applications or deploying kernels for other resource managers.
4. `Contributors <contributors/index.html>`_: people contributing directly to the Jupyter Enterprise Gateway project.

If you find gaps in our documentation, please open an issue (or better yet, a pull request) on the Jupyter Enterprise Gateway `GitHub repo <https://github.com/jupyter-server/enterprise_gateway>`_.

Table of Contents
-----------------

.. toctree::
   :maxdepth: 2

   Users <users/index>
   Operators <operators/index>
   Developers <developers/index>
   Contributors <contributors/index>
   Other <other/index>


================================================
FILE: docs/source/operators/config-add-env.md
================================================
# Additional environment variables

Besides those environment variables associated with configurable options, the following environment variables can also be used to influence functionality:

```text
  EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME=default
    Kubernetes only.  This value indicates the default service account name to use for
    kernel namespaces when the Enterprise Gateway needs to create the kernel's namespace
    and KERNEL_SERVICE_ACCOUNT_NAME has not been provided.

  EG_DOCKER_NETWORK=enterprise-gateway or bridge
    Docker only. Used by the docker deployment and launch scripts, this indicates the
    name of the docker network to use.  The start scripts default this
    value to 'enterprise-gateway' because they create the network.  The docker kernel
    launcher (launch_docker.py) defaults this value to 'bridge' only in cases where it
    wasn't previously set by the deployment script.

  EG_ENABLE_TUNNELING=False
    Indicates whether tunneling (via ssh) of the kernel and communication ports
    is enabled (True) or not (False).

  EG_KERNEL_CLUSTER_ROLE=kernel-controller or cluster-admin
    Kubernetes only.  The role to use when binding with the kernel service account.
    The eg-clusterrole.yaml file creates the cluster role 'kernel-controller'
    and conveys that name via EG_KERNEL_CLUSTER_ROLE.  Should the deployment script
    not set this value, Enterprise Gateway will then use 'cluster-admin'.  It is
    recommended this value be set to something other than 'cluster-admin'.

  EG_KERNEL_LAUNCH_TIMEOUT=30
    The time (in seconds) Enterprise Gateway will wait for a kernel's startup
    completion status before deeming the startup a failure, at which time a second
    startup attempt will take place.  If a second timeout occurs, Enterprise
    Gateway will report a failure to the client.

  EG_KERNEL_INFO_TIMEOUT=60
    The time (in seconds) Enterprise Gateway will wait for kernel info response
    before deeming the request a failure.

  EG_SENSITIVE_ENV_KEYS=""
    A comma-separated list (e.g. "secret,pwd,auth") of sensitive environment
    variables. Any environment variable whose name contains any of the words from
    this list will have its value replaced by EG_REDACTION_MASK whenever logged.

  EG_REDACTION_MASK=********
    The redaction mask used if EG_SENSITIVE_ENV_KEYS is set. Sensitive environment
    variables will be logged as this redaction mask instead.

  EG_KERNEL_LOG_DIR=/tmp
    The directory used during remote kernel launches of DistributedProcessProxy
    kernels.  Files in this directory will be of the form kernel-<kernel_id>.log.

  EG_KERNEL_SESSION_PERSISTENCE=False
    **Experimental** Enables kernel session persistence.  Currently, this is purely
    experimental and writes kernel session information to a local file.  Should
    Enterprise Gateway terminate with running kernels, a subsequent restart of
    Enterprise Gateway will attempt to reconnect to the persisted kernels.  See
    also EG_KERNEL_SESSION_LOCATION and --KernelSessionManager.enable_persistence.

  EG_KERNEL_SESSION_LOCATION=<JupyterDataDir>
    **Experimental** The location in which the kernel session information is persisted.
    By default, this is located in the configured JupyterDataDir.  See also
    EG_KERNEL_SESSION_PERSISTENCE.

  EG_MAX_PORT_RANGE_RETRIES=5
    The number of attempts made to locate an available port within the specified
    port range.  Only applies when --EnterpriseGatewayApp.port_range
    (or EG_PORT_RANGE) has been specified or is in use for the given kernel.

  EG_MIN_PORT_RANGE_SIZE=1000
    The minimum port range size permitted when --EnterpriseGatewayApp.port_range
    (or EG_PORT_RANGE) is specified or is in use for the given kernel.  Port ranges
    reflecting smaller sizes will result in a failure to launch the corresponding
    kernel (since port-range can be specified within individual kernel specifications).

  EG_MIRROR_WORKING_DIRS=False
    Containers only.  If True, kernel creation requests that specify KERNEL_WORKING_DIR
    will set the kernel container's working directory to that value.  See also
    KERNEL_WORKING_DIR.

  EG_NAMESPACE=enterprise-gateway or default
    Kubernetes only.  Used during Kubernetes deployment, this indicates the name of
    the namespace in which the Enterprise Gateway service is deployed.  The
    namespace is created prior to deployment, and is set into the EG_NAMESPACE env via
    the deployment.yaml script. This value is then used within Enterprise Gateway to coordinate
    kernel configurations. Should this value not be set during deployment, Enterprise
    Gateway will default its value to namespace 'default'.

  EG_PROHIBITED_GIDS=0
    Containers only.  A comma-separated list of group ids (GID) whose values are not
    allowed to be referenced by KERNEL_GID.  This defaults to the root group id (0).
    Attempts to launch a kernel where KERNEL_GID's value is in this list will result
    in an exception indicating error 403 (Forbidden).  See also EG_PROHIBITED_UIDS.

  EG_PROHIBITED_LOCAL_IPS=''
    A comma-separated list of local IPv4 addresses (or regular expressions) that
    should not be used when determining the response address used to convey connection
    information back to Enterprise Gateway from a remote kernel.  In some cases, other
    network interfaces (e.g., docker with 172.17.0.*) can interfere - leading to
    connection failures during kernel startup.
    Example: EG_PROHIBITED_LOCAL_IPS=172.17.0.*,192.168.0.27 will eliminate the use of
    all addresses in the 172.17.0.* range as well as 192.168.0.27.

  EG_PROHIBITED_UIDS=0
    Containers only.  A comma-separated list of user ids (UID) whose values are not
    allowed to be referenced by KERNEL_UID.  This defaults to the root user id (0).
    Attempts to launch a kernel where KERNEL_UID's value is in this list will result
    in an exception indicating error 403 (Forbidden).  See also EG_PROHIBITED_GIDS.

  EG_RESPONSE_IP=None
    Experimental.  The IP address to use to formulate the response address (with
    `EG_RESPONSE_PORT`).  By default, the server's IP is used.  However, we may find
    it necessary to use a different IP in cases where the target kernels are external
    to the Enterprise Gateway server (for example).  Its value may also need to be
    set in cases where the computed (default) value is not correct for the current topology.

  EG_RESPONSE_PORT=8877
    The single response port used to receive connection information
    from launched kernels.

  EG_RESPONSE_PORT_RETRIES=10
    The number of retries to attempt when the original response port
    (EG_RESPONSE_PORT) is found to be in-use.  This value should be
    set to 0 (zero) if no port retries are desired.

  EG_SHARED_NAMESPACE=False
    Kubernetes only. This value indicates whether (True) or not (False) all kernel pods
    should reside in the same namespace as Enterprise Gateway.  This is not a recommended
    configuration.

  EG_SSH_PORT=22
    The port number used for ssh operations for installations choosing to
    configure the ssh server on a port other than the default 22.

  EG_REMOTE_PWD=None
    The password to use when connecting to remote hosts via ssh.

  EG_REMOTE_USER=None
    The username to use when connecting to remote hosts (defaults to `getpass.getuser()`
    when not set).

  EG_REMOTE_GSS_SSH=False
    Use gss instead of EG_REMOTE_USER and EG_REMOTE_PWD to connect to remote host via SSH.
    Case insensitive. 'True' to enable, 'False', '' or unset to disable.
    Any other value will error.

  EG_YARN_CERT_BUNDLE=<custom_truststore_path>
    The path to a .pem or any other custom truststore used as a CA bundle in
    yarn-api-client.

  EG_ZMQ_IO_THREADS=1
    The size of the ZMQ thread pool used to handle I/O operations.  Applies only to shared
    contexts, which are enabled by default but can be disabled via
    `RemoteMappingKernelManager.shared_context = False`.

  EG_ZMQ_MAX_SOCKETS=1023
    Specifies the maximum number of sockets to allow on the ZMQ context.  Applies only to
    shared contexts, which are enabled by default but can be disabled via
    `RemoteMappingKernelManager.shared_context = False`.
```
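
For example, an operator might export a handful of these before launching Enterprise Gateway. The values below are purely illustrative:

```bash
# Illustrative values only - adjust for your deployment.
export EG_KERNEL_LAUNCH_TIMEOUT=60          # allow slower clusters more startup time
export EG_SENSITIVE_ENV_KEYS="secret,pwd"   # redact matching env values in logs
export EG_PROHIBITED_LOCAL_IPS=172.17.0.*   # ignore the docker bridge interface

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0
```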


================================================
FILE: docs/source/operators/config-availability.md
================================================
# Availability modes

Enterprise Gateway can be optionally configured in one of two "availability modes": _standalone_ or _replication_. When configured, Enterprise Gateway can recover from failures and reconnect to any active remote kernels that were previously managed by the terminated EG instance. As such, both modes require that kernel session persistence also be enabled via `KernelSessionManager.enable_persistence=True`.

```{note}
Kernel session persistence will be automatically enabled whenever availability mode is configured.
```

```{caution}
**Availability modes and kernel session persistence should be considered experimental!**

Known issues include:
1. Culling configurations do not account for different nodes and therefore could result in the incorrect culling of kernels.
2. Each "node switch" requires a manual reconnect to the kernel.

We hope to address these in future releases (depending on demand).
```

## Standalone availability

_Standalone availability_ assumes that, upon failure of the original EG instance, another EG instance will be started. Upon startup of the second instance (following the termination of the first), EG will attempt to load and reconnect to all kernels that were deemed active when the previous instance terminated. This mode is somewhat analogous to the classic HA/DR mode of _active-passive_ and is typically used when node resources are at a premium or the number of replicas (in the Kubernetes sense) must remain at 1.

To enable Enterprise Gateway for 'standalone' availability, configure `EnterpriseGatewayApp.availability_mode=standalone` or set env `EG_AVAILABILITY_MODE=standalone`.

Here's an example for starting Enterprise Gateway with standalone availability:

```bash
#!/bin/bash

LOG=/var/log/enterprise_gateway.log
PIDFILE=/var/run/enterprise_gateway.pid

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \
   --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 &

if [ "$?" -eq 0 ]; then
  echo $! > $PIDFILE
else
  exit 1
fi
```

## Replication availability

With _replication availability_, multiple EG instances (or replicas) operate at the same time, fronted by some kind of reverse proxy or load balancer. Because state still resides within each `KernelManager` instance executing within a given EG instance, we strongly suggest configuring some form of _client affinity_ (a.k.a. "sticky sessions") to avoid node switches wherever possible, since each node switch requires manual reconnection of the front-end (today).

```{tip}
Configuring client affinity is **strongly recommended**, otherwise functionality that relies on state within the servicing node (e.g., culling) can be affected upon node switches, resulting in incorrect behavior.
```

In this mode, when one node goes down, the subsequent request will be routed to a different node that doesn't know about the kernel. Prior to returning a `404` (not found) status code, EG will check its persisted store to determine if the kernel was managed and, if so, attempt to "hydrate" a `KernelManager` instance associated with the remote kernel. (Of course, if the kernel was running local to the downed server, chances are it cannot be _revived_.) Upon successful "hydration" the request continues as if on the originating node. Because _client affinity_ is in place, subsequent requests should continue to be routed to the "servicing node".

To enable Enterprise Gateway for 'replication' availability, configure `EnterpriseGatewayApp.availability_mode=replication` or set env `EG_AVAILABILITY_MODE=replication`.

```{attention}
To preserve backwards compatibility, if only kernel session persistence is enabled via `KernelSessionManager.enable_persistence=True`, the availability mode will be automatically configured to 'replication' if `EnterpriseGatewayApp.availability_mode` is not configured.
```

Here's an example for starting Enterprise Gateway with replication availability:

```bash
#!/bin/bash

LOG=/var/log/enterprise_gateway.log
PIDFILE=/var/run/enterprise_gateway.pid

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \
   --EnterpriseGatewayApp.availability_mode=replication > $LOG 2>&1 &

if [ "$?" -eq 0 ]; then
  echo $! > $PIDFILE
else
  exit 1
fi
```

# Kernel Session Persistence

Enabling kernel session persistence allows Jupyter Notebooks to reconnect to kernels when Enterprise Gateway is restarted and forms the basis for the _availability modes_ described above. Enterprise Gateway provides two ways of persisting kernel sessions: _File Kernel Session Persistence_ and _Webhook Kernel Session Persistence_, although others can be provided by subclassing `KernelSessionManager` (see below).

```{attention}
Due to its experimental nature, kernel session persistence is disabled by default. To enable this functionality, you must configure `KernelSessionManager.enable_persistence=True` or configure `EnterpriseGatewayApp.availability_mode` to either `standalone` or `replication`.
```

As noted above, the availability modes rely on the persisted information relative to the kernel. This information consists of the arguments and options used to launch the kernel, along with its connection information. In essence, it consists of any information necessary to re-establish communication with the kernel.

## File Kernel Session Persistence

File Kernel Session Persistence stores kernel sessions as files in a specified directory. To enable this form of persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `FileKernelSessionManager.enable_persistence=True`. To change the directory in which the kernel session file is being saved, either set the environment variable `EG_PERSISTENCE_ROOT` or configure `FileKernelSessionManager.persistence_root` to the directory. By default, the directory used to store a given kernel's session information is the `JUPYTER_DATA_DIR`.

```{note}
Enterprise Gateway handles corrupted or invalid session files gracefully. If a persisted session file contains invalid JSON or cannot be read, the error is logged and that session is skipped rather than preventing Enterprise Gateway from starting.
```

```{note}
Because `FileKernelSessionManager` is the default class for kernel session persistence, configuring `EnterpriseGatewayApp.kernel_session_manager_class` to `enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager` is not necessary.
```
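
As a minimal sketch, file-based persistence can be enabled entirely through environment variables (the persistence root below is an arbitrary example):

```bash
export EG_KERNEL_SESSION_PERSISTENCE=True
export EG_PERSISTENCE_ROOT=/var/lib/enterprise-gateway/sessions  # example path

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0
```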

## Webhook Kernel Session Persistence

Webhook Kernel Session Persistence stores kernel sessions in a database of your choosing, fronted by an API that you provide. The API must include four endpoints:

- A `GET` that will retrieve a list of all kernel sessions from a database
- A `GET` that will take the kernel id as a path variable and retrieve that information from a database
- A `DELETE` that will delete the specified kernel sessions, where the body of the request is a list of kernel ids
- A `POST` that will take kernel id as a path variable and kernel session in the body of the request and save it to a database where the object being saved is:

```
    {
      kernel_id: UUID string,
      kernel_session: JSON
    }
```
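
Here's a minimal sketch of such an API using Flask, with an in-memory dict standing in for a real database. The `/sessions` route prefix is an illustrative assumption; the actual paths are derived from the URL you configure via `EG_WEBHOOK_URL` (see below):

```python
from flask import Flask, abort, jsonify, request

app = Flask(__name__)
sessions = {}  # kernel_id -> kernel session; stand-in for a real database


@app.get("/sessions")
def list_sessions():
    # GET: retrieve all kernel sessions.
    return jsonify(list(sessions.values()))


@app.get("/sessions/<kernel_id>")
def get_session(kernel_id):
    # GET: retrieve a single kernel session by kernel id.
    if kernel_id not in sessions:
        abort(404)
    return jsonify(sessions[kernel_id])


@app.delete("/sessions")
def delete_sessions():
    # DELETE: the request body is a list of kernel ids to remove.
    for kernel_id in request.get_json():
        sessions.pop(kernel_id, None)
    return "", 204


@app.post("/sessions/<kernel_id>")
def save_session(kernel_id):
    # POST: the request body is the kernel session to persist.
    sessions[kernel_id] = request.get_json()
    return "", 204
```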

To enable the webhook kernel session persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `WebhookKernelSessionManager.enable_persistence=True`. To connect the API, set the environment variable `EG_WEBHOOK_URL` or configure `WebhookKernelSessionManager.webhook_url` to the API endpoint.

Because `WebhookKernelSessionManager` is not the default kernel session persistence class, an additional configuration step must be taken to instruct EG to use this class: `EnterpriseGatewayApp.kernel_session_manager_class = enterprise_gateway.services.sessions.kernelsessionmanager.WebhookKernelSessionManager`.

### Enabling Authentication

Enabling authentication is an option if the API requires it for requests. Set the environment variable `EG_AUTH_TYPE` or configure `WebhookKernelSessionManager.auth_type` to be either `Basic` or `Digest`. If it is set to an empty string, authentication won't be enabled.

Then set the environment variables `EG_WEBHOOK_USERNAME` and `EG_WEBHOOK_PASSWORD` or configure `WebhookKernelSessionManager.webhook_username` and `WebhookKernelSessionManager.webhook_password` to provide the username and password for authentication.
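
For example (the endpoint and credentials below are purely hypothetical):

```bash
export EG_AUTH_TYPE=Basic
export EG_WEBHOOK_URL=https://sessions.example.com/api/sessions  # hypothetical endpoint
export EG_WEBHOOK_USERNAME=eg-service
export EG_WEBHOOK_PASSWORD=change-me
```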

## Bring Your Own Kernel Session Persistence

To introduce a different implementation, you must configure the kernel session manager class. Here's an example for starting Enterprise Gateway using a custom `KernelSessionManager` and 'standalone' availability. Note that setting `--MyCustomKernelSessionManager.enable_persistence=True` is not necessary because an availability mode is specified, but it is displayed here for completeness:

```bash
#!/bin/bash

LOG=/var/log/enterprise_gateway.log
PIDFILE=/var/run/enterprise_gateway.pid

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \
   --EnterpriseGatewayApp.kernel_session_manager_class=custom.package.MyCustomKernelSessionManager \
   --MyCustomKernelSessionManager.enable_persistence=True \
   --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 &

if [ "$?" -eq 0 ]; then
  echo $! > $PIDFILE
else
  exit 1
fi
```

Alternative persistence implementations using SQL and NoSQL databases would be ideal and, as always, contributions are welcome!

## Testing Kernel Session Persistence

Once kernel session persistence has been enabled and configured, create a kernel by opening up a Jupyter Notebook. Save some variable in that notebook and shut down Enterprise Gateway using `kill -9 PID`, where `PID` is the PID of the Enterprise Gateway process. Restart Enterprise Gateway and refresh your notebook tab. If all worked correctly, the variable should be loaded without the need to rerun the cell.

If you are using Docker, ensure the container isn't tied to the PID of Enterprise Gateway. The container should still be running after killing that PID.


================================================
FILE: docs/source/operators/config-cli.md
================================================
# Command-line options

In some cases, it may be easier to use command line options. These can also be used for _static_ values that should not be targeted for [_dynamic configurables_](config-dynamic.md#dynamic-configurables).

To see the same configuration options at the command line, run the following:

```bash
jupyter enterprisegateway --help-all
```

A snapshot of this help appears below for ease of reference. The options for the superclass `EnterpriseGatewayConfigMixin` have been omitted. As with the `--generate-config` option, each option includes its corresponding environment variable, if applicable.

```text
Jupyter Enterprise Gateway

Provisions remote Jupyter kernels and proxies HTTP/Websocket traffic to them.

Options
-------

Arguments that take values are actually convenience aliases to full
Configurables, whose aliases are listed on the help line. For more information
on full configurables, see '--help-all'.

--debug
    set log level to logging.DEBUG (maximize logging output)
--generate-config
    generate default config file
-y
    Answer yes to any questions instead of prompting.
--log-level=<Enum> (Application.log_level)
    Default: 30
    Choices: (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
    Set the log level by value or name.
--config=<Unicode> (JupyterApp.config_file)
    Default: ''
    Full path of a config file.
--ip=<Unicode> (EnterpriseGatewayApp.ip)
    Default: '127.0.0.1'
    IP address on which to listen (EG_IP env var)
--port=<Int> (EnterpriseGatewayApp.port)
    Default: 8888
    Port on which to listen (EG_PORT env var)
--port_retries=<Int> (EnterpriseGatewayApp.port_retries)
    Default: 50
    Number of ports to try if the specified port is not available
    (EG_PORT_RETRIES env var)
--keyfile=<Unicode> (EnterpriseGatewayApp.keyfile)
    Default: None
    The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env
    var)
--certfile=<Unicode> (EnterpriseGatewayApp.certfile)
    Default: None
    The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)
--client-ca=<Unicode> (EnterpriseGatewayApp.client_ca)
    Default: None
    The full path to a certificate authority certificate for SSL/TLS client
    authentication. (EG_CLIENT_CA env var)

Class parameters
----------------

Parameters are set from command-line arguments of the form:
`--Class.trait=value`. This line is evaluated in Python, so simple expressions
are allowed, e.g.:: `--C.a='range(3)'` For setting C.a=[0,1,2].

EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp) options
----------------------------------------------------------------------
--EnterpriseGatewayApp.allow_credentials=<Unicode>
    Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env
    var)
    Default: ''
--EnterpriseGatewayApp.allow_headers=<Unicode>
    Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)
    Default: ''
--EnterpriseGatewayApp.allow_methods=<Unicode>
    Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)
    Default: ''
--EnterpriseGatewayApp.allow_origin=<Unicode>
    Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)
    Default: ''
--EnterpriseGatewayApp.alt_yarn_endpoint=<Unicode>
    The http url specifying the alternate YARN Resource Manager.  This value
    should be set when YARN Resource Managers are configured for high
    availability.  Note: If both YARN endpoints are NOT set, the YARN library
    will use the files within the local HADOOP_CONFIG_DIR to determine the
    active resource manager. (EG_ALT_YARN_ENDPOINT env var)
    Default: None
--EnterpriseGatewayApp.answer_yes=<Bool>
    Answer yes to any prompts.
    Default: False
--EnterpriseGatewayApp.auth_token=<Unicode>
    Authorization token required for all requests (EG_AUTH_TOKEN env var)
    Default: ''
--EnterpriseGatewayApp.authorized_users=<set-item-1>...
    Comma-separated list of user names (e.g., ['bob','alice']) against which
    KERNEL_USERNAME will be compared.  Any match (case-sensitive) will allow the
    kernel's launch, otherwise an HTTP 403 (Forbidden) error will be raised.
    The set of unauthorized users takes precedence. This option should be used
    carefully as it can dramatically limit who can launch kernels.
    (EG_AUTHORIZED_USERS env var - non-bracketed, just comma-separated)
    Default: set()
--EnterpriseGatewayApp.authorized_origin=<Unicode>
    Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will
    match against the request's SSL certificate.  An HTTP 403 (Forbidden) error
    will be raised on a failed match.  This option requires TLS to be enabled.
    It does not support IP addresses. (EG_AUTHORIZED_ORIGIN env var)
    Default: ''
--EnterpriseGatewayApp.availability_mode=<CaselessStrEnum>
    Specifies the type of availability.  Values must be one of "standalone"
    or "replication".  (EG_AVAILABILITY_MODE env var)
    Choices: any of ['standalone', 'replication'] (case-insensitive) or None
    Default: None
--EnterpriseGatewayApp.base_url=<Unicode>
    The base path for mounting all API resources (EG_BASE_URL env var)
    Default: '/'
--EnterpriseGatewayApp.certfile=<Unicode>
    The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)
    Default: None
--EnterpriseGatewayApp.client_ca=<Unicode>
    The full path to a certificate authority certificate for SSL/TLS client
    authentication. (EG_CLIENT_CA env var)
    Default: None
--EnterpriseGatewayApp.client_envs=<list-item-1>...
    Environment variables allowed to be set when a client requests a
    new kernel. (EG_CLIENT_ENVS env var)
    Default: []
--EnterpriseGatewayApp.conductor_endpoint=<Unicode>
    The http url for accessing the Conductor REST API. (EG_CONDUCTOR_ENDPOINT
    env var)
    Default: None
--EnterpriseGatewayApp.config_file=<Unicode>
    Full path of a config file.
    Default: ''
--EnterpriseGatewayApp.config_file_name=<Unicode>
    Specify a config file to load.
    Default: ''
--EnterpriseGatewayApp.default_kernel_name=<Unicode>
    Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)
    Default: ''
--EnterpriseGatewayApp.dynamic_config_interval=<Int>
    Specifies the number of seconds configuration files are polled for changes.
    A value of 0 or less disables dynamic config updates.
    (EG_DYNAMIC_CONFIG_INTERVAL env var)
    Default: 0
--EnterpriseGatewayApp.env_process_whitelist=<list-item-1>...
    DEPRECATED, use inherited_envs
    Default: []
--EnterpriseGatewayApp.env_whitelist=<list-item-1>...
    DEPRECATED, use client_envs.
    Default: []
--EnterpriseGatewayApp.expose_headers=<Unicode>
    Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)
    Default: ''
--EnterpriseGatewayApp.generate_config=<Bool>
    Generate default config file.
    Default: False
--EnterpriseGatewayApp.impersonation_enabled=<Bool>
    Indicates whether impersonation will be performed during kernel launch.
    (EG_IMPERSONATION_ENABLED env var)
    Default: False
--EnterpriseGatewayApp.inherited_envs=<list-item-1>...
    Environment variables allowed to be inherited
    from the spawning process by the kernel. (EG_INHERITED_ENVS env var)
    Default: []
--EnterpriseGatewayApp.ip=<Unicode>
    IP address on which to listen (EG_IP env var)
    Default: '127.0.0.1'
--EnterpriseGatewayApp.kernel_headers=<list-item-1>...
    Request headers to make available to kernel launch framework.
    (EG_KERNEL_HEADERS env var)
    Default: []
--EnterpriseGatewayApp.kernel_manager_class=<Type>
    The kernel manager class to use. Must be a subclass of
    `enterprise_gateway.services.kernels.RemoteMappingKernelManager`.
    Default: 'enterprise_gateway.services.kernels.remotemanager.RemoteMapp...
--EnterpriseGatewayApp.kernel_session_manager_class=<Type>
    The kernel session manager class to use. Must be a subclass of
    `enterprise_gateway.services.sessions.KernelSessionManager`.
    Default: 'enterprise_gateway.services.sessions.kernelsessionmanager.Fi...
--EnterpriseGatewayApp.kernel_spec_cache_class=<Type>
    The kernel spec cache class to use. Must be a subclass of
    `enterprise_gateway.services.kernelspecs.KernelSpecCache`.
    Default: 'enterprise_gateway.services.kernelspecs.kernelspec_cache.Ker...
--EnterpriseGatewayApp.kernel_spec_manager_class=<Type>
    The kernel spec manager class to use. Must be a subclass of
    `jupyter_client.kernelspec.KernelSpecManager`.
    Default: 'jupyter_client.kernelspec.KernelSpecManager'
--EnterpriseGatewayApp.keyfile=<Unicode>
    The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env
    var)
    Default: None
--EnterpriseGatewayApp.list_kernels=<Bool>
    Permits listing of the running kernels using API endpoints /api/kernels and
    /api/sessions. (EG_LIST_KERNELS env var) Note: Jupyter Notebook allows this
    by default but Jupyter Enterprise Gateway does not.
    Default: False
--EnterpriseGatewayApp.load_balancing_algorithm=<Unicode>
    Specifies which load balancing algorithm DistributedProcessProxy should use.
    Must be one of "round-robin" or "least-connection".
    (EG_LOAD_BALANCING_ALGORITHM env var)
    Default: 'round-robin'
--EnterpriseGatewayApp.log_datefmt=<Unicode>
    The date format used by logging formatters for %(asctime)s
    Default: '%Y-%m-%d %H:%M:%S'
--EnterpriseGatewayApp.log_format=<Unicode>
    The Logging format template
    Default: '[%(name)s]%(highlevel)s %(message)s'
--EnterpriseGatewayApp.log_level=<Enum>
    Set the log level by value or name.
    Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
    Default: 30
--EnterpriseGatewayApp.max_age=<Unicode>
    Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)
    Default: ''
--EnterpriseGatewayApp.max_kernels=<Int>
    Limits the number of kernel instances allowed to run by this gateway.
    Unbounded by default. (EG_MAX_KERNELS env var)
    Default: None
--EnterpriseGatewayApp.max_kernels_per_user=<Int>
    Specifies the maximum number of kernels a user can have active
    simultaneously.  A value of -1 disables enforcement.
    (EG_MAX_KERNELS_PER_USER env var)
    Default: -1
--EnterpriseGatewayApp.port=<Int>
    Port on which to listen (EG_PORT env var)
    Default: 8888
--EnterpriseGatewayApp.port_range=<Unicode>
    Specifies the lower and upper port numbers from which ports are created. The
    bounded values are separated by '..' (e.g., 33245..34245 specifies a range
    of 1000 ports to be randomly selected). A range of zero (e.g., 33245..33245
    or 0..0) disables port-range enforcement.  (EG_PORT_RANGE env var)
    Default: '0..0'
--EnterpriseGatewayApp.port_retries=<Int>
    Number of ports to try if the specified port is not available
    (EG_PORT_RETRIES env var)
    Default: 50
--EnterpriseGatewayApp.remote_hosts=<list-item-1>...
    Bracketed comma-separated list of hosts on which DistributedProcessProxy
    kernels will be launched e.g., ['host1','host2']. (EG_REMOTE_HOSTS env var -
    non-bracketed, just comma-separated)
    Default: ['localhost']
--EnterpriseGatewayApp.show_config=<Bool>
    Instead of starting the Application, dump configuration to stdout
    Default: False
--EnterpriseGatewayApp.show_config_json=<Bool>
    Instead of starting the Application, dump configuration to stdout (as JSON)
    Default: False
--EnterpriseGatewayApp.ssl_version=<Int>
    Sets the SSL version to use for the web socket connection. (EG_SSL_VERSION
    env var)
    Default: None
--EnterpriseGatewayApp.trust_xheaders=<CBool>
    Use x-* header values for overriding the remote-ip, useful when application
    is behind a proxy. (EG_TRUST_XHEADERS env var)
    Default: False
--EnterpriseGatewayApp.unauthorized_users=<set-item-1>...
    Comma-separated list of user names (e.g., ['root','admin']) against which
    KERNEL_USERNAME will be compared.  Any match (case-sensitive) will prevent
    the kernel's launch and result in an HTTP 403 (Forbidden) error.
    (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)
    Default: {'root'}
--EnterpriseGatewayApp.ws_ping_interval=<Int>
    Specifies the ping interval (in seconds) that should be used by the zmq port
    associated with spawned kernels. Set this variable to 0 to disable the ping mechanism.
    (EG_WS_PING_INTERVAL_SECS env var)
    Default: 30
--EnterpriseGatewayApp.yarn_endpoint=<Unicode>
    The http url specifying the YARN Resource Manager. Note: If this value is
    NOT set, the YARN library will use the files within the local
    HADOOP_CONFIG_DIR to determine the active resource manager.
    (EG_YARN_ENDPOINT env var)
    Default: None
--EnterpriseGatewayApp.yarn_endpoint_security_enabled=<Bool>
    Is YARN Kerberos/SPNEGO Security enabled (True/False).
    (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)
    Default: False
```
SYMBOL INDEX (853 symbols across 49 files)

FILE: conftest.py
  function pytest_addoption (line 1) | def pytest_addoption(parser):
  function pytest_generate_tests (line 7) | def pytest_generate_tests(metafunc):

FILE: enterprise_gateway/__init__.py
  function launch_instance (line 8) | def launch_instance(*args, **kwargs):

FILE: enterprise_gateway/base/handlers.py
  class APIVersionHandler (line 18) | class APIVersionHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMi...
    method get (line 24) | def get(self):
  class NotFoundHandler (line 34) | class NotFoundHandler(JSONErrorsMixin, web.RequestHandler):
    method prepare (line 46) | def prepare(self):

FILE: enterprise_gateway/client/gateway_client.py
  class GatewayClient (line 20) | class GatewayClient:
    method __init__ (line 33) | def __init__(self, host=DEFAULT_GATEWAY_HOST, use_secure_connection=Fa...
    method start_kernel (line 44) | def start_kernel(
    method shutdown_kernel (line 83) | def shutdown_kernel(self, kernel):
  class KernelClient (line 93) | class KernelClient:
    method __init__ (line 100) | def __init__(
    method shutdown (line 130) | def shutdown(self):
    method execute (line 158) | def execute(self, code, timeout=REQUEST_TIMEOUT):
    method interrupt (line 231) | def interrupt(self):
    method restart (line 242) | def restart(self, timeout=REQUEST_TIMEOUT):
    method get_state (line 262) | def get_state(self):
    method start_interrupt_thread (line 276) | def start_interrupt_thread(self, wait_time=DEFAULT_INTERRUPT_WAIT):
    method perform_interrupt (line 281) | def perform_interrupt(self, wait_time):
    method terminate_interrupt_thread (line 286) | def terminate_interrupt_thread(self):
    method _send_request (line 292) | def _send_request(self, code):
    method _get_response (line 308) | def _get_response(self, msg_id, timeout, post_idle):
    method _read_responses (line 335) | def _read_responses(self):
    method _get_msg_id (line 385) | def _get_msg_id(message, logger):
    method _convert_raw_response (line 399) | def _convert_raw_response(raw_response_message):
    method __create_execute_request (line 407) | def __create_execute_request(msg_id, code):

FILE: enterprise_gateway/enterprisegatewayapp.py
  class EnterpriseGatewayApp (line 56) | class EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp):
    method initialize (line 86) | def initialize(self, argv: Optional[List[str]] = None) -> None:
    method init_configurables (line 100) | def init_configurables(self) -> None:
    method _create_request_handlers (line 164) | def _create_request_handlers(self) -> List[tuple]:
    method __add_authorized_hostname_match (line 189) | def __add_authorized_hostname_match(self, handler: tuple) -> None:
    method init_webapp (line 203) | def init_webapp(self) -> None:
    method _build_ssl_options (line 256) | def _build_ssl_options(self) -> Optional[ssl.SSLContext]:
    method init_http_server (line 271) | def init_http_server(self) -> None:
    method start (line 305) | def start(self) -> None:
    method shutdown (line 342) | def shutdown(self) -> None:
    method stop (line 355) | def stop(self) -> None:
    method _signal_stop (line 366) | def _signal_stop(self, sig, frame) -> None:
    method update_dynamic_configurables (line 373) | def update_dynamic_configurables(self) -> bool:
    method add_dynamic_configurable (line 409) | def add_dynamic_configurable(self, config_name: str, configurable: Con...
    method init_dynamic_configs (line 422) | def init_dynamic_configs(self) -> None:

FILE: enterprise_gateway/itests/__init__.py
  function teardown (line 6) | def teardown():

FILE: enterprise_gateway/itests/test_authorization.py
  class TestAuthorization (line 7) | class TestAuthorization(unittest.TestCase):
    method setUpClass (line 11) | def setUpClass(cls):
    method setUp (line 17) | def setUp(self):
    method tearDown (line 20) | def tearDown(self):
    method test_authorized_users (line 23) | def test_authorized_users(self):
    method test_unauthorized_users (line 34) | def test_unauthorized_users(self):

FILE: enterprise_gateway/itests/test_base.py
  class TestBase (line 12) | class TestBase:
    method get_expected_application_id (line 13) | def get_expected_application_id(self):
    method get_expected_spark_version (line 16) | def get_expected_spark_version(self):
    method get_expected_spark_master (line 19) | def get_expected_spark_master(self):
    method get_expected_deploy_mode (line 22) | def get_expected_deploy_mode(self):
    method get_expected_hostname (line 25) | def get_expected_hostname(self):

FILE: enterprise_gateway/itests/test_python_kernel.py
  class PythonKernelBaseTestCase (line 9) | class PythonKernelBaseTestCase(TestBase):
    method test_get_hostname (line 14) | def test_get_hostname(self):
    method test_hello_world (line 21) | def test_hello_world(self):
    method test_restart (line 26) | def test_restart(self):
    method test_interrupt (line 42) | def test_interrupt(self):
    method test_scope (line 80) | def test_scope(self):
  class PythonKernelBaseSparkTestCase (line 95) | class PythonKernelBaseSparkTestCase(PythonKernelBaseTestCase):
    method test_get_application_id (line 100) | def test_get_application_id(self):
    method test_get_deploy_mode (line 105) | def test_get_deploy_mode(self):
    method test_get_resource_manager (line 110) | def test_get_resource_manager(self):
    method test_get_spark_version (line 115) | def test_get_spark_version(self):
    method test_run_pi_example (line 121) | def test_run_pi_example(self):
  class TestPythonKernelLocal (line 139) | class TestPythonKernelLocal(unittest.TestCase, PythonKernelBaseTestCase):
    method setUpClass (line 143) | def setUpClass(cls):
    method tearDownClass (line 152) | def tearDownClass(cls):
  class TestPythonKernelDistributed (line 160) | class TestPythonKernelDistributed(unittest.TestCase, PythonKernelBaseTes...
    method setUpClass (line 166) | def setUpClass(cls):
    method tearDownClass (line 175) | def tearDownClass(cls):
  class TestPythonKernelClient (line 183) | class TestPythonKernelClient(unittest.TestCase, PythonKernelBaseSparkTes...
    method setUpClass (line 189) | def setUpClass(cls):
    method tearDownClass (line 198) | def tearDownClass(cls):
  class TestPythonKernelCluster (line 206) | class TestPythonKernelCluster(unittest.TestCase, PythonKernelBaseSparkTe...
    method setUpClass (line 212) | def setUpClass(cls):
    method tearDownClass (line 221) | def tearDownClass(cls):

FILE: enterprise_gateway/itests/test_r_kernel.py
  class RKernelBaseTestCase (line 9) | class RKernelBaseTestCase(TestBase):
    method test_get_hostname (line 14) | def test_get_hostname(self):
    method test_hello_world (line 19) | def test_hello_world(self):
    method test_restart (line 24) | def test_restart(self):
    method test_interrupt (line 40) | def test_interrupt(self):
  class RKernelBaseSparkTestCase (line 77) | class RKernelBaseSparkTestCase(RKernelBaseTestCase):
    method test_get_application_id (line 82) | def test_get_application_id(self):
    method test_get_spark_version (line 89) | def test_get_spark_version(self):
    method test_get_resource_manager (line 94) | def test_get_resource_manager(self):
    method test_get_deploy_mode (line 99) | def test_get_deploy_mode(self):
  class TestRKernelLocal (line 105) | class TestRKernelLocal(unittest.TestCase, RKernelBaseTestCase):
    method setUpClass (line 109) | def setUpClass(cls):
    method tearDownClass (line 118) | def tearDownClass(cls):
  class TestRKernelClient (line 126) | class TestRKernelClient(unittest.TestCase, RKernelBaseSparkTestCase):
    method setUpClass (line 132) | def setUpClass(cls):
    method tearDownClass (line 140) | def tearDownClass(cls):
  class TestRKernelCluster (line 148) | class TestRKernelCluster(unittest.TestCase, RKernelBaseSparkTestCase):
    method setUpClass (line 154) | def setUpClass(cls):
    method tearDownClass (line 163) | def tearDownClass(cls):

FILE: enterprise_gateway/itests/test_scala_kernel.py
  class ScalaKernelBaseTestCase (line 9) | class ScalaKernelBaseTestCase(TestBase):
    method test_get_hostname (line 14) | def test_get_hostname(self):
    method test_hello_world (line 23) | def test_hello_world(self):
    method test_restart (line 28) | def test_restart(self):
    method test_interrupt (line 44) | def test_interrupt(self):
  class ScalaKernelBaseSparkTestCase (line 81) | class ScalaKernelBaseSparkTestCase(ScalaKernelBaseTestCase):
    method test_get_application_id (line 86) | def test_get_application_id(self):
    method test_get_spark_version (line 91) | def test_get_spark_version(self):
    method test_get_resource_manager (line 96) | def test_get_resource_manager(self):
    method test_get_deploy_mode (line 101) | def test_get_deploy_mode(self):
  class TestScalaKernelLocal (line 107) | class TestScalaKernelLocal(unittest.TestCase, ScalaKernelBaseTestCase):
    method setUpClass (line 115) | def setUpClass(cls):
    method tearDownClass (line 124) | def tearDownClass(cls):
  class TestScalaKernelClient (line 132) | class TestScalaKernelClient(unittest.TestCase, ScalaKernelBaseSparkTestC...
    method setUpClass (line 138) | def setUpClass(cls):
    method tearDownClass (line 147) | def tearDownClass(cls):
  class TestScalaKernelCluster (line 155) | class TestScalaKernelCluster(unittest.TestCase, ScalaKernelBaseSparkTest...
    method setUpClass (line 161) | def setUpClass(cls):
    method tearDownClass (line 170) | def tearDownClass(cls):

FILE: enterprise_gateway/mixins.py
  class CORSMixin (line 35) | class CORSMixin:
    method set_default_headers (line 49) | def set_default_headers(self) -> None:
    method options (line 66) | def options(self) -> None:
  class TokenAuthorizationMixin (line 75) | class TokenAuthorizationMixin:
    method prepare (line 83) | def prepare(self) -> Optional[Awaitable[None]]:
  class JSONErrorsMixin (line 111) | class JSONErrorsMixin:
    method write_error (line 116) | def write_error(self, status_code: int, **kwargs) -> None:
  class EnterpriseGatewayConfigMixin (line 161) | class EnterpriseGatewayConfigMixin(Configurable):
    method _port_default (line 172) | def _port_default(self) -> int:
    method _port_retries_default (line 185) | def _port_retries_default(self) -> int:
    method _ip_default (line 199) | def _ip_default(self) -> str:
    method _base_url_default (line 212) | def _base_url_default(self) -> str:
    method _auth_token_default (line 222) | def _auth_token_default(self) -> str:
    method _allow_credentials_default (line 233) | def _allow_credentials_default(self) -> str:
    method _allow_headers_default (line 242) | def _allow_headers_default(self) -> str:
    method _allow_methods_default (line 251) | def _allow_methods_default(self) -> str:
    method _allow_origin_default (line 260) | def _allow_origin_default(self) -> str:
    method _expose_headers_default (line 270) | def _expose_headers_default(self) -> str:
    method _trust_xheaders_default (line 282) | def _trust_xheaders_default(self) -> bool:
    method _certfile_default (line 296) | def _certfile_default(self) -> Optional[str]:
    method _keyfile_default (line 308) | def _keyfile_default(self) -> Optional[str]:
    method _client_ca_default (line 321) | def _client_ca_default(self) -> Optional[str]:
    method _ssl_version_default (line 335) | def _ssl_version_default(self) -> Optional[int]:
    method _max_age_default (line 345) | def _max_age_default(self) -> str:
    method _max_kernels_default (line 360) | def _max_kernels_default(self) -> Optional[int]:
    method _default_kernel_name_default (line 371) | def _default_kernel_name_default(self) -> str:
    method _list_kernels_default (line 384) | def _list_kernels_default(self) -> bool:
    method _update_env_whitelist (line 396) | def _update_env_whitelist(self, change):
    method _client_envs_default (line 408) | def _client_envs_default(self):
    method _update_env_process_whitelist (line 417) | def _update_env_process_whitelist(self, change):
    method _inherited_envs_default (line 429) | def _inherited_envs_default(self) -> List[str]:
    method _kernel_headers_default (line 442) | def _kernel_headers_default(self) -> List[str]:
    method _remote_hosts_default (line 458) | def _remote_hosts_default(self) -> List[str]:
    method _load_balancing_algorithm_default (line 474) | def _load_balancing_algorithm_default(self) -> str:
    method _validate_load_balancing_algorithm (line 480) | def _validate_load_balancing_algorithm(self, proposal: Dict[str, str])...
    method _yarn_endpoint_default (line 503) | def _yarn_endpoint_default(self) -> Optional[str]:
    method _alt_yarn_endpoint_default (line 520) | def _alt_yarn_endpoint_default(self) -> Optional[str]:
    method _yarn_endpoint_security_enabled_default (line 533) | def _yarn_endpoint_security_enabled_default(self) -> bool:
    method _conductor_endpoint_default (line 553) | def _conductor_endpoint_default(self) -> Optional[str]:
    method _default_log_format (line 559) | def _default_log_format(self) -> str:
    method _impersonation_enabled_default (line 575) | def _impersonation_enabled_default(self) -> bool:
    method _unauthorized_users_default (line 591) | def _unauthorized_users_default(self) -> Set[str]:
    method _authorized_users_default (line 609) | def _authorized_users_default(self) -> Set[str]:
    method _port_range_default (line 636) | def _port_range_default(self) -> str:
    method _max_kernels_per_user_default (line 651) | def _max_kernels_per_user_default(self) -> int:
    method _ws_ping_interval_default (line 667) | def _ws_ping_interval_default(self) -> int:
    method _dynamic_config_interval_default (line 683) | def _dynamic_config_interval_default(self) -> int:
    method _dynamic_config_interval_changed (line 689) | def _dynamic_config_interval_changed(self, event: Dict[str, Any]) -> N...
    method _availability_mode_env_default (line 723) | def _availability_mode_env_default(self):
    method _authorizer_class_default (line 787) | def _authorizer_class_default(self):
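
Example (sketch): the three handler mixins above are meant to be stacked
ahead of tornado's RequestHandler, exactly as APIVersionHandler does in
base/handlers.py; the handler body here is illustrative only.

    from tornado import web

    from enterprise_gateway.mixins import (
        CORSMixin,
        JSONErrorsMixin,
        TokenAuthorizationMixin,
    )

    class PingHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, web.RequestHandler):
        # Token auth (prepare), CORS headers (set_default_headers), and
        # JSON error bodies (write_error) all arrive through the MRO;
        # only the verb method is local.
        def get(self):
            self.finish({"pong": True})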

FILE: enterprise_gateway/services/api/handlers.py
  class BaseSpecHandler (line 15) | class BaseSpecHandler(CORSMixin, web.StaticFileHandler):
    method get_resource_metadata (line 19) | def get_resource_metadata() -> tuple:
    method initialize (line 23) | def initialize(self) -> None:
    method get (line 31) | async def get(self) -> None:
    method options (line 38) | def options(self, **kwargs) -> None:
  class SpecJsonHandler (line 43) | class SpecJsonHandler(BaseSpecHandler):
    method get_resource_metadata (line 47) | def get_resource_metadata() -> tuple:
  class APIYamlHandler (line 52) | class APIYamlHandler(BaseSpecHandler):
    method get_resource_metadata (line 56) | def get_resource_metadata() -> tuple:

FILE: enterprise_gateway/services/kernels/handlers.py
  class MainKernelHandler (line 24) | class MainKernelHandler(
    method client_envs (line 32) | def client_envs(self):
    method inherited_envs (line 36) | def inherited_envs(self):
    method _build_kernel_env (line 39) | def _build_kernel_env(self, model_env: dict[str, Any]) -> dict[str, str]:
    method _build_kernel_headers (line 58) | def _build_kernel_headers(self) -> dict[str, str]:
    method post (line 79) | async def post(self):
    method get (line 117) | async def get(self):
    method options (line 133) | def options(self, **kwargs: dict[str, Any] | None):
  class KernelHandler (line 138) | class KernelHandler(
    method options (line 145) | def options(self, **kwargs: dict[str, Any] | None):
    method get (line 150) | def get(self, kernel_id: str):
    method delete (line 158) | async def delete(self, kernel_id):
  class ZMQChannelsHandler (line 164) | class ZMQChannelsHandler(
    method get (line 169) | async def get(self, kernel_id):
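
Example (sketch): these handlers implement the standard Jupyter
/api/kernels REST contract, so they can be exercised with plain HTTP;
the base URL and kernelspec name below are placeholders.

    import requests

    base = "http://localhost:8888"

    # MainKernelHandler.post starts a kernel from a kernelspec name.
    kernel = requests.post(f"{base}/api/kernels", json={"name": "python3"}).json()

    # MainKernelHandler.get lists running kernels; KernelHandler.delete
    # shuts one down.
    print(requests.get(f"{base}/api/kernels").json())
    requests.delete(f"{base}/api/kernels/{kernel['id']}")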

FILE: enterprise_gateway/services/kernels/remotemanager.py
  function import_item (line 33) | def import_item(name: str):
  function get_process_proxy_config (line 62) | def get_process_proxy_config(kernelspec: KernelSpec) -> dict[str, Any]:
  function new_kernel_id (line 89) | def new_kernel_id(**kwargs: dict[str, Any] | None) -> str:
  class TrackPendingRequests (line 131) | class TrackPendingRequests:
    method increment (line 142) | def increment(self, username: str) -> None:
    method decrement (line 148) | def decrement(self, username: str) -> None:
    method get_counts (line 154) | def get_counts(self, username: str) -> tuple[int, int]:
  class RemoteMappingKernelManager (line 159) | class RemoteMappingKernelManager(AsyncMappingKernelManager):
    method _context_default (line 164) | def _context_default(self) -> Context:
    method _kernel_manager_class_default (line 189) | def _kernel_manager_class_default(self) -> str:
    method check_kernel_id (line 192) | def check_kernel_id(self, kernel_id: str) -> None:
    method _refresh_kernel (line 198) | def _refresh_kernel(self, kernel_id: str) -> bool:
    method start_kernel (line 209) | async def start_kernel(self, *args: list[Any] | None, **kwargs: dict[s...
    method restart_kernel (line 236) | async def restart_kernel(self, kernel_id: str, now: bool = False) -> N...
    method shutdown_kernel (line 249) | async def shutdown_kernel(
    method wait_for_restart_finish (line 262) | async def wait_for_restart_finish(self, kernel_id: str, action: str = ...
    method _enforce_kernel_limits (line 282) | def _enforce_kernel_limits(self, username: str) -> None:
    method remove_kernel (line 327) | def remove_kernel(self, kernel_id: str) -> None:
    method start_kernel_from_session (line 338) | def start_kernel_from_session(
    method new_kernel_id (line 419) | def new_kernel_id(self, **kwargs: dict[str, Any] | None) -> str:
  class RemoteKernelManager (line 427) | class RemoteKernelManager(EnterpriseGatewayConfigMixin, AsyncIOLoopKerne...
    method __init__ (line 435) | def __init__(self, **kwargs: dict[str, Any] | None):
    method _link_dependent_props (line 466) | def _link_dependent_props(self) -> None:
    method start_kernel (line 496) | async def start_kernel(self, **kwargs: dict[str, Any] | None):
    method _capture_user_overrides (line 510) | def _capture_user_overrides(self, **kwargs: dict[str, Any] | None) -> ...
    method format_kernel_cmd (line 531) | def format_kernel_cmd(self, extra_arguments: list[str] | None = None) ...
    method _launch_kernel (line 557) | async def _launch_kernel(
    method request_shutdown (line 584) | def request_shutdown(self, restart: bool = False) -> None:
    method restart_kernel (line 595) | async def restart_kernel(self, now: bool = False, **kwargs: dict[str, ...
    method signal_kernel (line 650) | async def signal_kernel(self, signum: int) -> None:
    method cleanup (line 688) | def cleanup(self, connection_file: bool = True) -> None:
    method cleanup_resources (line 705) | def cleanup_resources(self, restart: bool = False) -> None:
    method write_connection_file (line 723) | def write_connection_file(self) -> None:
    method _get_process_proxy (line 745) | def _get_process_proxy(self) -> None:
    method kernel_session_manager (line 768) | def kernel_session_manager(self) -> KernelSessionManager | None:
    method cull_idle_timeout (line 775) | def cull_idle_timeout(self) -> int:
    method mapping_kernel_manager (line 782) | def mapping_kernel_manager(self) -> RemoteMappingKernelManager | None:
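
Example (sketch): get_process_proxy_config() consumes the kernelspec's
process-proxy stanza. The dict below mirrors the documented
metadata.process_proxy shape of an EG kernel.json; the image name and
config values are placeholders.

    kernel_json = {
        "display_name": "Python on Kubernetes",
        "language": "python",
        "metadata": {
            "process_proxy": {
                "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
                "config": {"image_name": "elyra/kernel-py:latest"},
            }
        },
    }

    stanza = kernel_json["metadata"]["process_proxy"]
    print(stanza["class_name"], stanza.get("config", {}))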

FILE: enterprise_gateway/services/kernelspecs/handlers.py
  function apply_user_filter (line 20) | def apply_user_filter(
  class MainKernelSpecHandler (line 64) | class MainKernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErro...
    method kernel_spec_cache (line 68) | def kernel_spec_cache(self) -> KernelSpecCache:
    method get (line 72) | async def get(self) -> None:
  class KernelSpecHandler (line 119) | class KernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMi...
    method kernel_spec_cache (line 123) | def kernel_spec_cache(self) -> KernelSpecCache:
    method get (line 127) | async def get(self, kernel_name: str) -> None:
  class KernelSpecResourceHandler (line 159) | class KernelSpecResourceHandler(
    method kernel_spec_cache (line 167) | def kernel_spec_cache(self) -> KernelSpecCache:
    method initialize (line 170) | def initialize(self) -> None:
    method get (line 175) | async def get(self, kernel_name: str, path: str, include_body: bool = ...
    method head (line 187) | def head(self, kernel_name: str, path: str) -> None:

FILE: enterprise_gateway/services/kernelspecs/kernelspec_cache.py
  class KernelSpecCache (line 23) | class KernelSpecCache(SingletonConfigurable):
    method _cache_enabled_default (line 44) | def _cache_enabled_default(self):
    method __init__ (line 47) | def __init__(self, kernel_spec_manager, **kwargs) -> None:
    method get_kernel_spec (line 53) | async def get_kernel_spec(self, kernel_name: str) -> KernelSpec:
    method get_all_specs (line 69) | async def get_all_specs(self) -> Dict[str, CacheItemType]:
    method get_item (line 91) | def get_item(self, kernel_name: str) -> Optional[KernelSpec]:
    method get_all_items (line 112) | def get_all_items(self) -> Dict[str, CacheItemType]:
    method put_item (line 127) | def put_item(self, kernel_name: str, cache_item: Union[KernelSpec, Cac...
    method put_all_items (line 151) | def put_all_items(self, kernelspecs: Dict[str, CacheItemType]) -> None:
    method remove_item (line 156) | def remove_item(self, kernel_name: str) -> Optional[CacheItemType]:
    method _initialize (line 164) | def _initialize(self):
    method kernel_spec_to_cache_item (line 195) | def kernel_spec_to_cache_item(kernelspec: KernelSpec) -> CacheItemType:
    method cache_item_to_kernel_spec (line 203) | def cache_item_to_kernel_spec(cache_item: CacheItemType) -> KernelSpec:
  class KernelSpecChangeHandler (line 209) | class KernelSpecChangeHandler(FileSystemEventHandler):
    method __init__ (line 217) | def __init__(self, kernel_spec_cache: KernelSpecCache, **kwargs):
    method dispatch (line 223) | def dispatch(self, event):
    method on_created (line 246) | def on_created(self, event):
    method on_deleted (line 262) | def on_deleted(self, event):
    method on_modified (line 267) | def on_modified(self, event):
    method on_moved (line 283) | def on_moved(self, event):
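
Example (sketch): KernelSpecChangeHandler extends watchdog's
FileSystemEventHandler, which implies an Observer scheduled over the
kernelspec directories so filesystem events refresh the cache. The path
and handler body below are illustrative, not the cache's actual wiring.

    from watchdog.events import FileSystemEventHandler
    from watchdog.observers import Observer

    class LoggingHandler(FileSystemEventHandler):
        def on_modified(self, event):
            print("kernelspec changed:", event.src_path)

    observer = Observer()
    observer.schedule(LoggingHandler(), "/usr/local/share/jupyter/kernels", recursive=True)
    observer.start()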

FILE: enterprise_gateway/services/processproxies/conductor.py
  class ConductorClusterProcessProxy (line 31) | class ConductorClusterProcessProxy(RemoteProcessProxy):
    method __init__ (line 39) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 52) | async def launch_process(
    method _update_launch_info (line 99) | def _update_launch_info(self, kernel_cmd: list[str], env_dict: dict) -...
    method _update_notebook_master_rest_url (line 137) | def _update_notebook_master_rest_url(self, env_dict: dict) -> None:
    method poll (line 235) | def poll(self) -> bool | None:
    method send_signal (line 250) | def send_signal(self, signum: int) -> bool | None:
    method kill (line 264) | def kill(self) -> bool | None:
    method cleanup (line 296) | def cleanup(self) -> None:
    method _parse_driver_submission_id (line 317) | def _parse_driver_submission_id(self, submission_response: str) -> None:
    method confirm_remote_startup (line 347) | async def confirm_remote_startup(self) -> None:
    method _get_application_state (line 388) | def _get_application_state(self) -> str:
    method handle_timeout (line 406) | async def handle_timeout(self) -> None:
    method _get_application_id (line 436) | def _get_application_id(self, ignore_final_states: bool = False) -> str:
    method get_process_info (line 473) | def get_process_info(self) -> dict[str, Any]:
    method load_process_info (line 482) | def load_process_info(self, process_info: dict[str, Any]) -> None:
    method _query_app_by_driver_id (line 490) | def _query_app_by_driver_id(self, driver_id: str) -> dict | None:
    method _query_app_by_id (line 523) | def _query_app_by_id(self, app_id: str) -> dict | None:
    method _query_app_state_by_driver_id (line 553) | def _query_app_state_by_driver_id(self, driver_id: str) -> dict | None:
    method _get_driver_by_app_id (line 567) | def _get_driver_by_app_id(self, app_id: str) -> dict | None:
    method _kill_app_by_driver_id (line 584) | def _kill_app_by_driver_id(self, driver_id: str):
    method _performRestCall (line 630) | def _performRestCall(self, cmd: list[str], url: str, HA_LIST: list[str...
    method _performConductorJWTLogonAndRetrieval (line 657) | def _performConductorJWTLogonAndRetrieval(  # noqa

FILE: enterprise_gateway/services/processproxies/container.py
  function _parse_prohibited_ids (line 30) | def _parse_prohibited_ids(env_var: str, default: str) -> list[int]:
  class ContainerProcessProxy (line 70) | class ContainerProcessProxy(RemoteProcessProxy):
    method __init__ (line 75) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method _determine_kernel_images (line 81) | def _determine_kernel_images(self, **kwargs: dict[str, Any] | None) ->...
    method launch_process (line 106) | async def launch_process(
    method _enforce_prohibited_ids (line 139) | def _enforce_prohibited_ids(self, **kwargs: dict[str, Any] | None) -> ...
    method poll (line 190) | def poll(self) -> bool | None:
    method send_signal (line 212) | def send_signal(self, signum: int) -> bool | None:
    method kill (line 229) | def kill(self) -> bool | None:
    method shutdown_listener (line 243) | def shutdown_listener(self):
    method confirm_remote_startup (line 249) | async def confirm_remote_startup(self) -> None:
    method get_process_info (line 276) | def get_process_info(self) -> dict[str, Any]:
    method load_process_info (line 286) | def load_process_info(self, process_info: dict[str, Any]) -> None:
    method get_initial_states (line 292) | def get_initial_states(self):
    method get_error_states (line 297) | def get_error_states(self):
    method get_container_status (line 302) | def get_container_status(self, iteration: int | None) -> str:
    method terminate_container_resources (line 307) | def terminate_container_resources(self):
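
Example (sketch): a standalone parser matching the contract that
_parse_prohibited_ids() and its tests (test_process_proxy.py) suggest: a
comma-separated env var of numeric ids becomes a list[int], empty
entries are ignored, and non-numeric entries raise ValueError. The
env-var name used here is a placeholder.

    import os

    def parse_prohibited_ids(env_var: str, default: str) -> list[int]:
        ids: list[int] = []
        for entry in os.environ.get(env_var, default).split(","):
            entry = entry.strip()
            if not entry:
                continue  # empty entries are ignored
            if not entry.isdigit():
                raise ValueError(f"{env_var}: expected a numeric id, got {entry!r}")
            ids.append(int(entry))
        return ids

    print(parse_prohibited_ids("EG_PROHIBITED_UIDS", "0"))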

FILE: enterprise_gateway/services/processproxies/crd.py
  class CustomResourceProcessProxy (line 18) | class CustomResourceProcessProxy(KubernetesProcessProxy):
    method __init__ (line 27) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 33) | async def launch_process(
    method get_container_status (line 46) | def get_container_status(self, iteration: int | None) -> str:
    method delete_managed_object (line 101) | def delete_managed_object(self, termination_stati: list[str]) -> bool:
    method get_initial_states (line 123) | def get_initial_states(self) -> set:
    method _get_exception_text (line 127) | def _get_exception_text(self, error_message):

FILE: enterprise_gateway/services/processproxies/distributed.py
  class TrackKernelOnHost (line 25) | class TrackKernelOnHost:
    method add_kernel_id (line 31) | def add_kernel_id(self, host: str, kernel_id: str) -> None:
    method delete_kernel_id (line 36) | def delete_kernel_id(self, kernel_id: str) -> None:
    method min_or_remote_host (line 43) | def min_or_remote_host(self, remote_host: str | None = None) -> str:
    method increment (line 49) | def increment(self, host: str) -> None:
    method decrement (line 54) | def decrement(self, host: str) -> None:
    method init_host_kernels (line 59) | def init_host_kernels(self, hosts) -> None:
  class DistributedProcessProxy (line 65) | class DistributedProcessProxy(RemoteProcessProxy):
    method __init__ (line 73) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 87) | async def launch_process(
    method _launch_remote_process (line 122) | def _launch_remote_process(self, kernel_cmd: str, **kwargs: dict[str, ...
    method _build_startup_command (line 147) | def _build_startup_command(self, argv_cmd: str, **kwargs: dict[str, An...
    method _determine_next_host (line 181) | def _determine_next_host(self, env_dict: dict) -> str:
    method _unregister_assigned_host (line 197) | def _unregister_assigned_host(self) -> None:
    method confirm_remote_startup (line 201) | async def confirm_remote_startup(self) -> None:
    method handle_timeout (line 219) | async def handle_timeout(self) -> None:
    method cleanup (line 237) | def cleanup(self) -> None:
    method shutdown_listener (line 248) | def shutdown_listener(self) -> None:
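
Example (sketch): TrackKernelOnHost's min_or_remote_host(), increment(),
and decrement() methods suggest least-connection host selection. This
toy tracker shows only the idea and is not the class above.

    from collections import Counter

    class HostTracker:
        def __init__(self, hosts: list[str]):
            self.counts = Counter({host: 0 for host in hosts})

        def next_host(self) -> str:
            host = min(self.counts, key=self.counts.get)  # least-loaded wins
            self.counts[host] += 1
            return host

    tracker = HostTracker(["node1", "node2"])
    print([tracker.next_host() for _ in range(4)])  # spreads across hosts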

FILE: enterprise_gateway/services/processproxies/docker_swarm.py
  class DockerSwarmProcessProxy (line 30) | class DockerSwarmProcessProxy(ContainerProcessProxy):
    method __init__ (line 35) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 39) | def launch_process(
    method get_initial_states (line 50) | def get_initial_states(self) -> set:
    method get_error_states (line 54) | def get_error_states(self) -> set:
    method _get_service (line 58) | def _get_service(self) -> Service:
    method _get_task (line 74) | def _get_task(self) -> dict:
    method get_container_status (line 93) | def get_container_status(self, iteration: int | None) -> str:
    method terminate_container_resources (line 130) | def terminate_container_resources(self) -> bool | None:
  class DockerProcessProxy (line 167) | class DockerProcessProxy(ContainerProcessProxy):
    method __init__ (line 170) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 174) | def launch_process(
    method get_initial_states (line 183) | def get_initial_states(self) -> set:
    method get_error_states (line 187) | def get_error_states(self) -> set:
    method _get_container (line 191) | def _get_container(self) -> Container:
    method get_container_status (line 208) | def get_container_status(self, iteration: int | None) -> str:
    method terminate_container_resources (line 254) | def terminate_container_resources(self) -> bool | None:

FILE: enterprise_gateway/services/processproxies/k8s.py
  function get_subject_class (line 37) | def get_subject_class():
  class KubernetesProcessProxy (line 57) | class KubernetesProcessProxy(ContainerProcessProxy):
    method __init__ (line 68) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 76) | async def launch_process(
    method get_initial_states (line 96) | def get_initial_states(self) -> set:
    method get_error_states (line 100) | def get_error_states(self) -> set:
    method get_container_status (line 104) | def get_container_status(self, iteration: int | None) -> str:
    method delete_managed_object (line 133) | def delete_managed_object(self, termination_stati: list[str]) -> bool:
    method terminate_container_resources (line 157) | def terminate_container_resources(self) -> bool | None:
    method _safe_template_substitute (line 239) | def _safe_template_substitute(self, template_str: str, variables: dict...
    method _determine_kernel_pod_name (line 275) | def _determine_kernel_pod_name(self, **kwargs: dict[str, Any] | None) ...
    method _determine_kernel_namespace (line 314) | def _determine_kernel_namespace(self, **kwargs: dict[str, Any] | None)...
    method _determine_kernel_service_account_name (line 342) | def _determine_kernel_service_account_name(**kwargs: dict[str, Any] | ...
    method _create_kernel_namespace (line 351) | def _create_kernel_namespace(self, service_account_name: str) -> str:
    method _create_role_binding (line 402) | def _create_role_binding(self, namespace: str, service_account_name: s...
    method get_process_info (line 439) | def get_process_info(self) -> dict[str, Any]:
    method load_process_info (line 447) | def load_process_info(self, process_info: dict[str, Any]) -> None:
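
Example (sketch): _safe_template_substitute() and the fallback test in
test_process_proxy.py point at substitution that tolerates missing
variables. string.Template.safe_substitute() behaves that way; whether
the method uses it internally is an assumption.

    from string import Template

    template = Template("${kernel_username}-${kernel_id}")
    print(template.safe_substitute({"kernel_username": "alice"}))
    # -> 'alice-${kernel_id}': unknown variables pass through unresolved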

FILE: enterprise_gateway/services/processproxies/processproxy.py
  function _get_local_ip (line 93) | def _get_local_ip() -> str:
  class KernelChannel (line 113) | class KernelChannel(Enum):
  class Response (line 128) | class Response(asyncio.Event):
    method response (line 134) | def response(self):
    method response (line 138) | def response(self, value):
  class ResponseManager (line 144) | class ResponseManager(SingletonConfigurable):
    method __init__ (line 165) | def __init__(self, **kwargs: dict[str, Any] | None):
    method public_key (line 185) | def public_key(self) -> str:
    method response_address (line 195) | def response_address(self) -> str:
    method register_event (line 198) | def register_event(self, kernel_id: str) -> None:
    method get_connection_info (line 202) | async def get_connection_info(self, kernel_id: str) -> dict:
    method _prepare_response_socket (line 207) | def _prepare_response_socket(self) -> None:
    method _start_response_manager (line 254) | def _start_response_manager(self) -> None:
    method stop_response_manager (line 263) | def stop_response_manager(self) -> None:
    method _process_connections (line 272) | async def _process_connections(self) -> None:
    method _decode_payload (line 295) | def _decode_payload(self, data: json) -> dict:
    method _post_connection (line 381) | def _post_connection(self, connection_info: dict) -> None:
  class BaseProcessProxyABC (line 397) | class BaseProcessProxyABC(metaclass=abc.ABCMeta):
    method __init__ (line 405) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 486) | async def launch_process(self, kernel_cmd: str, **kwargs: dict[str, An...
    method launch_kernel (line 537) | def launch_kernel(
    method cleanup (line 551) | def cleanup(self) -> None:  # noqa
    method poll (line 555) | def poll(self) -> Any | None:
    method wait (line 567) | def wait(self) -> int | None:
    method send_signal (line 590) | def send_signal(self, signum: int) -> bool | None:
    method kill (line 618) | def kill(self) -> bool | None:
    method terminate (line 645) | def terminate(self) -> bool | None:
    method ip_is_local (line 667) | def ip_is_local(ip: str) -> bool:
    method _get_ssh_client (line 673) | def _get_ssh_client(self, host: str) -> SSHClient | None:
    method rsh (line 726) | def rsh(self, host: str, command: str) -> list[str]:
    method remote_signal (line 758) | def remote_signal(self, signum: int) -> bool | None:
    method local_signal (line 786) | def local_signal(self, signum: int) -> bool | None:
    method _enforce_authorization (line 804) | def _enforce_authorization(self, **kwargs: dict[str, Any] | None) -> N...
    method _raise_authorization_error (line 839) | def _raise_authorization_error(self, kernel_username: str, differentia...
    method get_process_info (line 851) | def get_process_info(self) -> dict[str, Any]:
    method load_process_info (line 861) | def load_process_info(self, process_info: dict[str, Any]) -> None:
    method _validate_port_range (line 873) | def _validate_port_range(self) -> None:
    method select_ports (line 947) | def select_ports(self, count: int) -> list[int]:
    method select_socket (line 970) | def select_socket(self, ip: str | None = "") -> socket:
    method _get_candidate_port (line 1000) | def _get_candidate_port(self) -> int:
    method log_and_raise (line 1010) | def log_and_raise(self, http_status_code: int | None = None, reason: s...
  class LocalProcessProxy (line 1035) | class LocalProcessProxy(BaseProcessProxyABC):
    method __init__ (line 1042) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method launch_process (line 1047) | async def launch_process(
  class RemoteProcessProxy (line 1070) | class RemoteProcessProxy(BaseProcessProxyABC, metaclass=abc.ABCMeta):
    method __init__ (line 1075) | def __init__(self, kernel_manager, proxy_config):
    method launch_process (line 1095) | async def launch_process(self, kernel_cmd, **kwargs):
    method confirm_remote_startup (line 1108) | def confirm_remote_startup(self):
    method detect_launch_failure (line 1112) | def detect_launch_failure(self) -> None:
    method _tunnel_to_kernel (line 1139) | def _tunnel_to_kernel(
    method _tunnel_to_port (line 1183) | def _tunnel_to_port(
    method _create_ssh_tunnel (line 1203) | def _create_ssh_tunnel(
    method _spawn_ssh_tunnel (line 1233) | def _spawn_ssh_tunnel(
    method _get_keep_alive_interval (line 1276) | def _get_keep_alive_interval(self, kernel_channel: KernelChannel) -> int:
    method receive_connection_info (line 1297) | async def receive_connection_info(self) -> bool:
    method _setup_connection_info (line 1325) | def _setup_connection_info(self, connect_info: dict) -> None:
    method _update_connection (line 1395) | def _update_connection(self, connect_info: dict) -> None:
    method _close_response_socket (line 1428) | def _close_response_socket(self) -> None:
    method _extract_pid_info (line 1439) | def _extract_pid_info(self, connect_info: dict) -> None:
    method handle_timeout (line 1470) | async def handle_timeout(self):
    method cleanup (line 1486) | def cleanup(self):
    method _send_listener_request (line 1499) | def _send_listener_request(self, request: dict, shutdown_socket: bool ...
    method send_signal (line 1534) | def send_signal(self, signum):
    method shutdown_listener (line 1567) | def shutdown_listener(self):
    method get_process_info (line 1602) | def get_process_info(self):
    method load_process_info (line 1618) | def load_process_info(self, process_info):
    method log_and_raise (line 1635) | def log_and_raise(self, http_status_code: int | None = None, reason: s...
    method get_current_time (line 1644) | def get_current_time():
    method get_time_diff (line 1649) | def get_time_diff(time1, time2):
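
Example (sketch): BaseProcessProxyABC defines the lifecycle contract a
custom proxy must satisfy. The skeleton below names the core methods
with Popen-like semantics; it omits the real constructor plumbing
(kernel_manager, proxy_config) and is illustrative only.

    import abc

    class ProcessProxySketch(abc.ABC):
        @abc.abstractmethod
        async def launch_process(self, kernel_cmd, **kwargs):
            """Start the kernel and return a process-like handle."""

        @abc.abstractmethod
        def poll(self):
            """Return None while the process is still alive."""

        @abc.abstractmethod
        def send_signal(self, signum):
            """Deliver a signal; signum 0 doubles as a liveness probe."""

        @abc.abstractmethod
        def kill(self):
            """Terminate the process forcefully."""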

FILE: enterprise_gateway/services/processproxies/spark_operator.py
  class SparkOperatorProcessProxy (line 11) | class SparkOperatorProcessProxy(CustomResourceProcessProxy):
    method __init__ (line 20) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...

FILE: enterprise_gateway/services/processproxies/yarn.py
  class YarnClusterProcessProxy (line 42) | class YarnClusterProcessProxy(RemoteProcessProxy):
    method __init__ (line 50) | def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: ...
    method _initialize_resource_manager (line 84) | def _initialize_resource_manager(self, **kwargs: dict[str, Any] | None...
    method launch_process (line 126) | async def launch_process(
    method confirm_yarn_queue_availability (line 154) | def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | N...
    method handle_yarn_queue_timeout (line 256) | def handle_yarn_queue_timeout(self) -> None:
    method poll (line 270) | def poll(self) -> bool | None:
    method send_signal (line 289) | def send_signal(self, signum: int) -> bool | None:
    method kill (line 305) | def kill(self) -> bool | None:
    method cleanup (line 334) | def cleanup(self) -> None:
    method confirm_remote_startup (line 355) | async def confirm_remote_startup(self) -> None:
    method handle_timeout (line 391) | async def handle_timeout(self) -> None:
    method get_process_info (line 426) | def get_process_info(self) -> dict[str, Any]:
    method load_process_info (line 432) | def load_process_info(self, process_info: dict[str, Any]) -> None:
    method _get_application_state (line 437) | def _get_application_state(self) -> str:
    method _get_application_id (line 454) | def _get_application_id(self, ignore_final_states: bool = False) -> str:
    method _query_app_by_name (line 484) | def _query_app_by_name(self, kernel_id: str) -> dict:
    method _query_app_by_id (line 533) | def _query_app_by_id(self, app_id: str) -> dict:
    method _query_app_state_by_id (line 553) | def _query_app_state_by_id(self, app_id: str) -> str:
    method _kill_app_by_id (line 573) | def _kill_app_by_id(self, app_id: str) -> Response:

FILE: enterprise_gateway/services/sessions/handlers.py
  class SessionRootHandler (line 13) | class SessionRootHandler(
    method get (line 20) | async def get(self) -> None:

FILE: enterprise_gateway/services/sessions/kernelsessionmanager.py
  class KernelSessionManager (line 26) | class KernelSessionManager(LoggingConfigurable):
    method _session_persistence_default (line 55) | def _session_persistence_default(self) -> bool:
    method _persistence_root_default (line 72) | def _persistence_root_default(self) -> str:
    method __init__ (line 75) | def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwarg...
    method create_session (line 82) | def create_session(self, kernel_id: str, **kwargs) -> None:
    method refresh_session (line 113) | def refresh_session(self, kernel_id: str) -> None:
    method _save_session (line 130) | def _save_session(self, kernel_id: str, kernel_session: dict) -> None:
    method start_session (line 147) | def start_session(self, kernel_id: str) -> bool | None:
    method start_sessions (line 154) | def start_sessions(self) -> None:
    method _start_session (line 182) | def _start_session(self, kernel_session: dict) -> bool:
    method delete_session (line 198) | def delete_session(self, kernel_id: str) -> None:
    method _delete_sessions (line 207) | def _delete_sessions(self, kernel_ids: list[str]) -> None:
    method pre_save_transformation (line 228) | def pre_save_transformation(session: dict) -> dict:
    method post_load_transformation (line 241) | def post_load_transformation(session: dict) -> dict:
    method load_sessions (line 254) | def load_sessions(self) -> None:
    method load_session (line 262) | def load_session(self, kernel_id: str) -> None:
    method delete_sessions (line 271) | def delete_sessions(self, kernel_ids: list[str]) -> None:
    method save_session (line 278) | def save_session(self, kernel_id: str) -> None:
    method active_sessions (line 285) | def active_sessions(self, username: str) -> int:
    method get_kernel_username (line 303) | def get_kernel_username(**kwargs) -> str:
  class FileKernelSessionManager (line 330) | class FileKernelSessionManager(KernelSessionManager):
    method _persistence_root_default (line 338) | def _persistence_root_default(self) -> str:
    method __init__ (line 341) | def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwarg...
    method delete_sessions (line 347) | def delete_sessions(self, kernel_ids: list[str]) -> None:
    method save_session (line 356) | def save_session(self, kernel_id: str) -> None:
    method load_sessions (line 366) | def load_sessions(self) -> None:
    method load_session (line 377) | def load_session(self, kernel_id: str) -> None:
    method _load_session_from_file (line 383) | def _load_session_from_file(self, file_name: str) -> None:
    method _get_sessions_loc (line 401) | def _get_sessions_loc(self) -> str:
  class WebhookKernelSessionManager (line 408) | class WebhookKernelSessionManager(KernelSessionManager):
    method _webhook_url_default (line 427) | def _webhook_url_default(self) -> str | None:
    method _webhook_username_default (line 439) | def _webhook_username_default(self) -> str | None:
    method _webhook_password_default (line 451) | def _webhook_password_default(self) -> str | None:
    method _auth_type_default (line 464) | def _auth_type_default(self) -> str | None:
    method __init__ (line 467) | def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwarg...
    method delete_sessions (line 486) | def delete_sessions(self, kernel_ids: list[str]) -> None:
    method save_session (line 500) | def save_session(self, kernel_id: str) -> None:
    method load_sessions (line 517) | def load_sessions(self) -> None:
    method load_session (line 530) | def load_session(self, kernel_id: str) -> None:
    method _load_session_from_response (line 544) | def _load_session_from_response(self, kernel_session: dict) -> None:
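
Example (sketch): FileKernelSessionManager and WebhookKernelSessionManager
both override the same persistence hooks (save_session, load_sessions,
load_session, delete_sessions). This standalone toy mirrors that hook
shape with a dict; it is not a working KernelSessionManager subclass.

    class InMemorySessionStore:
        def __init__(self):
            self._store: dict[str, dict] = {}

        def save_session(self, kernel_id: str, session: dict) -> None:
            self._store[kernel_id] = session

        def load_sessions(self) -> list[dict]:
            return list(self._store.values())

        def delete_sessions(self, kernel_ids: list[str]) -> None:
            for kernel_id in kernel_ids:
                self._store.pop(kernel_id, None)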

FILE: enterprise_gateway/services/sessions/sessionmanager.py
  class SessionManager (line 15) | class SessionManager(LoggingConfigurable):
    method __init__ (line 34) | def __init__(self, kernel_manager: RemoteMappingKernelManager, *args, ...
    method session_exists (line 41) | def session_exists(self, path: str, *args, **kwargs) -> bool:
    method new_session_id (line 55) | def new_session_id(self) -> str:
    method create_session (line 59) | async def create_session(
    method save_session (line 90) | def save_session(
    method get_session_by_key (line 121) | def get_session_by_key(self, key: Hashable, val: Any, *args, **kwargs)...
    method get_session (line 139) | def get_session(self, **kwargs) -> dict:
    method update_session (line 182) | def update_session(self, session_id: str, *args, **kwargs) -> None:
    method row_to_model (line 219) | def row_to_model(self, row: dict, *args, **kwargs) -> dict:
    method list_sessions (line 243) | def list_sessions(self, *args, **kwargs) -> List[dict]:
    method delete_session (line 254) | async def delete_session(self, session_id: str, *args, **kwargs) -> None:

FILE: enterprise_gateway/tests/__init__.py
  function teardown (line 6) | def teardown():

FILE: enterprise_gateway/tests/test_enterprise_gateway.py
  class TestEnterpriseGateway (line 18) | class TestEnterpriseGateway(TestHandlers):
    method setUp (line 19) | def setUp(self):
    method test_max_kernels_per_user (line 26) | def test_max_kernels_per_user(self):
    method test_authorization (line 73) | def test_authorization(self):
    method test_port_range (line 100) | def test_port_range(self):
    method test_dynamic_updates (line 154) | def test_dynamic_updates(self):
    method test_kernel_id_env_var (line 204) | def test_kernel_id_env_var(self):

FILE: enterprise_gateway/tests/test_gatewayapp.py
  class TestGatewayAppConfig (line 17) | class TestGatewayAppConfig(unittest.TestCase):
    method setUp (line 20) | def setUp(self):
    method tearDown (line 24) | def tearDown(self):
    method _assert_envs_to_traitlets (line 29) | def _assert_envs_to_traitlets(self, env_prefix: str):
    method test_config_env_vars_bc (line 56) | def test_config_env_vars_bc(self):
    method test_config_env_vars (line 79) | def test_config_env_vars(self):
    method test_ssl_options_no_config (line 105) | def test_ssl_options_no_config(self):
    method test_authorizer_class_default (line 110) | def test_authorizer_class_default(self):
    method test_authorizer_class_env_var (line 123) | def test_authorizer_class_env_var(self):
  class TestGatewayAppBase (line 161) | class TestGatewayAppBase(AsyncHTTPTestCase, ExpectLog):
    method tearDown (line 171) | def tearDown(self):
    method get_app (line 178) | def get_app(self):
    method setup_app (line 189) | def setup_app(self):
    method setup_configurables (line 195) | def setup_configurables(self):

FILE: enterprise_gateway/tests/test_handlers.py
  class TestHandlers (line 17) | class TestHandlers(TestGatewayAppBase):
    method setup_app (line 22) | def setup_app(self):
    method tearDown (line 33) | def tearDown(self):
    method spawn_kernel (line 47) | def spawn_kernel(self, kernel_body="{}"):
    method execute_request (line 76) | def execute_request(self, code):
    method await_stream (line 110) | def await_stream(self, ws):
  class TestDefaults (line 121) | class TestDefaults(TestHandlers):
    method test_startup (line 125) | def test_startup(self):
    method test_headless (line 132) | def test_headless(self):
    method test_check_origin (line 142) | def test_check_origin(self):
    method test_auth_token (line 164) | def test_auth_token(self):
    method test_cors_headers (line 252) | def test_cors_headers(self):
    method test_max_kernels (line 277) | def test_max_kernels(self):
    method test_get_api (line 308) | def test_get_api(self):
    method test_get_kernelspecs (line 317) | def test_get_kernelspecs(self):
    method test_get_kernels (line 326) | def test_get_kernels(self):
    method test_kernel_comm (line 349) | def test_kernel_comm(self):
    method test_no_discovery (line 386) | def test_no_discovery(self):
    method test_crud_sessions (line 395) | def test_crud_sessions(self):
    method test_json_errors (line 435) | def test_json_errors(self):
    method test_kernel_env (line 460) | def test_kernel_env(self):
    method test_kernel_defaults (line 494) | def test_kernel_defaults(self):
    method test_get_swagger_yaml_spec (line 533) | def test_get_swagger_yaml_spec(self):
    method test_get_swagger_json_spec (line 539) | def test_get_swagger_json_spec(self):
    method test_kernel_env_auth_token (line 545) | def test_kernel_env_auth_token(self):
  class TestCustomDefaultKernel (line 561) | class TestCustomDefaultKernel(TestHandlers):
    method setup_app (line 564) | def setup_app(self):
    method test_default_kernel_name (line 568) | def test_default_kernel_name(self):
  class TestEnableDiscovery (line 578) | class TestEnableDiscovery(TestHandlers):
    method setup_configurables (line 581) | def setup_configurables(self):
    method test_enable_kernel_list (line 586) | def test_enable_kernel_list(self):
  class TestBaseURL (line 600) | class TestBaseURL(TestHandlers):
    method setup_app (line 603) | def setup_app(self):
    method setup_configurables (line 607) | def setup_configurables(self):
    method test_base_url (line 612) | def test_base_url(self):
  class TestRelativeBaseURL (line 627) | class TestRelativeBaseURL(TestHandlers):
    method setup_app (line 630) | def setup_app(self):
    method test_base_url (line 635) | def test_base_url(self):
  class TestWildcardEnvs (line 646) | class TestWildcardEnvs(TestHandlers):
    method setup_app (line 649) | def setup_app(self):
    method test_kernel_wildcard_env (line 656) | def test_kernel_wildcard_env(self):

FILE: enterprise_gateway/tests/test_kernelspec_cache.py
  function mkdir (line 19) | def mkdir(tmp_path, *parts):
  function environ (line 37) | def environ(
  function _install_kernelspec (line 70) | def _install_kernelspec(kernels_dir, kernel_name):
  function _modify_kernelspec (line 82) | def _modify_kernelspec(kernelspec_dir, kernel_name):
  function setup_kernelspecs (line 97) | def setup_kernelspecs(environ, kernelspec_location):
  function kernel_spec_manager (line 105) | def kernel_spec_manager(environ, setup_kernelspecs):
  function kernel_spec_cache (line 110) | def kernel_spec_cache(is_enabled, kernel_spec_manager):
  function is_enabled (line 120) | def is_enabled(request):
  function tests_get_all_specs (line 124) | async def tests_get_all_specs(kernel_spec_cache):
  function tests_get_named_spec (line 129) | async def tests_get_named_spec(kernel_spec_cache):
  function tests_get_modified_spec (line 134) | async def tests_get_modified_spec(kernel_spec_cache):
  function tests_add_spec (line 145) | async def tests_add_spec(kernel_spec_cache, kernelspec_location, other_k...
  function tests_remove_spec (line 178) | async def tests_remove_spec(kernel_spec_cache):
  function tests_get_missing (line 191) | async def tests_get_missing(kernel_spec_cache):

FILE: enterprise_gateway/tests/test_mixins.py
  class SuperTokenAuthHandler (line 19) | class SuperTokenAuthHandler:
    method prepare (line 24) | def prepare(self):
  class TestableTokenAuthHandler (line 29) | class TestableTokenAuthHandler(TokenAuthorizationMixin, SuperTokenAuthHa...
    method __init__ (line 34) | def __init__(self, token=""):
    method send_error (line 40) | def send_error(self, status_code):
    method get_argument (line 43) | def get_argument(self, name, default=""):
  class TestTokenAuthMixin (line 47) | class TestTokenAuthMixin(unittest.TestCase):
    method setUp (line 50) | def setUp(self):
    method test_no_token_required (line 54) | def test_no_token_required(self):
    method test_missing_token (line 61) | def test_missing_token(self):
    method test_valid_header_token (line 69) | def test_valid_header_token(self):
    method test_wrong_header_token (line 77) | def test_wrong_header_token(self):
    method test_valid_url_token (line 85) | def test_valid_url_token(self):
    method test_wrong_url_token (line 94) | def test_wrong_url_token(self):
    method test_differing_tokens_valid_url (line 103) | def test_differing_tokens_valid_url(self):
    method test_differing_tokens_wrong_url (line 112) | def test_differing_tokens_wrong_url(self):
  class TestableJSONErrorsHandler (line 122) | class TestableJSONErrorsHandler(JSONErrorsMixin):
    method __init__ (line 127) | def __init__(self):
    method finish (line 133) | def finish(self, response):
    method set_status (line 136) | def set_status(self, status_code, reason=None):
    method set_header (line 140) | def set_header(self, name, value):
  class TestJSONErrorsMixin (line 144) | class TestJSONErrorsMixin(unittest.TestCase):
    method setUp (line 147) | def setUp(self):
    method test_status (line 151) | def test_status(self):
    method test_custom_status (line 159) | def test_custom_status(self):
    method test_log_message (line 169) | def test_log_message(self):

FILE: enterprise_gateway/tests/test_process_proxy.py
  class TestParseProhibitedIds (line 18) | class TestParseProhibitedIds(unittest.TestCase):
    method test_default_value (line 21) | def test_default_value(self):
    method test_multiple_values (line 27) | def test_multiple_values(self):
    method test_values_with_spaces (line 32) | def test_values_with_spaces(self):
    method test_invalid_entries_raise_value_error (line 37) | def test_invalid_entries_raise_value_error(self):
    method test_username_instead_of_uid_raises_value_error (line 44) | def test_username_instead_of_uid_raises_value_error(self):
    method test_empty_entries_ignored (line 50) | def test_empty_entries_ignored(self):
  class TestContainerProxyProhibitedIds (line 56) | class TestContainerProxyProhibitedIds(unittest.TestCase):
    method setUp (line 59) | def setUp(self):
    method _make_kwargs (line 72) | def _make_kwargs(self, uid=None, gid=None):
    method test_valid_uid_gid_passes (line 80) | def test_valid_uid_gid_passes(self):
    method test_defaults_used_when_not_provided (line 86) | def test_defaults_used_when_not_provided(self):
    method test_prohibited_uid_exact_match (line 92) | def test_prohibited_uid_exact_match(self):
    method test_prohibited_gid_exact_match (line 98) | def test_prohibited_gid_exact_match(self):
    method test_trailing_whitespace_uid_denied (line 104) | def test_trailing_whitespace_uid_denied(self):
    method test_leading_whitespace_uid_denied (line 110) | def test_leading_whitespace_uid_denied(self):
    method test_leading_zeros_uid_denied (line 116) | def test_leading_zeros_uid_denied(self):
    method test_plus_sign_uid_denied (line 122) | def test_plus_sign_uid_denied(self):
    method test_non_numeric_uid_rejected (line 128) | def test_non_numeric_uid_rejected(self):
    method test_empty_uid_rejected (line 134) | def test_empty_uid_rejected(self):
    method test_negative_uid_rejected (line 140) | def test_negative_uid_rejected(self):
    method test_negative_gid_rejected (line 147) | def test_negative_gid_rejected(self):
    method test_uid_exceeding_uint32_max_rejected (line 154) | def test_uid_exceeding_uint32_max_rejected(self):
    method test_gid_exceeding_uint32_max_rejected (line 161) | def test_gid_exceeding_uint32_max_rejected(self):
    method test_uid_at_uint32_max_allowed (line 168) | def test_uid_at_uint32_max_allowed(self):
    method test_normalized_values_stored (line 173) | def test_normalized_values_stored(self):
    method test_both_uid_and_gid_checked_independently (line 179) | def test_both_uid_and_gid_checked_independently(self):
    method test_trailing_whitespace_gid_denied (line 186) | def test_trailing_whitespace_gid_denied(self):
  class TestKubernetesProcessProxy (line 193) | class TestKubernetesProcessProxy(unittest.TestCase):
    method setUp (line 196) | def setUp(self):
    method test_valid_template_substitution (line 213) | def test_valid_template_substitution(self):
    method test_missing_variables_fallback (line 235) | def test_missing_variables_fallback(self):
    method test_malicious_template_injection_prevention (line 252) | def test_malicious_template_injection_prevention(self):
    method test_pod_name_determination_with_templates (line 290) | def test_pod_name_determination_with_templates(self):
    method test_pod_name_determination_with_malicious_template (line 305) | def test_pod_name_determination_with_malicious_template(self):
    method test_pod_name_determination_with_missing_variables (line 322) | def test_pod_name_determination_with_missing_variables(self):
    method test_pod_name_without_template (line 339) | def test_pod_name_without_template(self):
    method test_pod_name_dns_normalization (line 348) | def test_pod_name_dns_normalization(self):
    method test_regex_pattern_validation (line 363) | def test_regex_pattern_validation(self):

FILE: enterprise_gateway/tests/test_yaml_injection.py
  function yaml_safe_str (line 43) | def yaml_safe_str(value):
  function _build_keywords (line 53) | def _build_keywords(env_overrides: dict) -> dict:
  function _render_pod_template (line 67) | def _render_pod_template(keywords: dict) -> str:
  function _base_env (line 83) | def _base_env() -> dict:
  class TestYamlSafeStrFilter (line 95) | class TestYamlSafeStrFilter(unittest.TestCase):
    method test_normal_string (line 98) | def test_normal_string(self):
    method test_string_with_quotes (line 102) | def test_string_with_quotes(self):
    method test_string_with_newlines_escaped (line 108) | def test_string_with_newlines_escaped(self):
    method test_document_boundary_escaped (line 114) | def test_document_boundary_escaped(self):
    method test_end_of_document_marker_escaped (line 120) | def test_end_of_document_marker_escaped(self):
    method test_none_serialized_as_yaml_null (line 125) | def test_none_serialized_as_yaml_null(self):
    method test_bool_serialized_as_yaml_bool (line 131) | def test_bool_serialized_as_yaml_bool(self):
    method test_numeric_serialized_correctly (line 139) | def test_numeric_serialized_correctly(self):
    method test_dict_rendered_as_flow_mapping (line 147) | def test_dict_rendered_as_flow_mapping(self):
    method test_empty_string (line 153) | def test_empty_string(self):
    method test_image_name_with_tag (line 158) | def test_image_name_with_tag(self):
  class TestEnvVarParsing (line 164) | class TestEnvVarParsing(unittest.TestCase):
    method test_scalar_vars_remain_strings (line 167) | def test_scalar_vars_remain_strings(self):
    method test_volume_mounts_parsed_as_list (line 175) | def test_volume_mounts_parsed_as_list(self):
    method test_volumes_parsed_as_list (line 183) | def test_volumes_parsed_as_list(self):
    method test_non_list_volume_rejected (line 190) | def test_non_list_volume_rejected(self):
    method test_list_of_strings_volume_rejected (line 195) | def test_list_of_strings_volume_rejected(self):
    method test_mixed_list_volume_rejected (line 201) | def test_mixed_list_volume_rejected(self):
    method test_yaml_safe_load_not_applied_to_scalars (line 207) | def test_yaml_safe_load_not_applied_to_scalars(self):
  class TestSecurityContextInjection (line 214) | class TestSecurityContextInjection(unittest.TestCase):
    method test_security_context_not_overridden (line 217) | def test_security_context_not_overridden(self):
    method test_injection_via_kernel_image (line 231) | def test_injection_via_kernel_image(self):
    method test_injection_via_kernel_namespace (line 242) | def test_injection_via_kernel_namespace(self):
    method test_injection_via_volume_mounts_string_list_blocked_at_l1 (line 253) | def test_injection_via_volume_mounts_string_list_blocked_at_l1(self):
    method test_injection_via_volume_mounts_blocked_at_l2 (line 262) | def test_injection_via_volume_mounts_blocked_at_l2(self):
    method test_all_rendered_kinds_are_allowed (line 289) | def test_all_rendered_kinds_are_allowed(self):
    method test_duplicate_pod_kind_detected (line 302) | def test_duplicate_pod_kind_detected(self):
  class TestNormalOperation (line 320) | class TestNormalOperation(unittest.TestCase):
    method test_basic_pod_renders_correctly (line 323) | def test_basic_pod_renders_correctly(self):
    method test_working_dir_set_correctly (line 337) | def test_working_dir_set_correctly(self):
    method test_resource_limits_rendered (line 346) | def test_resource_limits_rendered(self):
    method test_security_context_with_uid_gid (line 362) | def test_security_context_with_uid_gid(self):
    method test_volume_mounts_rendered (line 373) | def test_volume_mounts_rendered(self):
  class TestSparkOperatorTemplate (line 390) | class TestSparkOperatorTemplate(unittest.TestCase):
    method _render_operator_template (line 393) | def _render_operator_template(self, keywords: dict) -> str:
    method test_injection_via_kernel_image_blocked (line 407) | def test_injection_via_kernel_image_blocked(self):
    method test_normal_spark_app_renders (line 426) | def test_normal_spark_app_renders(self):
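
The test names above effectively specify the contract of the yaml_safe_str Jinja filter defined in launch_kubernetes.py and launch_custom_resource.py (see their index entries below): plain strings are quoted, embedded quotes and newlines are escaped, YAML document markers ("---", "...") cannot start new structure, None serializes as YAML null, bools and numbers keep their native YAML forms, and dicts render as flow mappings. The following is a minimal sketch of such a filter using PyYAML, written to be consistent with the test names; it is an illustration, not the repository's actual implementation:

    import yaml

    def yaml_safe_str(value):
        # Sketch: render one substitution value as a self-contained YAML
        # fragment so a crafted kernel name, image, or namespace cannot
        # smuggle in sibling keys, list items, or a new document.
        if isinstance(value, str):
            # Double-quoted style escapes newlines and quotes, and keeps
            # "---" / "..." inert inside the quoted scalar.
            dumped = yaml.safe_dump(value, default_style='"')
        else:
            # None -> null, True -> true, 42 -> 42, and dicts render as
            # flow mappings such as {cpu: 1}.
            dumped = yaml.safe_dump(value, default_flow_style=True)
        dumped = dumped.strip()
        # PyYAML terminates top-level plain-scalar documents with an
        # end-of-document marker ("..."); drop it for inline use.
        if dumped.endswith("..."):
            dumped = dumped[:-3].rstrip()
        return dumped

A filter like this would be registered on the Jinja environment before rendering, e.g. env.filters["yaml_safe_str"] = yaml_safe_str.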

FILE: etc/docker/kernel-image-puller/image_fetcher.py
  class ImageNameFetcher (line 13) | class ImageNameFetcher(metaclass=abc.ABCMeta):
    method fetch_image_names (line 19) | def fetch_image_names(self) -> set[str]:
  class KernelSpecsFetcher (line 28) | class KernelSpecsFetcher(ImageNameFetcher):
    method __init__ (line 35) | def __init__(self, logger):
    method get_kernel_specs (line 46) | def get_kernel_specs(self):
    method fetch_image_names (line 60) | def fetch_image_names(self) -> set[str]:
  class StaticListFetcher (line 96) | class StaticListFetcher(ImageNameFetcher):
    method __init__ (line 112) | def __init__(self, logger) -> None:
    method fetch_image_names (line 118) | def fetch_image_names(self) -> set[str]:
  class ConfigMapImagesFetcher (line 126) | class ConfigMapImagesFetcher(ImageNameFetcher):
    method __init__ (line 146) | def __init__(self, logger) -> None:
    method fetch_image_names (line 158) | def fetch_image_names(self) -> set[str]:
  class CombinedImagesFetcher (line 185) | class CombinedImagesFetcher(ImageNameFetcher):
    method __init__ (line 202) | def __init__(self, logger):
    method fetch_image_names (line 215) | def fetch_image_names(self) -> set[str]:
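
The index above shows one abstract base (ImageNameFetcher) with three concrete sources (the Gateway's kernelspecs endpoint, a static list, a ConfigMap) plus a CombinedImagesFetcher layered on top. Below is a hedged sketch of the delegate-and-union shape that combination implies; note the real CombinedImagesFetcher.__init__ takes only a logger (presumably building its delegates internally), so the explicit fetcher-list argument here is an assumption made to keep the example self-contained:

    import abc

    class ImageNameFetcher(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def fetch_image_names(self) -> set[str]:
            """Return the set of kernel image names that should be pulled."""

    class CombinedImagesFetcher(ImageNameFetcher):
        # Sketch only: delegates are passed in explicitly for illustration.
        def __init__(self, logger, fetchers):
            self.log = logger
            self.fetchers = fetchers

        def fetch_image_names(self) -> set[str]:
            names: set[str] = set()
            for fetcher in self.fetchers:
                try:
                    names |= fetcher.fetch_image_names()
                except Exception as exc:
                    # One failing source should not block the others.
                    self.log.warning("%s failed: %s", type(fetcher).__name__, exc)
            return names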

FILE: etc/docker/kernel-image-puller/kernel_image_puller.py
  class KernelImagePuller (line 20) | class KernelImagePuller:
    method __init__ (line 31) | def __init__(self, kip_logger, image_fetcher):
    method load_static_env_values (line 47) | def load_static_env_values(self):
    method start (line 85) | def start(self):
    method initialize_workers (line 106) | def initialize_workers(self):
    method get_container_runtime (line 114) | def get_container_runtime(self) -> Optional[str]:
    method is_runtime_endpoint_recognized (line 123) | def is_runtime_endpoint_recognized(self) -> bool:
    method fetch_image_names (line 130) | def fetch_image_names(self):
    method image_puller (line 147) | def image_puller(self):
    method pull_image (line 178) | def pull_image(self, image_name):
    method get_absolute_image_name (line 212) | def get_absolute_image_name(self, image_name: str) -> str:
    method image_exists (line 223) | def image_exists(self, image_name: str) -> bool:
    method download_image (line 245) | def download_image(self, image_name: str) -> bool:
    method execute_cmd (line 266) | def execute_cmd(self, argv: List[str]) -> bool:
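
The method names here (initialize_workers, image_puller, fetch_image_names, pull_image) suggest a periodic producer feeding a pool of puller threads through a shared queue, with settings read from the environment (see load_static_env_values). A rough sketch of that shape follows; the function name, worker count, and interval are assumptions for illustration:

    import queue
    import threading
    import time

    def run_puller(fetch_image_names, pull_image, num_workers=2, interval=300):
        # Sketch: workers drain image names from a queue while the main
        # loop refreshes the name set on a fixed interval.
        work = queue.Queue()

        def worker():
            while True:
                name = work.get()
                try:
                    pull_image(name)
                finally:
                    work.task_done()

        for _ in range(num_workers):
            threading.Thread(target=worker, daemon=True).start()

        while True:
            for name in fetch_image_names():
                work.put(name)
            work.join()           # wait for this batch to finish
            time.sleep(interval)  # then poll for new images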

FILE: etc/kernel-launchers/R/scripts/server_listener.py
  function _encrypt (line 33) | def _encrypt(connection_info_str, public_key):
  function return_connection_info (line 61) | def return_connection_info(
  function prepare_comm_socket (line 109) | def prepare_comm_socket(lower_port, upper_port):
  function _select_ports (line 123) | def _select_ports(count, lower_port, upper_port):
  function _select_socket (line 139) | def _select_socket(lower_port, upper_port):
  function _get_candidate_port (line 161) | def _get_candidate_port(lower_port, upper_port):
  function get_server_request (line 172) | def get_server_request(sock):
  function server_listener (line 198) | def server_listener(sock, parent_pid):
  function setup_server_listener (line 219) | def setup_server_listener(
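
The _select_ports/_select_socket/_get_candidate_port trio implies bounded, randomized port selection so listener sockets respect an operator-configured port range (elsewhere in the tree, PORT_RANGE defaults to 0..0). A small sketch of that technique; the retry count and bind address are assumptions:

    import random
    import socket

    def _get_candidate_port(lower_port, upper_port):
        # 0..0 conventionally means "let the OS pick any free port".
        if lower_port == 0 and upper_port == 0:
            return 0
        return random.randint(lower_port, upper_port)

    def _select_socket(lower_port, upper_port, retries=100):
        # Bind a TCP socket, retrying on collisions within the range.
        for _ in range(retries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.bind(("0.0.0.0", _get_candidate_port(lower_port, upper_port)))
                return sock
            except OSError:
                sock.close()
        raise RuntimeError(f"No available port in range {lower_port}..{upper_port}")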

FILE: etc/kernel-launchers/docker/scripts/launch_docker.py
  function launch_docker_kernel (line 21) | def launch_docker_kernel(

FILE: etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py
  function yaml_safe_str (line 30) | def yaml_safe_str(value):
  function generate_kernel_pod_yaml (line 46) | def generate_kernel_pod_yaml(keywords):
  function extend_pod_env (line 72) | def extend_pod_env(pod_def: dict) -> dict:
  function _parse_k8s_exception (line 98) | def _parse_k8s_exception(exc: ApiException) -> str:
  function launch_kubernetes_kernel (line 114) | def launch_kubernetes_kernel(
  function _get_spark_resources (line 310) | def _get_spark_resources(pod_template: Dict) -> str:
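
generate_kernel_pod_yaml pairs the kernel-pod.yaml.j2 template (listed in the preview section below) with the yaml_safe_str filter sketched earlier. A condensed sketch of that render step with Jinja2; template path handling and keyword names are simplified for illustration:

    import yaml
    from jinja2 import Environment, FileSystemLoader

    def generate_kernel_pod_yaml(template_dir, keywords):
        # Sketch: register the sanitizing filter, render the pod template,
        # then parse the result into one dict per YAML document.
        env = Environment(loader=FileSystemLoader(template_dir), autoescape=False)
        env.filters["yaml_safe_str"] = yaml_safe_str  # sketched earlier
        rendered = env.get_template("kernel-pod.yaml.j2").render(**keywords)
        return list(yaml.safe_load_all(rendered))

Each resulting definition would then be handed to the Kubernetes client by launch_kubernetes_kernel (e.g. a Pod body to CoreV1Api.create_namespaced_pod).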

FILE: etc/kernel-launchers/operators/scripts/launch_custom_resource.py
  function yaml_safe_str (line 18) | def yaml_safe_str(value):
  function generate_kernel_custom_resource_yaml (line 34) | def generate_kernel_custom_resource_yaml(kernel_crd_template, keywords):
  function extend_operator_env (line 55) | def extend_operator_env(op_def: dict, sub_spec: str) -> dict:
  function launch_custom_resource_kernel (line 77) | def launch_custom_resource_kernel(

FILE: etc/kernel-launchers/python/scripts/launch_ipykernel.py
  class ExceptionThread (line 44) | class ExceptionThread(Thread):
    method __init__ (line 47) | def __init__(self, target):
    method run (line 53) | def run(self):
  function initialize_namespace (line 61) | def initialize_namespace(namespace, cluster_type="spark"):
  class WaitingForSparkSessionToBeInitialized (line 128) | class WaitingForSparkSessionToBeInitialized:
    method __init__ (line 140) | def __init__(self, global_variable_name, init_thread, namespace):
    method __getattr__ (line 149) | def __getattr__(self, name):
  function _validate_port_range (line 166) | def _validate_port_range(port_range):
  function determine_connection_file (line 193) | def determine_connection_file(conn_file, kid):
  function _encrypt (line 207) | def _encrypt(connection_info_str, public_key):
  function return_connection_info (line 235) | def return_connection_info(
  function prepare_comm_socket (line 283) | def prepare_comm_socket(lower_port, upper_port):
  function _select_ports (line 297) | def _select_ports(count, lower_port, upper_port):
  function _select_socket (line 313) | def _select_socket(lower_port, upper_port):
  function _get_candidate_port (line 336) | def _get_candidate_port(lower_port, upper_port):
  function get_server_request (line 347) | def get_server_request(sock):
  function cancel_spark_jobs (line 373) | def cancel_spark_jobs(sig, frame):
  function server_listener (line 393) | def server_listener(sock, parent_pid, cluster_type):
  function import_item (line 416) | def import_item(name):
  function start_ipython (line 445) | def start_ipython(
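
WaitingForSparkSessionToBeInitialized, with its __getattr__ hook and init-thread handle, reads as a lazy proxy: the kernel can begin serving while Spark initializes in the background, and the first touch of the placeholder blocks until the session exists. A generic sketch of that pattern; only the three constructor arguments come from the index, the rest of the names are illustrative:

    class LazyInitProxy:
        # Sketch of the proxy pattern: __getattr__ fires only for
        # attributes not found on the proxy itself, so every use of the
        # placeholder waits for, then delegates to, the real object.
        def __init__(self, global_variable_name, init_thread, namespace):
            self._name = global_variable_name
            self._thread = init_thread
            self._namespace = namespace

        def __getattr__(self, attr):
            self._thread.join()  # block until background init completes
            real = self._namespace[self._name]
            return getattr(real, attr)

In the launcher, the namespace would be the kernel's global namespace and the thread the Spark-context initializer, so "spark" resolves to the real session once initialization finishes.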

FILE: etc/kernel-resources/ir/kernel.js
  method handler (line 9) | handler(cm) {
  method handler (line 18) | handler(cm) {
  method handler (line 27) | handler(cm, cell) {
  function add_edit_shortcut (line 48) | function add_edit_shortcut(notebook, actions, keyboard_manager, edit_act...
  function render_math (line 66) | function render_math(pager, html) {
  method onload (line 82) | onload() {

FILE: website/js/bootstrap.js
  function transitionEnd (line 34) | function transitionEnd() {
  function removeElement (line 126) | function removeElement() {
  function Plugin (line 142) | function Plugin(option) {
  function Plugin (line 247) | function Plugin(option) {
  function Plugin (line 466) | function Plugin(option) {
  function getTargetFromTrigger (line 685) | function getTargetFromTrigger($trigger) {
  function Plugin (line 697) | function Plugin(option) {
  function clearMenus (line 829) | function clearMenus(e) {
  function getParent (line 848) | function getParent($this) {
  function Plugin (line 865) | function Plugin(option) {
  function Plugin (line 1179) | function Plugin(option, _relatedTarget) {
  function complete (line 1521) | function complete() {
  function Plugin (line 1673) | function Plugin(option) {
  function Plugin (line 1787) | function Plugin(option) {
  function ScrollSpy (line 1830) | function ScrollSpy(element, options) {
  function Plugin (line 1953) | function Plugin(option) {
  function next (line 2060) | function next() {
  function Plugin (line 2106) | function Plugin(option) {
  function Plugin (line 2263) | function Plugin(option) {

FILE: website/js/cbpAnimatedHeader.js
  function init (line 18) | function init() {
  function scrollPage (line 27) | function scrollPage() {
  function scrollY (line 38) | function scrollY() {

FILE: website/js/classie.js
  function classReg (line 20) | function classReg( className ) {
  function toggleClass (line 53) | function toggleClass( elem, c ) {

FILE: website/js/jquery.js
  function r (line 2) | function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindo...
  function fb (line 2) | function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)...
  function gb (line 2) | function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLengt...
  function hb (line 2) | function hb(a){return a[u]=!0,a}
  function ib (line 2) | function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){re...
  function jb (line 2) | function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[...
  function kb (line 2) | function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sou...
  function lb (line 2) | function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"...
  function mb (line 2) | function mb(a){return function(b){var c=b.nodeName.toLowerCase();return(...
  function nb (line 2) | function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,...
  function ob (line 2) | function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}
  function pb (line 2) | function pb(){}
  function qb (line 2) | function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}
  function rb (line 2) | function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.firs...
  function sb (line 2) | function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e-...
  function tb (line 2) | function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}
  function ub (line 2) | function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(...
  function vb (line 2) | function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)...
  function wb (line 2) | function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.r...
  function xb (line 2) | function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var...
  function w (line 2) | function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){retur...
  function D (line 2) | function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}
  function G (line 2) | function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b...
  function I (line 2) | function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded...
  function J (line 2) | function J(){(y.addEventListener||"load"===event.type||"complete"===y.re...
  function O (line 2) | function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace...
  function P (line 2) | function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&...
  function Q (line 2) | function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType...
  function R (line 3) | function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a...
  function ab (line 3) | function ab(){return!0}
  function bb (line 3) | function bb(){return!1}
  function cb (line 3) | function cb(){try{return y.activeElement}catch(a){}}
  function db (line 3) | function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.cre...
  function ub (line 3) | function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getEl...
  function vb (line 3) | function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}
  function wb (line 3) | function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeTyp...
  function xb (line 3) | function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}
  function yb (line 3) | function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttrib...
  function zb (line 3) | function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval"...
  function Ab (line 3) | function Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a)...
  function Bb (line 3) | function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCas...
  function Eb (line 3) | function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getD...
  function Fb (line 3) | function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(...
  function Lb (line 3) | function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?voi...
  function i (line 3) | function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&...
  function Ub (line 3) | function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.sl...
  function Vb (line 3) | function Vb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.styl...
  function Wb (line 3) | function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[...
  function Xb (line 3) | function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===...
  function Yb (line 3) | function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f...
  function Zb (line 3) | function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}
  function fc (line 4) | function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}
  function gc (line 4) | function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d[...
  function hc (line 4) | function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.lengt...
  function ic (line 4) | function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeTyp...
  function jc (line 4) | function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a...
  function kc (line 4) | function kc(a,b,c){var d,e,f=0,g=dc.length,h=m.Deferred().always(functio...
  function Lc (line 4) | function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var ...
  function Mc (line 4) | function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!...
  function Nc (line 4) | function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)voi...
  function Oc (line 4) | function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[...
  function Pc (line 4) | function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])fo...
  function x (line 4) | function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=v...
  function Vc (line 4) | function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc....
  function Zc (line 4) | function Zc(){try{return new a.XMLHttpRequest}catch(b){}}
  function $c (line 4) | function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(...
  function dd (line 4) | function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.pa...
Condensed preview: 295 files, each showing path, character count, and a content snippet (full structured content totals 1,770K characters).
[
  {
    "path": ".git-blame-ignore-revs",
    "chars": 71,
    "preview": "# Initial pre-commit reformat\ndf811d0deacebfd6cc77e8bf501d9b87ff006fb5\n"
  },
  {
    "path": ".gitattributes",
    "chars": 337,
    "preview": "# Set the default behavior to have all files normalized to Unix-style\n# line endings upon check-in.\n* text=auto\n\n# Decla"
  },
  {
    "path": ".github/ISSUE_TEMPLATE.md",
    "chars": 465,
    "preview": "Help us improve the Jupyter Enterprise Gateway project by reporting issues\nor asking questions.\n\n## Description\n\n## Scre"
  },
  {
    "path": ".github/codeql/codeql-config.yml",
    "chars": 126,
    "preview": "name: \"Enterprise Gateway CodeQL config\"\n\nqueries:\n  - uses: security-and-quality\n\npaths-ignore:\n  - enterprise_gateway/"
  },
  {
    "path": ".github/dependabot.yml",
    "chars": 431,
    "preview": "version: 2\nupdates:\n  # Set update schedule for GitHub Actions\n  - package-ecosystem: \"github-actions\"\n    directory: \"/"
  },
  {
    "path": ".github/workflows/build.yml",
    "chars": 4445,
    "preview": "name: Builds\non:\n  push:\n  pull_request:\n\njobs:\n  build:\n    runs-on: ${{ matrix.os }}\n    env:\n      ASYNC_TEST_TIMEOUT"
  },
  {
    "path": ".github/workflows/codeql-analysis.yml",
    "chars": 2980,
    "preview": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# Y"
  },
  {
    "path": ".gitignore",
    "chars": 944,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "chars": 1073,
    "preview": "ci:\n  autoupdate_schedule: monthly\n\nrepos:\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.5.0\n    "
  },
  {
    "path": ".readthedocs.yaml",
    "chars": 166,
    "preview": "version: 2\nbuild:\n  os: \"ubuntu-22.04\"\n  tools:\n    python: \"mambaforge-22.9\"\nsphinx:\n  configuration: docs/source/conf."
  },
  {
    "path": "LICENSE.md",
    "chars": 2992,
    "preview": "# Licensing terms\n\nThis project is licensed under the terms of the Modified BSD License\n(also known as New or Revised or"
  },
  {
    "path": "Makefile",
    "chars": 12426,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n.PHONY: help clean"
  },
  {
    "path": "README.md",
    "chars": 4335,
    "preview": "**[Website](https://jupyter-enterprise-gateway.readthedocs.io/)** |\n**[Technical Overview](#technical-overview)** |\n**[I"
  },
  {
    "path": "codecov.yml",
    "chars": 290,
    "preview": "codecov:\n  notify:\n    require_ci_to_pass: yes\n\ncoverage:\n  precision: 2\n  round: down\n  range: \"70...100\"\n\n  status:\n  "
  },
  {
    "path": "conftest.py",
    "chars": 790,
    "preview": "def pytest_addoption(parser):\n    parser.addoption(\"--host\", action=\"store\", default=\"localhost:8888\")\n    parser.addopt"
  },
  {
    "path": "docs/Makefile",
    "chars": 7190,
    "preview": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    = -n\nSPHINXBUI"
  },
  {
    "path": "docs/doc-requirements.txt",
    "chars": 246,
    "preview": "# https://github.com/miyakogi/m2r/issues/66\nmistune<4\nmyst-parser\npydata_sphinx_theme\nsphinx\nsphinx-markdown-tables\nsphi"
  },
  {
    "path": "docs/environment.yml",
    "chars": 157,
    "preview": "name: enterprise_gateway_docs\nchannels:\n  - conda-forge\n  - defaults\n  - free\ndependencies:\n  - pip\n  - python=3.11\n  - "
  },
  {
    "path": "docs/make.bat",
    "chars": 7267,
    "preview": "@ECHO OFF\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\n"
  },
  {
    "path": "docs/source/_static/custom.css",
    "chars": 94,
    "preview": "body div.sphinxsidebarwrapper p.logo {\n  text-align: left;\n}\n.mermaid svg {\n  height: 100%;\n}\n"
  },
  {
    "path": "docs/source/conf.py",
    "chars": 13446,
    "preview": "#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible conf"
  },
  {
    "path": "docs/source/contributors/contrib.md",
    "chars": 866,
    "preview": "# Contributing to Jupyter Enterprise Gateway\n\nThank you for your interest in Jupyter Enterprise Gateway! If you would li"
  },
  {
    "path": "docs/source/contributors/debug.md",
    "chars": 1317,
    "preview": "# Debugging Jupyter Enterprise Gateway\n\nThis page discusses how to go about debugging Enterprise Gateway. We also provid"
  },
  {
    "path": "docs/source/contributors/devinstall.md",
    "chars": 6011,
    "preview": "# Development Workflow\n\nHere are instructions for setting up a development environment for the [Jupyter Enterprise Gatew"
  },
  {
    "path": "docs/source/contributors/docker.md",
    "chars": 5375,
    "preview": "# Docker Images\n\nAll docker images can be pulled from docker hub's [elyra organization](https://hub.docker.com/u/elyra/)"
  },
  {
    "path": "docs/source/contributors/index.rst",
    "chars": 953,
    "preview": "Contributors Guide\n==================\n\nThese pages target people who are interested in contributing directly to the Jupy"
  },
  {
    "path": "docs/source/contributors/roadmap.md",
    "chars": 1789,
    "preview": "# Project Roadmap\n\nWe have plenty to do, now and in the future. Here's where we're headed:\n\n## Completed in 3.x\n\n- Spark"
  },
  {
    "path": "docs/source/contributors/sequence-diagrams.md",
    "chars": 2229,
    "preview": "# Sequence Diagrams\n\nThe following consists of various sequence diagrams you might find helpful. We plan to add\ndiagrams"
  },
  {
    "path": "docs/source/contributors/system-architecture.md",
    "chars": 31050,
    "preview": "# System Architecture\n\nBelow are sections presenting details of the Enterprise Gateway internals and other related items"
  },
  {
    "path": "docs/source/developers/custom-images.md",
    "chars": 10011,
    "preview": "# Custom Kernel Images\n\nThis section presents information needed for how a custom kernel image could be built for your o"
  },
  {
    "path": "docs/source/developers/dev-process-proxy.md",
    "chars": 5614,
    "preview": "# Implementing a process proxy\n\nA process proxy implementation is necessary if you want to interact with a resource mana"
  },
  {
    "path": "docs/source/developers/index.rst",
    "chars": 1423,
    "preview": "Developers Guide\n================\n\nThese pages target *developers* writing applications against the REST API, authoring "
  },
  {
    "path": "docs/source/developers/kernel-launcher.md",
    "chars": 7562,
    "preview": "# Implementing a kernel launcher\n\nA new implementation for a [_kernel launcher_](../contributors/system-architecture.md#"
  },
  {
    "path": "docs/source/developers/kernel-library.md",
    "chars": 1264,
    "preview": "# Standalone Remote Kernel Execution\n\nRemote kernels can be executed by using the `RemoteKernelManager` class directly. "
  },
  {
    "path": "docs/source/developers/kernel-manager.md",
    "chars": 2268,
    "preview": "# Using Jupyter Server's `GatewayKernelManager`\n\nAnother way to expose other Jupyter applications like `nbclient` or `pa"
  },
  {
    "path": "docs/source/developers/kernel-specification.md",
    "chars": 5104,
    "preview": "# Implementing a kernel specification\n\nIf you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll"
  },
  {
    "path": "docs/source/developers/rest-api.rst",
    "chars": 16775,
    "preview": "Using the REST API\n===============================\n\nThe REST API is used to author new applications that need to interac"
  },
  {
    "path": "docs/source/index.rst",
    "chars": 3595,
    "preview": "Welcome to Jupyter Enterprise Gateway!\n======================================\nJupyter Enterprise Gateway is a headless w"
  },
  {
    "path": "docs/source/operators/config-add-env.md",
    "chars": 8202,
    "preview": "# Additional environment variables\n\nBesides those environment variables associated with configurable options, the follow"
  },
  {
    "path": "docs/source/operators/config-availability.md",
    "chars": 10005,
    "preview": "# Availability modes\n\nEnterprise Gateway can be optionally configured in one of two \"availability modes\": _standalone_ o"
  },
  {
    "path": "docs/source/operators/config-cli.md",
    "chars": 17372,
    "preview": "# Command-line options\n\nIn some cases, it may be easier to use command line options. These can also be used for _static_"
  },
  {
    "path": "docs/source/operators/config-culling.md",
    "chars": 1863,
    "preview": "# Culling idle kernels\n\nWith the adoption of notebooks and interactive development for data science, a new \"resource uti"
  },
  {
    "path": "docs/source/operators/config-dynamic.md",
    "chars": 2097,
    "preview": "# Dynamic configurables\n\nEnterprise Gateway also supports the ability to update configuration variables without having t"
  },
  {
    "path": "docs/source/operators/config-env-debug.md",
    "chars": 2524,
    "preview": "# Environment variables that assist in troubleshooting\n\nThe following environment variables may be useful for troublesho"
  },
  {
    "path": "docs/source/operators/config-file.md",
    "chars": 1691,
    "preview": "# Configuration file options\n\nPlacing configuration options into the configuration file `jupyter_enterprise_gateway_conf"
  },
  {
    "path": "docs/source/operators/config-kernel-override.md",
    "chars": 3550,
    "preview": "# Per-kernel overrides\n\nAs mentioned in the overview of [Process Proxy Configuration](../contributors/system-architectur"
  },
  {
    "path": "docs/source/operators/config-security.md",
    "chars": 10923,
    "preview": "# Configuring security\n\nJupyter Enterprise Gateway does not currently perform user _authentication_ but, instead, assume"
  },
  {
    "path": "docs/source/operators/config-sys-env.md",
    "chars": 934,
    "preview": "# System-owned environment variables\n\nThe following environment variables are managed by Enterprise Gateway and listed h"
  },
  {
    "path": "docs/source/operators/deploy-conductor.md",
    "chars": 441,
    "preview": "# IBM Spectrum Conductor deployments\n\nThis information will be added shortly. The configuration is similar to that of [H"
  },
  {
    "path": "docs/source/operators/deploy-distributed.md",
    "chars": 9670,
    "preview": "# Distributed deployments\n\nThis section describes how to deploy Enterprise Gateway to manage kernels across a distribute"
  },
  {
    "path": "docs/source/operators/deploy-docker.md",
    "chars": 9766,
    "preview": "# Docker and Docker Swarm deployments\n\nThis section describes how to deploy Enterprise Gateway into an existing Docker o"
  },
  {
    "path": "docs/source/operators/deploy-kubernetes.md",
    "chars": 70782,
    "preview": "# Kubernetes deployments\n\n## Overview\n\nThis section describes how to deploy Enterprise Gateway into an existing Kubernet"
  },
  {
    "path": "docs/source/operators/deploy-single.md",
    "chars": 2288,
    "preview": "# Single-server deployments\n\nSingle-server deployment can be useful for development and is not meant to be run in produc"
  },
  {
    "path": "docs/source/operators/deploy-yarn-cluster.md",
    "chars": 7391,
    "preview": "# Hadoop YARN deployments\n\nTo leverage the full distributed capabilities of Jupyter Enterprise Gateway, there is a need "
  },
  {
    "path": "docs/source/operators/index.rst",
    "chars": 2712,
    "preview": "Operators Guide\n===============\n\nThese pages are targeted at *operators* that need to deploy and configure a Jupyter Ent"
  },
  {
    "path": "docs/source/operators/installing-eg.md",
    "chars": 2561,
    "preview": "# Installing Enterprise Gateway (common)\n\nFor new users, we **highly recommend** [installing Anaconda](https://www.anaco"
  },
  {
    "path": "docs/source/operators/installing-kernels.md",
    "chars": 2622,
    "preview": "# Installing supported kernels (common)\n\nEnterprise Gateway includes kernel specifications that support the following ke"
  },
  {
    "path": "docs/source/operators/launching-eg.md",
    "chars": 1693,
    "preview": "# Launching Enterprise Gateway (common)\n\nVery few arguments are necessary to minimally start Enterprise Gateway. The fol"
  },
  {
    "path": "docs/source/other/index.rst",
    "chars": 295,
    "preview": "Other helpful information\n===========================\nThis section includes some additional information you might find h"
  },
  {
    "path": "docs/source/other/related-resources.md",
    "chars": 1295,
    "preview": "# Related Resources\n\nHere are some resources related to the Jupyter Enterprise Gateway project.\n\n- [Jupyter.org](https:/"
  },
  {
    "path": "docs/source/other/troubleshooting.md",
    "chars": 16088,
    "preview": "# Troubleshooting Guide\n\nThis page identifies scenarios we've encountered when running Enterprise Gateway. We also provi"
  },
  {
    "path": "docs/source/users/client-config.md",
    "chars": 4798,
    "preview": "# Gateway Client Configuration\n\nThe set of Gateway Client configuration options include the following. To get the curren"
  },
  {
    "path": "docs/source/users/connecting-to-eg.md",
    "chars": 2836,
    "preview": "# Connecting the server to Enterprise Gateway\n\nTo leverage the benefits of Enterprise Gateway, it's helpful to redirect "
  },
  {
    "path": "docs/source/users/index.rst",
    "chars": 1722,
    "preview": "Users Guide\n===========\n\nBecause Enterprise Gateway is a headless web server, it is typically accessed from other applic"
  },
  {
    "path": "docs/source/users/installation.md",
    "chars": 799,
    "preview": "# Installing the client\n\nIn terms of Enterprise Gateway, the client application is typically Jupyter Server (hosting Jup"
  },
  {
    "path": "docs/source/users/kernel-envs.md",
    "chars": 9857,
    "preview": "# Kernel Environment Variables\n\nThe Enterprise Gateway client software will also include _any_ environment variables pre"
  },
  {
    "path": "enterprise_gateway/__init__.py",
    "chars": 360,
    "preview": "\"\"\"Lazy-loading entrypoint for the enterprise gateway package.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distribut"
  },
  {
    "path": "enterprise_gateway/__main__.py",
    "chars": 271,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"CLI entrypoint f"
  },
  {
    "path": "enterprise_gateway/_version.py",
    "chars": 168,
    "preview": "\"\"\"enterprise_gateway version info\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Mo"
  },
  {
    "path": "enterprise_gateway/base/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/base/handlers.py",
    "chars": 1522,
    "preview": "\"\"\"Tornado handlers for the base of the API.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms"
  },
  {
    "path": "enterprise_gateway/client/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/client/gateway_client.py",
    "chars": 16830,
    "preview": "\"\"\"An Enterprise Gateway client.\"\"\"\n\nimport logging\nimport os\nimport queue\nimport time\nfrom threading import Thread\nfrom"
  },
  {
    "path": "enterprise_gateway/enterprisegatewayapp.py",
    "chars": 18596,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Enterprise Gatew"
  },
  {
    "path": "enterprise_gateway/itests/__init__.py",
    "chars": 420,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom tornado import"
  },
  {
    "path": "enterprise_gateway/itests/kernels/authorization_test/kernel.json",
    "chars": 422,
    "preview": "{\n  \"display_name\": \"Authorization Testing\",\n  \"language\": \"python\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class"
  },
  {
    "path": "enterprise_gateway/itests/test_authorization.py",
    "chars": 1439,
    "preview": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\n\nclass TestAuthorization("
  },
  {
    "path": "enterprise_gateway/itests/test_base.py",
    "chars": 920,
    "preview": "import os\n\nexpected_hostname = os.getenv(\"ITEST_HOSTNAME_PREFIX\", \"\") + \"*\"  # use ${KERNEL_USERNAME} on k8s\nexpected_ap"
  },
  {
    "path": "enterprise_gateway/itests/test_python_kernel.py",
    "chars": 8563,
    "preview": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import Te"
  },
  {
    "path": "enterprise_gateway/itests/test_r_kernel.py",
    "chars": 6273,
    "preview": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import Te"
  },
  {
    "path": "enterprise_gateway/itests/test_scala_kernel.py",
    "chars": 6617,
    "preview": "import os\nimport unittest\n\nfrom enterprise_gateway.client.gateway_client import GatewayClient\n\nfrom .test_base import Te"
  },
  {
    "path": "enterprise_gateway/mixins.py",
    "chars": 30003,
    "preview": "\"\"\"Mixins for Tornado handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modif"
  },
  {
    "path": "enterprise_gateway/services/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/services/api/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/services/api/handlers.py",
    "chars": 1988,
    "preview": "\"\"\"Tornado handlers for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the"
  },
  {
    "path": "enterprise_gateway/services/api/swagger.json",
    "chars": 16335,
    "preview": "{\n  \"swagger\": \"2.0\",\n  \"info\": {\n    \"title\": \"Jupyter Enterprise Gateway API\",\n    \"description\": \"The API for the Jup"
  },
  {
    "path": "enterprise_gateway/services/api/swagger.yaml",
    "chars": 12575,
    "preview": "swagger: \"2.0\"\n\ninfo:\n  title: Jupyter Enterprise Gateway API\n  description: The API for the Jupyter Enterprise Gateway\n"
  },
  {
    "path": "enterprise_gateway/services/kernels/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/services/kernels/handlers.py",
    "chars": 6546,
    "preview": "\"\"\"Tornado handlers for kernel CRUD and communication.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under"
  },
  {
    "path": "enterprise_gateway/services/kernels/remotemanager.py",
    "chars": 34845,
    "preview": "\"\"\"Kernel managers that operate against a remote process.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed un"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/__init__.py",
    "chars": 156,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom .kernelspec_c"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/handlers.py",
    "chars": 7585,
    "preview": "\"\"\"Tornado handlers for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the"
  },
  {
    "path": "enterprise_gateway/services/kernelspecs/kernelspec_cache.py",
    "chars": 13832,
    "preview": "\"\"\"Cache handling for kernel specs.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the M"
  },
  {
    "path": "enterprise_gateway/services/processproxies/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/services/processproxies/conductor.py",
    "chars": 33600,
    "preview": "\"\"\"Code related to managing kernels running in Conductor clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distr"
  },
  {
    "path": "enterprise_gateway/services/processproxies/container.py",
    "chars": 11859,
    "preview": "\"\"\"Code related to managing kernels running in containers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed u"
  },
  {
    "path": "enterprise_gateway/services/processproxies/crd.py",
    "chars": 5300,
    "preview": "\"\"\"Code related to managing kernels running based on k8s custom resource.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n"
  },
  {
    "path": "enterprise_gateway/services/processproxies/distributed.py",
    "chars": 9950,
    "preview": "\"\"\"Code used for the generic distribution of kernels across a set of hosts.\"\"\"\n\n# Copyright (c) Jupyter Development Team"
  },
  {
    "path": "enterprise_gateway/services/processproxies/docker_swarm.py",
    "chars": 12294,
    "preview": "\"\"\"Code related to managing kernels running in docker-based containers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# "
  },
  {
    "path": "enterprise_gateway/services/processproxies/k8s.py",
    "chars": 21008,
    "preview": "\"\"\"Code related to managing kernels running in Kubernetes clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Dist"
  },
  {
    "path": "enterprise_gateway/services/processproxies/processproxy.py",
    "chars": 71698,
    "preview": "\"\"\"Kernel managers that operate against a remote process.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed un"
  },
  {
    "path": "enterprise_gateway/services/processproxies/spark_operator.py",
    "chars": 964,
    "preview": "\"\"\"A spark operator process proxy.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Mo"
  },
  {
    "path": "enterprise_gateway/services/processproxies/yarn.py",
    "chars": 25633,
    "preview": "\"\"\"Code related to managing kernels running in YARN clusters.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distribute"
  },
  {
    "path": "enterprise_gateway/services/sessions/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "enterprise_gateway/services/sessions/handlers.py",
    "chars": 1549,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tornado handlers"
  },
  {
    "path": "enterprise_gateway/services/sessions/kernelsessionmanager.py",
    "chars": 22149,
    "preview": "\"\"\"Session manager that keeps all its metadata in memory.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed un"
  },
  {
    "path": "enterprise_gateway/services/sessions/sessionmanager.py",
    "chars": 8312,
    "preview": "\"\"\"Session manager that keeps all its metadata in memory.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed un"
  },
  {
    "path": "enterprise_gateway/tests/__init__.py",
    "chars": 668,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nfrom tornado import"
  },
  {
    "path": "enterprise_gateway/tests/resources/failing_code2.ipynb",
    "chars": 618,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \""
  },
  {
    "path": "enterprise_gateway/tests/resources/failing_code3.ipynb",
    "chars": 617,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \""
  },
  {
    "path": "enterprise_gateway/tests/resources/kernel_api2.ipynb",
    "chars": 8679,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# API Creation \\n\",\n    \"This noteb"
  },
  {
    "path": "enterprise_gateway/tests/resources/kernel_api3.ipynb",
    "chars": 8682,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# API Creation \\n\",\n    \"This noteb"
  },
  {
    "path": "enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json",
    "chars": 416,
    "preview": "{\n  \"display_name\": \"Kernel Defaults Testing\",\n  \"language\": \"python\",\n  \"env\": {\n    \"KERNEL_VAR1\": \"kernel_var1_defaul"
  },
  {
    "path": "enterprise_gateway/tests/resources/public/index.html",
    "chars": 127,
    "preview": "<!doctype html>\n<html>\n  <head>\n    <title>Hello world!</title>\n  </head>\n  <body>\n    <h1>Hello world!</h1>\n  </body>\n<"
  },
  {
    "path": "enterprise_gateway/tests/resources/responses_2.ipynb",
    "chars": 2243,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"o"
  },
  {
    "path": "enterprise_gateway/tests/resources/responses_3.ipynb",
    "chars": 2321,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"o"
  },
  {
    "path": "enterprise_gateway/tests/resources/simple_api2.ipynb",
    "chars": 1206,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"o"
  },
  {
    "path": "enterprise_gateway/tests/resources/simple_api3.ipynb",
    "chars": 1203,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"o"
  },
  {
    "path": "enterprise_gateway/tests/resources/unknown_kernel.ipynb",
    "chars": 668,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \""
  },
  {
    "path": "enterprise_gateway/tests/resources/zen2.ipynb",
    "chars": 605,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \""
  },
  {
    "path": "enterprise_gateway/tests/resources/zen3.ipynb",
    "chars": 604,
    "preview": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \""
  },
  {
    "path": "enterprise_gateway/tests/test_enterprise_gateway.py",
    "chars": 7927,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for jupyte"
  },
  {
    "path": "enterprise_gateway/tests/test_gatewayapp.py",
    "chars": 7462,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for basic "
  },
  {
    "path": "enterprise_gateway/tests/test_handlers.py",
    "chars": 25842,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for jupyte"
  },
  {
    "path": "enterprise_gateway/tests/test_kernelspec_cache.py",
    "chars": 6994,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for Kernel"
  },
  {
    "path": "enterprise_gateway/tests/test_mixins.py",
    "chars": 6290,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for handle"
  },
  {
    "path": "enterprise_gateway/tests/test_process_proxy.py",
    "chars": 18115,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for proces"
  },
  {
    "path": "enterprise_gateway/tests/test_yaml_injection.py",
    "chars": 17539,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\"\"\"Tests for YAML i"
  },
  {
    "path": "etc/Makefile",
    "chars": 10483,
    "preview": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n.PHONY: help clean"
  },
  {
    "path": "etc/docker/demo-base/Dockerfile",
    "chars": 6536,
    "preview": "ARG BASE_CONTAINER=continuumio/miniconda3:24.1.2-0\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\nARG SPARKR_VERSION=3.1.2\nARG "
  },
  {
    "path": "etc/docker/demo-base/README.md",
    "chars": 769,
    "preview": "# What this image Gives You\n\n- Ubuntu base image : bionic\n- Hadoop 2.7.7\n- Apache Spark 2.4.6\n- Java 1.8 runtime\n- Mini-"
  },
  {
    "path": "etc/docker/demo-base/bootstrap-yarn-spark.sh",
    "chars": 4623,
    "preview": "#!/bin/bash\n\n# This file is a copy of /etc/bootstrap.sh but sets up the YARN cluster in its \"deamon\" case.\n# It also che"
  },
  {
    "path": "etc/docker/demo-base/core-site.xml.template",
    "chars": 154,
    "preview": "  <configuration>\n      <property>\n          <name>fs.defaultFS</name>\n          <value>hdfs://HOSTNAME:9000</value>\n   "
  },
  {
    "path": "etc/docker/demo-base/fix-permissions",
    "chars": 965,
    "preview": "#!/bin/bash\n# set permissions on a directory\n# after any installation, if a directory needs to be (human) user-writable,"
  },
  {
    "path": "etc/docker/demo-base/hdfs-site.xml",
    "chars": 126,
    "preview": "<configuration>\n    <property>\n        <name>dfs.replication</name>\n        <value>1</value>\n    </property>\n</configura"
  },
  {
    "path": "etc/docker/demo-base/mapred-site.xml",
    "chars": 138,
    "preview": "<configuration>\n    <property>\n        <name>mapreduce.framework.name</name>\n        <value>yarn</value>\n    </property>"
  },
  {
    "path": "etc/docker/demo-base/ssh_config",
    "chars": 94,
    "preview": "Host *\n  UserKnownHostsFile /dev/null\n  StrictHostKeyChecking no\n  LogLevel quiet\n  Port 2122\n"
  },
  {
    "path": "etc/docker/demo-base/yarn-site.xml.template",
    "chars": 2266,
    "preview": "\n<configuration>\n\n    <property>\n        <name>yarn.nodemanager.vmem-check-enabled</name>\n        <value>false</value>\n "
  },
  {
    "path": "etc/docker/docker-compose.yml",
    "chars": 4005,
    "preview": "version: \"3.5\"\n\n# A docker user network is created and referenced by the service.  This network\n# must also get conveyed"
  },
  {
    "path": "etc/docker/enterprise-gateway/Dockerfile",
    "chars": 1782,
    "preview": "ARG BASE_CONTAINER=jupyter/minimal-notebook:2023-03-13\n\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\n\nENV SPARK_VER=$SPARK_VE"
  },
  {
    "path": "etc/docker/enterprise-gateway/README.md",
    "chars": 2359,
    "preview": "This image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) w"
  },
  {
    "path": "etc/docker/enterprise-gateway/start-enterprise-gateway.sh",
    "chars": 2527,
    "preview": "#!/bin/bash\n\n#export ANACONDA_HOME=/opt/conda\n#export JAVA_HOME=/usr/java/default\n#export PYSPARK_PYTHON=${ANACONDA_HOME"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/Dockerfile",
    "chars": 2472,
    "preview": "ARG HUB_ORG\nARG SPARK_VERSION\n\nARG BASE_CONTAINER=${HUB_ORG}/demo-base:${SPARK_VERSION}\nFROM $BASE_CONTAINER\n\n# An ARG d"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/README.md",
    "chars": 5573,
    "preview": "Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), this image adds support for [Jupyter Enterprise G"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh",
    "chars": 1770,
    "preview": "#!/bin/bash\n\n# This file is a copy of /etc/bootstrap.sh but invokes Jupyter Enterprise Gateway in its \"deamon\" case.\n# I"
  },
  {
    "path": "etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template",
    "chars": 1040,
    "preview": "#!/bin/bash\n\n# Allow for mounts of kernelspecs to /tmp/byok/kernels\nexport JUPYTER_PATH=${JUPYTER_PATH:-/tmp/byok}\n\n# En"
  },
  {
    "path": "etc/docker/kernel-image-puller/Dockerfile",
    "chars": 1094,
    "preview": "ARG BASE_CONTAINER=python:3.10-bookworm\nFROM $BASE_CONTAINER\n\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\nRUN pip ins"
  },
  {
    "path": "etc/docker/kernel-image-puller/README.md",
    "chars": 1547,
    "preview": "This image is responsible for contacting the configured [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway."
  },
  {
    "path": "etc/docker/kernel-image-puller/image_fetcher.py",
    "chars": 8715,
    "preview": "\"\"\"image name fetcher abstract class and concrete implementation\"\"\"\n\nimport abc\nimport importlib\nimport os\n\nimport reque"
  },
  {
    "path": "etc/docker/kernel-image-puller/kernel_image_puller.py",
    "chars": 12834,
    "preview": "\"\"\"A kernel image puller.\"\"\"\n\nimport importlib\nimport logging\nimport os\nimport queue\nimport time\nfrom subprocess import "
  },
  {
    "path": "etc/docker/kernel-image-puller/requirements.txt",
    "chars": 53,
    "preview": "docker>=3.7.2\nkubernetes>=17.17.0\nrequests>=2.7,<3.0\n"
  },
  {
    "path": "etc/docker/kernel-py/Dockerfile",
    "chars": 902,
    "preview": "# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=jupyter/scipy-notebook:2023-03-13\nFROM $BASE_CONTAINER\n\nENV PATH=$PATH:$C"
  },
  {
    "path": "etc/docker/kernel-py/README.md",
    "chars": 957,
    "preview": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-ga"
  },
  {
    "path": "etc/docker/kernel-r/Dockerfile",
    "chars": 802,
    "preview": "# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=quay.io/jupyter/r-notebook:r-4.5.2\nFROM $BASE_CONTAINER\n\nRUN conda instal"
  },
  {
    "path": "etc/docker/kernel-r/README.md",
    "chars": 818,
    "preview": "This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-g"
  },
  {
    "path": "etc/docker/kernel-scala/Dockerfile",
    "chars": 687,
    "preview": "ARG HUB_ORG\nARG SPARK_VERSION\n\n# TODO: Restore usage of SPARK_VERSION ARG once https://github.com/jupyter/enterprise_gat"
  },
  {
    "path": "etc/docker/kernel-scala/README.md",
    "chars": 1119,
    "preview": "This image enables the use of a Scala ([Apache Toree](https://toree.apache.org/)) kernel launched from [Jupyter Enterpri"
  },
  {
    "path": "etc/docker/kernel-spark-py/Dockerfile",
    "chars": 1387,
    "preview": "ARG HUB_ORG\nARG TAG\n\n# Ubuntu 18.04.1 LTS Bionic\nARG BASE_CONTAINER=$HUB_ORG/kernel-py:$TAG\nFROM $BASE_CONTAINER\n\nARG SP"
  },
  {
    "path": "etc/docker/kernel-spark-py/README.md",
    "chars": 1200,
    "preview": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-ga"
  },
  {
    "path": "etc/docker/kernel-spark-r/Dockerfile",
    "chars": 1324,
    "preview": "ARG HUB_ORG\nARG TAG\n\nARG BASE_CONTAINER=$HUB_ORG/kernel-r:$TAG\nFROM $BASE_CONTAINER\n\nARG SPARK_VERSION\n\nUSER root\n\nENV S"
  },
  {
    "path": "etc/docker/kernel-spark-r/README.md",
    "chars": 1057,
    "preview": "This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-g"
  },
  {
    "path": "etc/docker/kernel-tf-gpu-py/Dockerfile",
    "chars": 790,
    "preview": "# Ubuntu:xenial\nARG BASE_CONTAINER=tensorflow/tensorflow:2.9.1-gpu\nFROM $BASE_CONTAINER\n\nENV DEBIAN_FRONTEND=noninteract"
  },
  {
    "path": "etc/docker/kernel-tf-gpu-py/README.md",
    "chars": 1018,
    "preview": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-ga"
  },
  {
    "path": "etc/docker/kernel-tf-py/Dockerfile",
    "chars": 643,
    "preview": "# Ubuntu:Bionic\n# TensorFlow 2.4.0\nARG BASE_CONTAINER=jupyter/tensorflow-notebook:2023-10-20\n\nFROM $BASE_CONTAINER\n\nENV "
  },
  {
    "path": "etc/docker/kernel-tf-py/README.md",
    "chars": 1075,
    "preview": "This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-ga"
  },
  {
    "path": "etc/kernel-launchers/R/scripts/launch_IRkernel.R",
    "chars": 10641,
    "preview": "library(argparse)\nlibrary(jsonlite)\n\nrequire(\"SparkR\")\nrequire(\"base64enc\")\nrequire(\"digest\")\nrequire(\"stringr\")\n\nr_libs"
  },
  {
    "path": "etc/kernel-launchers/R/scripts/server_listener.py",
    "chars": 8761,
    "preview": "\"\"\"A server listener for R.\"\"\"\n\nimport base64\nimport json\nimport logging\nimport os\nimport random\nimport socket\nimport uu"
  },
  {
    "path": "etc/kernel-launchers/bootstrap/bootstrap-kernel.sh",
    "chars": 3959,
    "preview": "#!/bin/bash\n\nPORT_RANGE=${PORT_RANGE:-${EG_PORT_RANGE:-0..0}}\nRESPONSE_ADDRESS=${RESPONSE_ADDRESS:-${EG_RESPONSE_ADDRESS"
  },
  {
    "path": "etc/kernel-launchers/docker/scripts/launch_docker.py",
    "chars": 7725,
    "preview": "\"\"\"Launches a containerized kernel.\"\"\"\n\nimport argparse\nimport os\nimport re\nimport sys\n\nimport urllib3\nfrom docker.clien"
  },
  {
    "path": "etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2",
    "chars": 3976,
    "preview": "# This file defines the Kubernetes objects necessary for kernels to run witihin Kubernetes.\n# Substitution parameters ar"
  },
  {
    "path": "etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py",
    "chars": 18690,
    "preview": "#!/opt/conda/bin/python\n\"\"\"Launch on kubernetes.\"\"\"\nimport argparse\nimport os\nimport sys\nfrom typing import Dict, List\n\n"
  },
  {
    "path": "etc/kernel-launchers/operators/scripts/launch_custom_resource.py",
    "chars": 7434,
    "preview": "#!/opt/conda/bin/python\n\"\"\"Launch a custom operator resource.\"\"\"\nimport argparse\nimport os\nimport re\nimport sys\n\nimport "
  },
  {
    "path": "etc/kernel-launchers/operators/scripts/sparkoperator.k8s.io-v1beta2.yaml.j2",
    "chars": 2816,
    "preview": "apiVersion: \"sparkoperator.k8s.io/v1beta2\"\nkind: SparkApplication\nmetadata:\n  name: {{ kernel_resource_name | yaml_safe "
  },
  {
    "path": "etc/kernel-launchers/python/scripts/launch_ipykernel.py",
    "chars": 23278,
    "preview": "\"\"\"Launch an ipython kernel.\"\"\"\n\nimport argparse\nimport base64\nimport json\nimport logging\nimport os\nimport random\nimport"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/build.sbt",
    "chars": 716,
    "preview": "/*\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\nname := \""
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/build.properties",
    "chars": 126,
    "preview": "#\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n#\nsbt.version = 1"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/plugins.sbt",
    "chars": 319,
    "preview": "/*\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\nlogLevel "
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/project/scalastyle-config.xml",
    "chars": 9779,
    "preview": "<!--\n  Copyright (c) Jupyter Development Team.\n  Distributed under the terms of the Modified BSD License.\n-->\n\n<!--\n\nIf "
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/KernelProfile.scala",
    "chars": 1541,
    "preview": "/**\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\npackage "
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/ToreeLauncher.scala",
    "chars": 11681,
    "preview": "/**\n * Copyright (c) Jupyter Development Team.\n * Distributed under the terms of the Modified BSD License.\n */\n\npackage "
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SecurityUtils.scala",
    "chars": 2158,
    "preview": "/**\n  * Copyright (c) Jupyter Development Team.\n  * Distributed under the terms of the Modified BSD License.\n  */\n\npacka"
  },
  {
    "path": "etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SocketUtils.scala",
    "chars": 2419,
    "preview": "/**\n  * Copyright (c) Jupyter Development Team.\n  * Distributed under the terms of the Modified BSD License.\n  */\n\npacka"
  },
  {
    "path": "etc/kernel-resources/ir/kernel.js",
    "chars": 2322,
    "preview": "const cmd_key = /Mac/.test(navigator.platform) ? \"Cmd\" : \"Ctrl\";\n\nconst edit_actions = [\n  {\n    name: \"R Assign\",\n    s"
  },
  {
    "path": "etc/kernelspecs/R_docker/kernel.json",
    "chars": 644,
    "preview": "{\n  \"language\": \"R\",\n  \"display_name\": \"R on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"enterp"
  },
  {
    "path": "etc/kernelspecs/R_kubernetes/kernel.json",
    "chars": 646,
    "preview": "{\n  \"language\": \"R\",\n  \"display_name\": \"R on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\": \"en"
  },
  {
    "path": "etc/kernelspecs/dask_python_yarn_remote/bin/run.sh",
    "chars": 604,
    "preview": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n    IMPERSONATION_OPTS=\"--user ${KERNEL_USERNAM"
  },
  {
    "path": "etc/kernelspecs/dask_python_yarn_remote/kernel.json",
    "chars": 937,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Dask - Python (YARN Remote Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n "
  },
  {
    "path": "etc/kernelspecs/python_distributed/kernel.json",
    "chars": 681,
    "preview": "{\n  \"display_name\": \"Python 3 (distributed)\",\n  \"language\": \"python\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"clas"
  },
  {
    "path": "etc/kernelspecs/python_docker/kernel.json",
    "chars": 682,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name"
  },
  {
    "path": "etc/kernelspecs/python_kubernetes/kernel.json",
    "chars": 684,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_"
  },
  {
    "path": "etc/kernelspecs/python_tf_docker/kernel.json",
    "chars": 704,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker with Tensorflow\",\n  \"metadata\": {\n    \"process_proxy\": {\n "
  },
  {
    "path": "etc/kernelspecs/python_tf_gpu_docker/kernel.json",
    "chars": 722,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Docker with Tensorflow with GPUs\",\n  \"metadata\": {\n    \"process_p"
  },
  {
    "path": "etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json",
    "chars": 724,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes with Tensorflow with GPUs\",\n  \"metadata\": {\n    \"proce"
  },
  {
    "path": "etc/kernelspecs/python_tf_kubernetes/kernel.json",
    "chars": 706,
    "preview": "{\n  \"language\": \"python\",\n  \"display_name\": \"Python on Kubernetes with Tensorflow\",\n  \"metadata\": {\n    \"process_proxy\":"
  },
  {
    "path": "etc/kernelspecs/scala_docker/kernel.json",
    "chars": 660,
    "preview": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Scala on Docker\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_name\":"
  },
  {
    "path": "etc/kernelspecs/scala_kubernetes/kernel.json",
    "chars": 662,
    "preview": "{\n  \"language\": \"scala\",\n  \"display_name\": \"Scala on Kubernetes\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"class_na"
  },
  {
    "path": "etc/kernelspecs/spark_R_conductor_cluster/bin/run.sh",
    "chars": 888,
    "preview": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n        IMPERSONATION_OPTS=\"--proxy-user ${KERN"
  },
  {
    "path": "etc/kernelspecs/spark_R_conductor_cluster/kernel.json",
    "chars": 754,
    "preview": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark R (Spark Cluster Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"cla"
  },
  {
    "path": "etc/kernelspecs/spark_R_kubernetes/bin/run.sh",
    "chars": 1393,
    "preview": "#!/usr/bin/env bash\n\nif [ \"${EG_IMPERSONATION_ENABLED}\" = \"True\" ]; then\n#        IMPERSONATION_OPTS=\"--proxy-user ${KER"
  },
  {
    "path": "etc/kernelspecs/spark_R_kubernetes/kernel.json",
    "chars": 1798,
    "preview": "{\n  \"language\": \"R\",\n  \"display_name\": \"Spark - R (Kubernetes Mode)\",\n  \"metadata\": {\n    \"process_proxy\": {\n      \"clas"
  }
]
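
For agents or scripts consuming this extraction programmatically, the file index above is a plain JSON array in which each entry carries three keys, as shown: "path" (the repo-relative file path), "chars" (the file's size in characters), and "preview" (a truncated head of the file). A minimal sketch of loading and summarizing it, assuming the array has been saved to a file — "file_index.json" is a hypothetical name, not part of the extraction:

    import json
    from collections import Counter
    from pathlib import Path

    # Load the JSON array shown above; the filename is a placeholder --
    # save the index wherever is convenient.
    entries = json.loads(Path("file_index.json").read_text(encoding="utf-8"))

    # Total size across the indexed files, from the "chars" field.
    total_chars = sum(e["chars"] for e in entries)

    # Rough size breakdown by top-level directory of each "path".
    by_top_dir = Counter()
    for e in entries:
        top_dir = e["path"].split("/")[0]
        by_top_dir[top_dir] += e["chars"]

    print(f"{len(entries)} files, {total_chars} chars total")
    for directory, chars in by_top_dir.most_common():
        print(f"  {directory}: {chars} chars")

Note that the "preview" strings are truncated by the extractor, so they are suitable for identifying a file's purpose but not for reconstructing its contents; use the full download for that.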

// ... and 95 more files (download for full content)

About this extraction

This page contains the full source code of the jupyter-server/enterprise_gateway GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 295 files (1.6 MB, approximately 426.9k tokens) and includes a symbol index of 853 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input; you can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.