Repository: jupyter-server/enterprise_gateway Branch: main Commit: 56a80a112385 Files: 295 Total size: 1.6 MB Directory structure: gitextract_mwzi65qv/ ├── .git-blame-ignore-revs ├── .gitattributes ├── .github/ │ ├── ISSUE_TEMPLATE.md │ ├── codeql/ │ │ └── codeql-config.yml │ ├── dependabot.yml │ └── workflows/ │ ├── build.yml │ └── codeql-analysis.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── LICENSE.md ├── Makefile ├── README.md ├── codecov.yml ├── conftest.py ├── docs/ │ ├── Makefile │ ├── doc-requirements.txt │ ├── environment.yml │ ├── make.bat │ └── source/ │ ├── _static/ │ │ └── custom.css │ ├── conf.py │ ├── contributors/ │ │ ├── contrib.md │ │ ├── debug.md │ │ ├── devinstall.md │ │ ├── docker.md │ │ ├── index.rst │ │ ├── roadmap.md │ │ ├── sequence-diagrams.md │ │ └── system-architecture.md │ ├── developers/ │ │ ├── custom-images.md │ │ ├── dev-process-proxy.md │ │ ├── index.rst │ │ ├── kernel-launcher.md │ │ ├── kernel-library.md │ │ ├── kernel-manager.md │ │ ├── kernel-specification.md │ │ └── rest-api.rst │ ├── index.rst │ ├── operators/ │ │ ├── config-add-env.md │ │ ├── config-availability.md │ │ ├── config-cli.md │ │ ├── config-culling.md │ │ ├── config-dynamic.md │ │ ├── config-env-debug.md │ │ ├── config-file.md │ │ ├── config-kernel-override.md │ │ ├── config-security.md │ │ ├── config-sys-env.md │ │ ├── deploy-conductor.md │ │ ├── deploy-distributed.md │ │ ├── deploy-docker.md │ │ ├── deploy-kubernetes.md │ │ ├── deploy-single.md │ │ ├── deploy-yarn-cluster.md │ │ ├── index.rst │ │ ├── installing-eg.md │ │ ├── installing-kernels.md │ │ └── launching-eg.md │ ├── other/ │ │ ├── index.rst │ │ ├── related-resources.md │ │ └── troubleshooting.md │ └── users/ │ ├── client-config.md │ ├── connecting-to-eg.md │ ├── index.rst │ ├── installation.md │ └── kernel-envs.md ├── enterprise_gateway/ │ ├── __init__.py │ ├── __main__.py │ ├── _version.py │ ├── base/ │ │ ├── __init__.py │ │ └── handlers.py │ ├── client/ │ │ ├── __init__.py │ │ └── gateway_client.py │ ├── enterprisegatewayapp.py │ ├── itests/ │ │ ├── __init__.py │ │ ├── kernels/ │ │ │ └── authorization_test/ │ │ │ └── kernel.json │ │ ├── test_authorization.py │ │ ├── test_base.py │ │ ├── test_python_kernel.py │ │ ├── test_r_kernel.py │ │ └── test_scala_kernel.py │ ├── mixins.py │ ├── services/ │ │ ├── __init__.py │ │ ├── api/ │ │ │ ├── __init__.py │ │ │ ├── handlers.py │ │ │ ├── swagger.json │ │ │ └── swagger.yaml │ │ ├── kernels/ │ │ │ ├── __init__.py │ │ │ ├── handlers.py │ │ │ └── remotemanager.py │ │ ├── kernelspecs/ │ │ │ ├── __init__.py │ │ │ ├── handlers.py │ │ │ └── kernelspec_cache.py │ │ ├── processproxies/ │ │ │ ├── __init__.py │ │ │ ├── conductor.py │ │ │ ├── container.py │ │ │ ├── crd.py │ │ │ ├── distributed.py │ │ │ ├── docker_swarm.py │ │ │ ├── k8s.py │ │ │ ├── processproxy.py │ │ │ ├── spark_operator.py │ │ │ └── yarn.py │ │ └── sessions/ │ │ ├── __init__.py │ │ ├── handlers.py │ │ ├── kernelsessionmanager.py │ │ └── sessionmanager.py │ └── tests/ │ ├── __init__.py │ ├── resources/ │ │ ├── failing_code2.ipynb │ │ ├── failing_code3.ipynb │ │ ├── kernel_api2.ipynb │ │ ├── kernel_api3.ipynb │ │ ├── kernels/ │ │ │ └── kernel_defaults_test/ │ │ │ └── kernel.json │ │ ├── public/ │ │ │ └── index.html │ │ ├── responses_2.ipynb │ │ ├── responses_3.ipynb │ │ ├── simple_api2.ipynb │ │ ├── simple_api3.ipynb │ │ ├── unknown_kernel.ipynb │ │ ├── zen2.ipynb │ │ └── zen3.ipynb │ ├── test_enterprise_gateway.py │ ├── test_gatewayapp.py │ ├── test_handlers.py │ ├── test_kernelspec_cache.py │ ├── 
test_mixins.py │ ├── test_process_proxy.py │ └── test_yaml_injection.py ├── etc/ │ ├── Makefile │ ├── docker/ │ │ ├── demo-base/ │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ ├── bootstrap-yarn-spark.sh │ │ │ ├── core-site.xml.template │ │ │ ├── fix-permissions │ │ │ ├── hdfs-site.xml │ │ │ ├── mapred-site.xml │ │ │ ├── ssh_config │ │ │ └── yarn-site.xml.template │ │ ├── docker-compose.yml │ │ ├── enterprise-gateway/ │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ └── start-enterprise-gateway.sh │ │ ├── enterprise-gateway-demo/ │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ ├── bootstrap-enterprise-gateway.sh │ │ │ └── start-enterprise-gateway.sh.template │ │ ├── kernel-image-puller/ │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ ├── image_fetcher.py │ │ │ ├── kernel_image_puller.py │ │ │ └── requirements.txt │ │ ├── kernel-py/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ ├── kernel-r/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ ├── kernel-scala/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ ├── kernel-spark-py/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ ├── kernel-spark-r/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ ├── kernel-tf-gpu-py/ │ │ │ ├── Dockerfile │ │ │ └── README.md │ │ └── kernel-tf-py/ │ │ ├── Dockerfile │ │ └── README.md │ ├── kernel-launchers/ │ │ ├── R/ │ │ │ └── scripts/ │ │ │ ├── launch_IRkernel.R │ │ │ └── server_listener.py │ │ ├── bootstrap/ │ │ │ └── bootstrap-kernel.sh │ │ ├── docker/ │ │ │ └── scripts/ │ │ │ └── launch_docker.py │ │ ├── kubernetes/ │ │ │ └── scripts/ │ │ │ ├── kernel-pod.yaml.j2 │ │ │ └── launch_kubernetes.py │ │ ├── operators/ │ │ │ └── scripts/ │ │ │ ├── launch_custom_resource.py │ │ │ └── sparkoperator.k8s.io-v1beta2.yaml.j2 │ │ ├── python/ │ │ │ └── scripts/ │ │ │ └── launch_ipykernel.py │ │ └── scala/ │ │ └── toree-launcher/ │ │ ├── build.sbt │ │ ├── project/ │ │ │ ├── build.properties │ │ │ ├── plugins.sbt │ │ │ └── scalastyle-config.xml │ │ └── src/ │ │ └── main/ │ │ └── scala/ │ │ └── launcher/ │ │ ├── KernelProfile.scala │ │ ├── ToreeLauncher.scala │ │ └── utils/ │ │ ├── SecurityUtils.scala │ │ └── SocketUtils.scala │ ├── kernel-resources/ │ │ └── ir/ │ │ └── kernel.js │ ├── kernelspecs/ │ │ ├── R_docker/ │ │ │ └── kernel.json │ │ ├── R_kubernetes/ │ │ │ └── kernel.json │ │ ├── dask_python_yarn_remote/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── python_distributed/ │ │ │ └── kernel.json │ │ ├── python_docker/ │ │ │ └── kernel.json │ │ ├── python_kubernetes/ │ │ │ └── kernel.json │ │ ├── python_tf_docker/ │ │ │ └── kernel.json │ │ ├── python_tf_gpu_docker/ │ │ │ └── kernel.json │ │ ├── python_tf_gpu_kubernetes/ │ │ │ └── kernel.json │ │ ├── python_tf_kubernetes/ │ │ │ └── kernel.json │ │ ├── scala_docker/ │ │ │ └── kernel.json │ │ ├── scala_kubernetes/ │ │ │ └── kernel.json │ │ ├── spark_R_conductor_cluster/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_R_kubernetes/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_R_yarn_client/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_R_yarn_cluster/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_python_conductor_cluster/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_python_kubernetes/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_python_operator/ │ │ │ └── kernel.json │ │ ├── spark_python_yarn_client/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_python_yarn_cluster/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── 
kernel.json │ │ ├── spark_scala_conductor_cluster/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_scala_kubernetes/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ ├── spark_scala_yarn_client/ │ │ │ ├── bin/ │ │ │ │ └── run.sh │ │ │ └── kernel.json │ │ └── spark_scala_yarn_cluster/ │ │ ├── bin/ │ │ │ └── run.sh │ │ └── kernel.json │ └── kubernetes/ │ └── helm/ │ └── enterprise-gateway/ │ ├── Chart.yaml │ ├── templates/ │ │ ├── daemonset.yaml │ │ ├── deployment.yaml │ │ ├── eg-clusterrole.yaml │ │ ├── eg-clusterrolebinding.yaml │ │ ├── eg-serviceaccount.yaml │ │ ├── imagepullSecret.yaml │ │ ├── ingress.yaml │ │ ├── kip-clusterrole.yaml │ │ ├── kip-clusterrolebinding.yaml │ │ ├── kip-serviceaccount.yaml │ │ ├── psp.yaml │ │ └── service.yaml │ └── values.yaml ├── pyproject.toml ├── release.sh ├── requirements.yml └── website/ ├── .gitignore ├── README.md ├── _config.yml ├── _data/ │ └── navigation.yml ├── _includes/ │ ├── call-to-action.html │ ├── contact.html │ ├── features.html │ ├── head.html │ ├── header.html │ ├── nav.html │ ├── platforms.html │ └── scripts.html ├── _layouts/ │ ├── home.html │ └── page.html ├── _sass/ │ ├── _base.scss │ └── _mixins.scss ├── css/ │ ├── bootstrap.css │ └── main.scss ├── font-awesome/ │ ├── css/ │ │ └── font-awesome.css │ ├── fonts/ │ │ └── FontAwesome.otf │ ├── less/ │ │ ├── animated.less │ │ ├── bordered-pulled.less │ │ ├── core.less │ │ ├── fixed-width.less │ │ ├── font-awesome.less │ │ ├── icons.less │ │ ├── larger.less │ │ ├── list.less │ │ ├── mixins.less │ │ ├── path.less │ │ ├── rotated-flipped.less │ │ ├── stacked.less │ │ └── variables.less │ └── scss/ │ ├── _animated.scss │ ├── _bordered-pulled.scss │ ├── _core.scss │ ├── _fixed-width.scss │ ├── _icons.scss │ ├── _larger.scss │ ├── _list.scss │ ├── _mixins.scss │ ├── _path.scss │ ├── _rotated-flipped.scss │ ├── _stacked.scss │ ├── _variables.scss │ └── font-awesome.scss ├── index.md ├── js/ │ ├── bootstrap.js │ ├── cbpAnimatedHeader.js │ ├── classie.js │ ├── creative.js │ ├── jquery.fittext.js │ └── jquery.js ├── platform-kubernetes.md ├── platform-spark.md ├── privacy-policy.md └── publish.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .git-blame-ignore-revs ================================================ # Initial pre-commit reformat df811d0deacebfd6cc77e8bf501d9b87ff006fb5 ================================================ FILE: .gitattributes ================================================ # Set the default behavior to have all files normalized to Unix-style # line endings upon check-in. * text=auto # Declare files that will always have CRLF line endings on checkout. *.bat text eol=crlf # Denote all files that are truly binary and should not be modified. *.dll binary *.exp binary *.lib binary *.pdb binary *.exe binary ================================================ FILE: .github/ISSUE_TEMPLATE.md ================================================ Help us improve the Jupyter Enterprise Gateway project by reporting issues or asking questions. ## Description ## Screenshots / Logs If applicable, add screenshots and/or logs to help explain your problem. To generate better logs, please run the gateway with `--debug` command line parameter. ## Environment - Enterprise Gateway Version \[e.g. 1.x, 2.x, ...\] - Platform: \[e.g. YARN, Kubernetes ...\] - Others \[e.g. 
Jupyter Server 5.7, JupyterHub 1.0, etc\] ================================================ FILE: .github/codeql/codeql-config.yml ================================================ name: "Enterprise Gateway CodeQL config" queries: - uses: security-and-quality paths-ignore: - enterprise_gateway/tests ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: # Set update schedule for GitHub Actions - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates to GitHub Actions once a week (Mondays by default) interval: "weekly" # Set update schedule for pip - package-ecosystem: "pip" directory: "/" schedule: # Check for updates to Python deps once a week (Mondays by default) interval: "weekly" ================================================ FILE: .github/workflows/build.yml ================================================ name: Builds on: push: pull_request: jobs: build: runs-on: ${{ matrix.os }} env: ASYNC_TEST_TIMEOUT: 60 KERNEL_LAUNCH_TIMEOUT: 120 CONDA_HOME: /usr/share/miniconda strategy: fail-fast: false matrix: os: [ubuntu-latest] python-version: ["3.10", "3.11"] steps: - name: Checkout uses: actions/checkout@v4 with: clean: true - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - name: Display dependency info run: | python --version pip --version conda --version - name: Add SBT launcher uses: sbt/setup-sbt@v1 - name: Install Python dependencies run: | pip install ".[test]" - name: Build and install Jupyter Enterprise Gateway uses: nick-invision/retry@v3.0.0 with: timeout_minutes: 10 max_attempts: 2 command: | make clean dist enterprise-gateway-demo test-install-wheel - name: Log current Python dependencies version run: | pip freeze - name: Run unit tests uses: nick-invision/retry@v3.0.0 with: timeout_minutes: 3 max_attempts: 1 command: | make test - name: Run integration tests run: | # Run integration tests with debug output make itest-yarn-debug - name: Collect logs if: success() || failure() run: | python --version pip --version pip list echo "==== Docker Container Logs ====" docker logs itest-yarn echo "==== Docker Container Status ====" docker ps -a echo "==== Enterprise Gateway Log ====" docker exec -it itest-yarn cat /usr/local/share/jupyter/enterprise-gateway.log || true - name: Run linters run: | make lint - name: Bump versions run: | pipx run tbump --dry-run --no-tag --no-push 100.100.100rc0 link_check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: python_version: "3.11" - name: Install Python dependencies run: | pip install ".[test]" - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1 with: ignore_links: |- http://my-gateway-server\.com:8888|https://docs\.openshift\.com/.*|https://docs\.redhat\.com/.* build_docs: runs-on: windows-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Base Setup uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: python_version: "3.11" - name: Build Docs run: make docs test_minimum_versions: name: Test Minimum Versions timeout-minutes: 20 runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: python_version: "3.11" - name: Install dependencies with minimum versions run: | pip install ".[test]" - name: Run the unit tests run: | pytest -vv -W default || pytest -vv -W default --lf make_sdist: name: Make SDist runs-on: 
ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: python_version: "3.11" - uses: jupyterlab/maintainer-tools/.github/actions/make-sdist@v1 test_sdist: runs-on: ubuntu-latest needs: [make_sdist] name: Install from SDist and Test timeout-minutes: 20 steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: python_version: "3.11" - uses: jupyterlab/maintainer-tools/.github/actions/test-sdist@v1 python_tests_check: # This job does nothing and is only used for the branch protection if: always() needs: - build - link_check - test_minimum_versions - build_docs - test_sdist runs-on: ubuntu-latest steps: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: jobs: ${{ toJSON(needs) }} ================================================ FILE: .github/workflows/codeql-analysis.yml ================================================ # For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL Checks" on: push: branches: [main] pull_request: # The branches below must be a subset of the branches above branches: [main] schedule: - cron: "24 7 * * 1" jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: ["python"] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support steps: - name: Checkout repository uses: actions/checkout@v4 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} config-file: ./.github/codeql/codeql-config.yml # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun # If the Autobuild fails above, remove it and uncomment the following three lines. # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
# - run: | # echo "Run, Build Application using script" # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 ================================================ FILE: .gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ .DS_Store .ipynb_checkpoints/ # PyCharm .idea/ *.iml # Build-related .image-* # Jekyll _site/ .sass-cache/ # Debug-related .kube/ # vscode ide stuff *.code-workspace .history/ .vscode/ # jetbrains ide stuff *.iml .idea/ ================================================ FILE: .pre-commit-config.yaml ================================================ ci: autoupdate_schedule: monthly repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: check-case-conflict - id: check-ast - id: check-docstring-first - id: check-executables-have-shebangs - id: check-added-large-files - id: check-case-conflict - id: check-merge-conflict - id: check-json - id: check-toml - id: check-yaml exclude: etc/kubernetes/.*.yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema rev: 0.27.4 hooks: - id: check-github-workflows - repo: https://github.com/executablebooks/mdformat rev: 0.7.17 hooks: - id: mdformat additional_dependencies: [mdformat-gfm, mdformat-frontmatter, mdformat-footnote] - repo: https://github.com/psf/black rev: 24.2.0 hooks: - id: black - repo: https://github.com/charliermarsh/ruff-pre-commit rev: v0.3.0 hooks: - id: ruff args: ["--fix"] ================================================ FILE: .readthedocs.yaml ================================================ version: 2 build: os: "ubuntu-22.04" tools: python: "mambaforge-22.9" sphinx: configuration: docs/source/conf.py conda: environment: docs/environment.yml ================================================ FILE: LICENSE.md ================================================ # Licensing terms This project is licensed under the terms of the Modified BSD License (also known as New or Revised or 3-Clause BSD), as follows: - Copyright (c) 2001-2015, IPython Development Team - Copyright (c) 2015-, Jupyter Development Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
Neither the name of the Jupyter Development Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## About the Jupyter Development Team The Jupyter Development Team is the set of all contributors to the Jupyter project. This includes all of the Jupyter Subprojects, which are the different repositories under the [jupyter](https://github.com/jupyter/) GitHub organization. The core team that coordinates development on GitHub can be found here: https://github.com/jupyter/. ## Our copyright policy Jupyter uses a shared copyright model. Each contributor maintains copyright over their contributions to Jupyter. But, it is important to note that these contributions are typically only changes to the repositories. Thus, the Jupyter source code, in its entirety is not the copyright of any single person or institution. Instead, it is the collective copyright of the entire Jupyter Development Team. If individual contributors want to maintain a record of what changes/contributions they have specific copyright on, they should indicate their copyright in the commit message of the change, when they commit the change to one of the Jupyter repositories. With this in mind, the following banner should be used in any source code file to indicate the copyright and license terms: ``` # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. ``` ================================================ FILE: Makefile ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. .PHONY: help clean clean-env dev dev-http docs install bdist sdist test release check_dists \ clean-images clean-enterprise-gateway clean-demo-base clean-kernel-images clean-enterprise-gateway \ clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py \ clean-kernel-tf-gpu-py clean-kernel-image-puller push-images push-enterprise-gateway-demo push-demo-base \ push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r \ push-kernel-scala push-kernel-tf-py push-kernel-tf-gpu-py push-kernel-image-puller publish helm-chart SA?=source activate ENV:=enterprise-gateway-dev SHELL:=/bin/bash MULTIARCH_BUILD?= TARGET_ARCH?=undefined VERSION?=3.3.0.dev0 SPARK_VERSION?=3.2.1 ifeq (dev, $(findstring dev, $(VERSION))) TAG:=dev else TAG:=$(VERSION) endif WHEEL_FILES:=$(shell find . -type f ! -path "./build/*" ! -path "./etc/*" ! -path "./docs/*" ! -path "./.git/*" ! -path "./.idea/*" ! -path "./dist/*" ! -path "./.image-*" ! 
-path "*/__pycache__/*" ) WHEEL_FILE:=dist/jupyter_enterprise_gateway-$(VERSION)-py3-none-any.whl SDIST_FILE:=dist/jupyter_enterprise_gateway-$(VERSION).tar.gz DIST_FILES=$(WHEEL_FILE) $(SDIST_FILE) HELM_DESIRED_VERSION:=v3.18.3 # Pin the version of helm to use (v3.18.3 is latest as of 6/21/25) HELM_CHART_VERSION:=$(shell grep version: etc/kubernetes/helm/enterprise-gateway/Chart.yaml | sed 's/version: //') HELM_CHART_PACKAGE:=dist/enterprise-gateway-$(HELM_CHART_VERSION).tgz HELM_CHART:=dist/jupyter_enterprise_gateway_helm-$(VERSION).tar.gz HELM_CHART_DIR:=etc/kubernetes/helm/enterprise-gateway HELM_CHART_FILES:=$(shell find $(HELM_CHART_DIR) -type f ! -name .DS_Store) HELM_INSTALL_DIR?=/usr/local/bin help: # http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' env: ## Make a dev environment -conda env create --file requirements.yml --name $(ENV) -conda env config vars set PYTHONPATH=$(PWD) --name $(ENV) activate: ## Print instructions to activate the virtualenv (default: enterprise-gateway-dev) @echo "Run \`$(SA) $(ENV)\` to activate the environment." clean: ## Make a clean source tree -rm -rf dist -rm -rf build -rm -rf *.egg-info -find . -name target -type d -exec rm -fr {} + -find . -name __pycache__ -type d -exec rm -fr {} + -find enterprise_gateway -name '*.pyc' -exec rm -fr {} + -find website -name '.sass-cache' -type d -exec rm -fr {} + -find website -name '_site' -type d -exec rm -fr {} + -find website -name 'build' -type d -exec rm -fr {} + -make -C docs clean -make -C etc clean clean-env: ## Remove conda env -conda env remove -n $(ENV) -y lint: ## Check code style @pip install -q -e ".[lint]" @pip install -q pipx ruff check . black --check --diff --color . mdformat --check *.md pipx run 'validate-pyproject[all]' pyproject.toml pipx run interrogate -v . run-dev: test-install-wheel ## Make a server in jupyter_websocket mode python enterprise_gateway docs: ## Make HTML documentation make -C docs requirements html SPHINXOPTS="-W" kernelspecs: kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files ## Create archives with sample kernelspecs kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernel_image_files: make VERSION=$(VERSION) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) -C etc $@ test-install: dist test-install-wheel test-install-tar ## Install and minimally run EG with the wheel and tar distributions test-install-wheel: pip uninstall -y jupyter_enterprise_gateway pip install dist/jupyter_enterprise_gateway-*.whl && \ jupyter enterprisegateway --help test-install-tar: pip uninstall -y jupyter_enterprise_gateway pip install dist/jupyter_enterprise_gateway-*.tar.gz && \ jupyter enterprisegateway --help bdist: $(WHEEL_FILE) $(WHEEL_FILE): $(WHEEL_FILES) pip install build && python -m build --wheel . \ && rm -rf *.egg-info && chmod 0755 dist/*.* sdist: $(SDIST_FILE) $(SDIST_FILE): $(WHEEL_FILES) pip install build && python -m build --sdist . 
\ && rm -rf *.egg-info && chmod 0755 dist/*.* helm-chart: helm-install $(HELM_CHART) ## Make helm chart distribution helm-install: $(HELM_INSTALL_DIR)/helm $(HELM_INSTALL_DIR)/helm: # Download and install helm curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -o /tmp/get_helm.sh \ && chmod +x /tmp/get_helm.sh \ && DESIRED_VERSION=$(HELM_DESIRED_VERSION) /tmp/get_helm.sh \ && rm -f /tmp/get_helm.sh helm-lint: helm-clean helm lint $(HELM_CHART_DIR) helm-clean: # Remove any .DS_Store files that might wind up in the package $(shell find etc/kubernetes/helm -type f -name '.DS_Store' -exec rm -f {} \;) $(HELM_CHART): $(HELM_CHART_FILES) make helm-lint helm package $(HELM_CHART_DIR) -d dist mv $(HELM_CHART_PACKAGE) $(HELM_CHART) # Rename output to match other assets dist: lint bdist sdist kernelspecs helm-chart ## Make source, binary, kernelspecs and helm chart distributions to dist folder TEST_DEBUG_OPTS:= test-debug: make TEST_DEBUG_OPTS="--nocapture --nologcapture --logging-level=10" test test: TEST?= test: ## Run unit tests ifeq ($(TEST),) pytest -vv $(TEST_DEBUG_OPTS) else # e.g., make test TEST="test_gatewayapp.py::TestGatewayAppConfig" pytest -vv $(TEST_DEBUG_OPTS) enterprise_gateway/tests/$(TEST) endif release: dist check_dists ## Make a wheel + source release on PyPI twine upload $(DIST_FILES) check_dists: pip install twine && twine check --strict $(DIST_FILES) # Here for doc purposes docker-images: ## Build docker images (includes kernel-based images) kernel-images: ## Build kernel-based docker images # Actual working targets... docker-images: demo-base enterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller enterprise-gateway-demo kernel-images enterprise-gateway kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py kernel-image-puller: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(TAG) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@ demo-base: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) NO_CACHE=$(NO_CACHE) TAG=$(SPARK_VERSION) SPARK_VERSION=$(SPARK_VERSION) MULTIARCH_BUILD=$(MULTIARCH_BUILD) TARGET_ARCH=$(TARGET_ARCH) -C etc $@ # Here for doc purposes clean-images: clean-demo-base ## Remove docker images (includes kernel-based images) clean-kernel-images: ## Remove kernel-based images clean-images clean-enterprise-gateway-demo clean-kernel-images clean-enterprise-gateway clean-kernel-py clean-kernel-spark-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala clean-kernel-tf-py clean-kernel-tf-gpu-py clean-kernel-image-puller: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@ clean-demo-base: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@ push-images: push-demo-base push-images push-enterprise-gateway-demo push-kernel-images push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-r push-kernel-spark-r push-kernel-scala push-kernel-tf-py push-kernel-tf-gpu-py push-kernel-image-puller: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(TAG) -C etc $@ push-demo-base: make WHEEL_FILE=$(WHEEL_FILE) VERSION=$(VERSION) TAG=$(SPARK_VERSION) -C etc $@ publish: NO_CACHE=--no-cache publish: clean clean-images dist docker-images push-images # itest should have these targets up to date: bdist kernelspecs docker-enterprise-gateway itest: itest-docker 
itest-yarn # itest configurable settings # indicates two things: # this prefix is used by itest to determine hostname to test against, in addition, # if itests will be run locally with docker-prep target, this will set the hostname within that container as well ITEST_HOSTNAME_PREFIX?=itest # indicates the user to emulate. This equates to 'KERNEL_USERNAME'... ITEST_USER?=bob # indicates the other set of options to use. At this time, only the python notebooks succeed, so we're skipping R and Scala. ITEST_OPTIONS?= # here's an example of the options (besides host and user) with their expected values ... # ITEST_OPTIONS=--impersonation < True | False > ITEST_YARN_PORT?=8888 ITEST_YARN_HOST?=localhost:$(ITEST_YARN_PORT) ITEST_YARN_TESTS?=enterprise_gateway/itests ITEST_KERNEL_LAUNCH_TIMEOUT=120 LOG_LEVEL=INFO itest-yarn-debug: ## Run integration tests (optionally) against docker demo (YARN) container with print statements make LOG_LEVEL=DEBUG TEST_DEBUG_OPTS="--log-level=10" itest-yarn PREP_ITEST_YARN?=1 itest-yarn: ## Run integration tests (optionally) against docker demo (YARN) container ifeq (1, $(PREP_ITEST_YARN)) make itest-yarn-prep endif (GATEWAY_HOST=$(ITEST_YARN_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) SPARK_VERSION=$(SPARK_VERSION) ITEST_HOSTNAME_PREFIX=$(ITEST_HOSTNAME_PREFIX) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_YARN_TESTS)) @echo "Run \`docker logs itest-yarn\` to see enterprise-gateway log." PREP_TIMEOUT?=60 itest-yarn-prep: @-docker rm -f itest-yarn >> /dev/null @echo "Starting enterprise-gateway container (run \`docker logs itest-yarn\` to see container log)..." @-docker run -itd -p $(ITEST_YARN_PORT):$(ITEST_YARN_PORT) -p 8088:8088 -p 8042:8042 -h itest-yarn --name itest-yarn -v `pwd`/enterprise_gateway/itests:/tmp/byok elyra/enterprise-gateway-demo:$(TAG) --gateway @(r="1"; attempts=0; while [ "$$r" == "1" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo "Waiting for enterprise-gateway to start..."; sleep 2; ((attempts++)); docker logs itest-yarn |grep --regexp "Jupyter Enterprise Gateway .* is available at http"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo "Wait for startup timed out!"; exit 1; fi;) # This should get cleaned up once docker support is more mature ITEST_DOCKER_PORT?=8889 ITEST_DOCKER_HOST?=localhost:$(ITEST_DOCKER_PORT) ITEST_DOCKER_TESTS?=enterprise_gateway/itests/test_r_kernel.py::TestRKernelLocal enterprise_gateway/itests/test_python_kernel.py::TestPythonKernelLocal enterprise_gateway/itests/test_scala_kernel.py::TestScalaKernelLocal ITEST_DOCKER_KERNELS=PYTHON_KERNEL_LOCAL_NAME=python_docker SCALA_KERNEL_LOCAL_NAME=scala_docker R_KERNEL_LOCAL_NAME=R_docker itest-docker-debug: ## Run integration tests (optionally) against docker container with print statements make LOG_LEVEL=DEBUG TEST_DEBUG_OPTS="--nocapture --nologcapture --logging-level=10" itest-docker PREP_ITEST_DOCKER?=1 itest-docker: ## Run integration tests (optionally) against docker swarm ifeq (1, $(PREP_ITEST_DOCKER)) make itest-docker-prep endif (GATEWAY_HOST=$(ITEST_DOCKER_HOST) LOG_LEVEL=$(LOG_LEVEL) KERNEL_USERNAME=$(ITEST_USER) KERNEL_LAUNCH_TIMEOUT=$(ITEST_KERNEL_LAUNCH_TIMEOUT) $(ITEST_DOCKER_KERNELS) ITEST_HOSTNAME_PREFIX=$(ITEST_USER) pytest -vv $(TEST_DEBUG_OPTS) $(ITEST_DOCKER_TESTS)) @echo "Run \`docker service logs itest-docker\` to see enterprise-gateway log."
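# Example invocations (a sketch, not additional targets; assumes a local Docker daemon and the
# elyra images referenced above are available -- the values shown are illustrative):
#   make itest-docker                        # deploy the swarm stack via itest-docker-prep, then run the docker itests
#   make PREP_ITEST_DOCKER=0 itest-docker    # skip the prep step and test an already-running deployment
#   make LOG_LEVEL=DEBUG ITEST_DOCKER_PORT=9000 itest-docker   # hypothetical port override for an existing gateway
#   make ITEST_USER=alice itest-yarn         # emulate a different KERNEL_USERNAME for the YARN itests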
PREP_TIMEOUT?=180 itest-docker-prep: @-docker service rm enterprise-gateway_enterprise-gateway enterprise-gateway_enterprise-gateway-proxy @-docker swarm leave --force # Check if swarm mode is active, if not attempt to create the swarm @(docker info | grep -q 'Swarm: active'; if [ $$? -eq 1 ]; then docker swarm init; fi;) @echo "Starting enterprise-gateway swarm service (run \`docker service logs enterprise-gateway_enterprise-gateway\` to see service log)..." @KG_PORT=${ITEST_DOCKER_PORT} EG_DOCKER_NETWORK=enterprise-gateway docker stack deploy -c etc/docker/docker-compose.yml enterprise-gateway @(r="1"; attempts=0; while [ "$$r" == "1" -a $$attempts -lt $(PREP_TIMEOUT) ]; do echo "Waiting for enterprise-gateway to start..."; sleep 2; ((attempts++)); docker service logs enterprise-gateway_enterprise-gateway 2>&1 |grep --regexp "Jupyter Enterprise Gateway .* is available at http"; r=$$?; done; if [ $$attempts -ge $(PREP_TIMEOUT) ]; then echo "Wait for startup timed out!"; exit 1; fi;) ================================================ FILE: README.md ================================================ **[Website](https://jupyter-enterprise-gateway.readthedocs.io/)** | **[Technical Overview](#technical-overview)** | **[Installation](#installation)** | **[System Architecture](#system-architecture)** | **[Contributing](#contributing)** # Jupyter Enterprise Gateway [![Actions Status](https://github.com/jupyter-server/enterprise_gateway/workflows/Builds/badge.svg)](https://github.com/jupyter-server/enterprise_gateway/actions) [![PyPI version](https://badge.fury.io/py/jupyter-enterprise-gateway.svg)](https://badge.fury.io/py/jupyter-enterprise-gateway) [![Downloads](https://pepy.tech/badge/jupyter-enterprise-gateway/month)](https://pepy.tech/project/jupyter-enterprise-gateway) [![Documentation Status](https://readthedocs.org/projects/jupyter-enterprise-gateway/badge/?version=latest)](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/?badge=latest) [![Google Group](https://img.shields.io/badge/google-group-blue.svg)](https://groups.google.com/forum/#!forum/jupyter) Jupyter Enterprise Gateway enables Jupyter Notebook to launch remote kernels in a distributed cluster, including Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm. It provides out-of-the-box support for the following kernels: - Python using IPython kernel - R using IRkernel - Scala using Apache Toree kernel Full documentation for Jupyter Enterprise Gateway can be found [here](https://jupyter-enterprise-gateway.readthedocs.io/en/latest). Jupyter Enterprise Gateway does not manage multiple Jupyter Notebook deployments; for that you should use [JupyterHub](https://github.com/jupyterhub/jupyterhub). ## Technical Overview Jupyter Enterprise Gateway is a web server that provides headless access to Jupyter kernels within an enterprise. Inspired by Jupyter Kernel Gateway, Jupyter Enterprise Gateway provides feature parity with Kernel Gateway's [jupyter-websocket mode](https://jupyter-kernel-gateway.readthedocs.io/en/latest/websocket-mode.html) in addition to the following: - Adds support for remote kernels hosted throughout the enterprise where kernels can be launched in the following ways: - Local to the Enterprise Gateway server (today's Kernel Gateway behavior) - On specific nodes of the cluster utilizing a round-robin algorithm - On nodes identified by an associated resource manager - Provides support for Apache Spark managed by YARN, IBM Spectrum Conductor, Kubernetes or Docker Swarm out of the box.
Others can be configured via Enterprise Gateway's extensible framework. - Secure communication from the client, through the Enterprise Gateway server, to the kernels - Multi-tenant capabilities - Persistent kernel sessions - Ability to associate profiles consisting of configuration settings to a kernel for a given user (see [Project Roadmap](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/roadmap.html)) ![Deployment Diagram](https://github.com/jupyter-server/enterprise_gateway/blob/main/docs/source/images/deployment.png?raw=true) ## Installation Detailed installation instructions are located in the [Users Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/users/index.html) of the project docs. Here's a quick start using `pip`: ```bash # install from pypi pip install --upgrade jupyter_enterprise_gateway # show all config options jupyter enterprisegateway --help-all # run it with default options jupyter enterprisegateway ``` Please check the [configuration options within the Operators Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/index.html#configuring-enterprise-gateway) for information about the supported options. ## System Architecture The [System Architecture page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/system-architecture.html) includes information about Enterprise Gateway's remote kernel, process proxy, and launcher frameworks. ## Contributing The [Contribution page](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/contrib.html) includes information about how to contribute to Enterprise Gateway along with our roadmap. While there, you'll want to [set up a development environment](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/contributors/devinstall.html) and check out typical developer tasks. ================================================ FILE: codecov.yml ================================================ codecov: notify: require_ci_to_pass: yes coverage: precision: 2 round: down range: "70...100" status: project: no patch: no changes: no parsers: gcov: branch_detection: conditional: yes loop: yes method: no macro: no comment: off ================================================ FILE: conftest.py ================================================ def pytest_addoption(parser): parser.addoption("--host", action="store", default="localhost:8888") parser.addoption("--username", action="store", default="elyra") parser.addoption("--impersonation", action="store", default="false") def pytest_generate_tests(metafunc): # This is called for every test. Only get/set command line arguments # if the argument is specified in the list of test "fixturenames". if "host" in metafunc.fixturenames: metafunc.parametrize("host", [metafunc.config.option.host]) if "username" in metafunc.fixturenames: metafunc.parametrize("username", [metafunc.config.option.username]) if "impersonation" in metafunc.fixturenames: metafunc.parametrize("impersonation", [metafunc.config.option.impersonation]) ================================================ FILE: docs/Makefile ================================================ # Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = -n SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source DOC_REQUIREMENTS = doc-requirements.txt .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext requirements help: @echo "Please use \`make <target>' where <target> is one of" @echo " requirements to install required packages" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* requirements: pip install -q -r $(DOC_REQUIREMENTS) html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/JupyterHub.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/JupyterHub.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B.
You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/JupyterHub" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/JupyterHub" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
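# Example usage (a sketch; assumes sphinx-build is on PATH or installable via the requirements target):
#   make requirements html     # install doc-requirements.txt, then build HTML into build/html
#   make SPHINXOPTS="-W" html  # fail on warnings, as the root Makefile's `docs` target does
#   make linkcheck             # check external links; results land in build/linkcheck/output.txt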
================================================ FILE: docs/doc-requirements.txt ================================================ # https://github.com/miyakogi/m2r/issues/66 mistune<4 myst-parser pydata_sphinx_theme sphinx sphinx-markdown-tables sphinx_book_theme sphinxcontrib-mermaid sphinxcontrib-openapi sphinxcontrib_github_alt sphinxcontrib_spelling sphinxemoji tornado ================================================ FILE: docs/environment.yml ================================================ name: enterprise_gateway_docs channels: - conda-forge - defaults - free dependencies: - pip - python=3.11 - pip: - -r doc-requirements.txt ================================================ FILE: docs/make.bat ================================================ @ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source set I18NSPHINXOPTS=%SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^<target^>` where ^<target^> is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\JupyterHub.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\JupyterHub.qhc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt.
goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end ================================================ FILE: docs/source/_static/custom.css ================================================ body div.sphinxsidebarwrapper p.logo { text-align: left; } .mermaid svg { height: 100%; } ================================================ FILE: docs/source/conf.py ================================================ # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "3.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "myst_parser", "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.autosummary", "sphinx.ext.mathjax", "sphinxcontrib_github_alt", "sphinxcontrib.mermaid", "sphinxcontrib.openapi", "sphinxemoji.sphinxemoji", ] try: import enchant # noqa extensions += ["sphinxcontrib.spelling"] except ImportError: pass myst_enable_extensions = ["html_image"] myst_heading_anchors = 4 # Needs to be 4 or higher # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. source_suffix = { ".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown", } # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "Jupyter Enterprise Gateway" copyright = "2022, Project Jupyter" # noqa author = "Jupyter Server Team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # _version_py = os.path.join("..", "..", "enterprise_gateway", "_version.py") version_ns = {} with open(_version_py) as version_file: exec(version_file.read(), version_ns) # noqa # The short X.Y version. version = version_ns["__version__"][:3] # The full version, including alpha/beta/rc tags. release = version_ns["__version__"] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# html_theme = "sphinx_book_theme"
html_logo = "_static/jupyter-logo.png"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# 'logo_only': html_logo
# 'description': "Enterprise Gateway",
# 'fixed_sidebar': False,
# 'show_relbars': True,
# 'github_user': 'jupyter',
# 'github_repo': 'enterprise_gateway',
# 'github_type': 'star',
# 'logo': 'jupyter-logo.png',
# 'logo_text_align': 'left',
# 'analytics_id': 'UA-130853690-1',

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
    "custom.css",
]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "EnterpriseGatewaydoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "EnterpriseGateway.tex",
        "Enterprise Gateway Documentation",
        "https://jupyter.org",
        "manual",
    ),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "enterprise_gateway", "Enterprise Gateway Documentation", [author], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "enterprise_gateway",
        "Enterprise Gateway Documentation",
        author,
        "EnterpriseGateway",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = project

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the Pillow.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    "ipython": ("https://ipython.readthedocs.io/en/stable/", None),
    "jupyter": ("https://jupyter.readthedocs.io/en/latest/", None),
}

spelling_lang = "en_US"
spelling_word_list_filename = "spelling_wordlist.txt"

# Read The Docs
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

# if not on_rtd:  # only import and set the theme if we're building docs locally
#     import sphinx_rtd_theme
#     html_theme = 'alabaster'
#     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it

================================================
FILE: docs/source/contributors/contrib.md
================================================

# Contributing to Jupyter Enterprise Gateway

Thank you for your interest in Jupyter Enterprise Gateway! If you would like to contribute to the project, please first take a look at the [Project Jupyter Contributor Documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html).

Enterprise Gateway has recently joined the [Jupyter Server organization](https://github.com/jupyter-server). Please check out our [team compass page](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass) and try to attend our weekly dev meeting as we have a common goal of making all Jupyter server-side applications better!

Prior to your contribution, we strongly recommend getting acquainted with Enterprise Gateway by checking out the [System Architecture](system-architecture.md) and [Development Workflow](devinstall.md) pages.

================================================
FILE: docs/source/contributors/debug.md
================================================

# Debugging Jupyter Enterprise Gateway

This page discusses how to go about debugging Enterprise Gateway. We also provide troubleshooting information in our [Troubleshooting Guide](../other/troubleshooting.md).

## Configuring your IDE

While your mileage may vary depending on which IDE you are using, the steps below (using PyCharm as an example) should be useful for configuring a debugging session for Enterprise Gateway with minimum adjustments for different IDEs.

### Creating a new Debug Configuration

Go to Run->Edit Configuration and create a new python configuration with the following settings:

![Enterprise Gateway debug configuration](../images/debug_configuration.png)

**Script Path:**

```bash
/Users/jovyan/opensource/jupyter/elyra/scripts/jupyter-enterprisegateway
```

**Parameters:**

```bash
--ip=0.0.0.0
--log-level=DEBUG
--EnterpriseGatewayApp.yarn_endpoint="http://elyra-fyi-node-1.fyre.ibm.com:8088/ws/v1/cluster"
--EnterpriseGatewayApp.remote_hosts=['localhost']
```

**Environment Variables:**

```bash
EG_ENABLE_TUNNELING=False
```

**Working Directory:**

```bash
/Users/jovyan/opensource/jupyter/elyra/scripts
```

### Running in debug mode

Now that you have handled the necessary configuration, use Run-Debug, select the debug configuration you just created, and happy debugging!

================================================
FILE: docs/source/contributors/devinstall.md
================================================

# Development Workflow

Here are instructions for setting up a development environment for the [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway) server.
It also includes common steps in the developer workflow such as building Enterprise Gateway, running tests, building docs, packaging kernel specifications, etc.

## Prerequisites

Install GNU make on your system.

## Clone the repo

Clone this repository into a local directory.

```bash
# make a directory under your HOME directory to put the source code
mkdir -p ~/projects
cd !$

# clone this repo
git clone https://github.com/jupyter-server/enterprise_gateway.git
```

## Make

Enterprise Gateway's build environment is centered around `make` and the corresponding [`Makefile`](https://github.com/jupyter-server/enterprise_gateway/blob/main/Makefile).

Entering `make` with no parameters yields the following:

```
activate             Print instructions to activate the virtualenv (default: enterprise-gateway-dev)
clean-env            Remove conda env
clean-images         Remove docker images (includes kernel-based images)
clean-kernel-images  Remove kernel-based images
clean                Make a clean source tree
dist                 Make source, binary, kernelspecs and helm chart distributions to dist folder
docker-images        Build docker images (includes kernel-based images)
docs                 Make HTML documentation
env                  Make a dev environment
helm-chart           Make helm chart distribution
itest-docker-debug   Run integration tests (optionally) against docker container with print statements
itest-docker         Run integration tests (optionally) against docker swarm
itest-yarn-debug     Run integration tests (optionally) against docker demo (YARN) container with print statements
itest-yarn           Run integration tests (optionally) against docker demo (YARN) container
kernel-images        Build kernel-based docker images
kernelspecs          Create archives with sample kernelspecs
lint                 Check code style
release              Make a wheel + source release on PyPI
run-dev              Make a server in jupyter_websocket mode
test-install         Install and minimally run EG with the wheel and tar distributions
test                 Run unit tests
```

Some of the more useful commands are listed below.

## Build the conda environment

Build a Python 3 conda environment that can be used to run the Enterprise Gateway server within an IDE. This may be necessary prior to [debugging Enterprise Gateway](./debug.md), depending on your local Python environment. See [Conda's Managing environments](https://docs.conda.io/projects/conda/en/stable/user-guide/tasks/manage-environments.html#managing-environments) for background on environments and why you may find them useful as you develop on Enterprise Gateway.

```bash
make env
```

By default, the env built will be named `enterprise-gateway-dev`. To produce a different conda env, you can specify the name via the `ENV=` parameter.

```bash
make ENV=my-conda-env env
```

To delete your existing environment, use the `clean-env` task.

```bash
make clean-env
```

## Build the wheel file

Build a wheel file that can then be installed via `pip install`.

```
make bdist
```

The wheel file will reside in the `dist` directory.

## Build the kernelspec tar file

Enterprise Gateway includes several sets of kernel specifications for each of the three primary kernels: `IPython Kernel`, `IRkernel`, and `Apache Toree` to demonstrate remote kernels and their corresponding launchers. These sets of files are then added to tar files corresponding to their target resource managers. In addition, a _combined_ tar file is also built containing all kernel specifications. Like the wheel file, these tar files will reside in the `dist` directory.

```bash
make kernelspecs
```

```{note}
Because the scala launcher requires a jar file, `make kernelspecs` requires the use of `sbt` to build the scala launcher jar. Please consult the [sbt site](https://www.scala-sbt.org/) for directions to install/upgrade `sbt` on your platform. We currently use version 1.3.12.
```

## Build distribution files

Builds the files necessary for a given release: the wheel file, the source tar file, and the kernel specification tar files. This is essentially a helper target consisting of the `bdist`, `sdist`, and `kernelspecs` targets.

```bash
make dist
```

## Run the Enterprise Gateway server

Run an instance of the Enterprise Gateway server.

```bash
make run-dev
```

Then access the running server at the URL printed in the console.

## Build the docs

Run Sphinx to build the HTML documentation.

```bash
make docs
```

This command actually issues `make requirements html` from the `docs` sub-directory.

## Run the unit tests

Run the unit test suite.

```
make test
```

To run a subset of tests, pass the `TEST` argument to the make command as shown below:

```
make test TEST="test_gatewayapp.py"
make test TEST="test_gatewayapp.py::TestGatewayAppConfig"
make test TEST="test_gatewayapp.py::TestGatewayAppConfig::test_config_env_vars_bc"
```

## Run the integration tests

Run the integration test suite. These tests will bootstrap the [`elyra/enterprise-gateway-demo`](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) docker image with Apache Spark using the YARN resource manager and Jupyter Enterprise Gateway and perform various tests for each kernel in local, YARN client, and YARN cluster modes.

```bash
make itest-yarn
```

## Build the docker images

The following can be used to build all docker images used within the project. See [docker images](docker.md) for specific details.

```bash
make docker-images
```

If you only want to build the kernel images, use

```bash
make kernel-images
```

================================================
FILE: docs/source/contributors/docker.md
================================================

# Docker Images

All docker images can be pulled from docker hub's [elyra organization](https://hub.docker.com/u/elyra/) and their docker files can be found in the github repository in the appropriate directory of [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker). Local images can also be built via `make docker-images`.

```{note}
Base images and versions change over time. Check the Dockerfiles in [etc/docker](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/docker) for the current base images used in each build.
```

The following sections describe the docker images used within Kubernetes and Docker Swarm environments.

## elyra/enterprise-gateway

The primary image for Kubernetes and Docker Swarm support, [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) contains the Enterprise Gateway server software and default kernel specifications. For Kubernetes it is deployed using the [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway). For Docker Swarm, deployment can be accomplished using [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml).

We recommend that a persistent/mounted volume be used so that the kernel specifications can be accessed outside the container, since we've found those to require post-deployment modifications from time to time.

## elyra/kernel-py

Image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/) contains the IPython kernel. It is currently built on the [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) image with additional support necessary for remote operation.

## elyra/kernel-spark-py

Image [elyra/kernel-spark-py](https://hub.docker.com/r/elyra/kernel-spark-py/) is built on [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py) and includes the Spark 2.4 distribution for use in Kubernetes clusters. Please note that using the kernel with Spark within a Docker Swarm configuration probably won't yield the expected results.

## elyra/kernel-tf-py

Image [elyra/kernel-tf-py](https://hub.docker.com/r/elyra/kernel-tf-py/) contains the IPython kernel. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image with additional support necessary for remote operation.

## elyra/kernel-scala

Image [elyra/kernel-scala](https://hub.docker.com/r/elyra/kernel-scala/) contains the Scala (Apache Toree) kernel and is built on [elyra/spark](https://hub.docker.com/r/elyra/spark) which is, itself, built using the scripts provided by the Spark 2.4 distribution for use in Kubernetes clusters. As a result, using the kernel with Spark within a Docker Swarm configuration probably won't yield the expected results. Since Apache Toree is currently tied to Spark, creation of a _vanilla_ mode Scala kernel is not high on our current set of priorities.

## elyra/kernel-r

Image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/) contains the IRKernel and is currently built on the [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/) image.

## elyra/kernel-spark-r

Image [elyra/kernel-spark-r](https://hub.docker.com/r/elyra/kernel-spark-r/) also contains the IRKernel but is built on [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r) and includes the Spark 2.4 distribution for use in Kubernetes clusters.

## Ancillary Docker Images

The project produces two docker images to make testing easier: `elyra/demo-base` and `elyra/enterprise-gateway-demo`.

### elyra/demo-base

The [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) image is considered the base image upon which [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) is built. It consists of a Hadoop YARN installation that includes Spark, Java, miniconda, and various kernel installations. The primary use of this image is to quickly build elyra/enterprise-gateway images for testing and development purposes. To build a local image, run `make demo-base`.

This image can be used to start a separate Hadoop YARN cluster that, when combined with another instance of elyra/enterprise-gateway, can better demonstrate remote kernel functionality.

### elyra/enterprise-gateway-demo

Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) also includes the various example kernel specifications contained in the repository.

By default, this container will start with enterprise gateway running as a service user named `jovyan`. This user is enabled for `sudo` so that it can emulate other users where necessary. Other users included in this image are `elyra`, `bob`, and `alice` (names commonly used in security-based examples).

We plan on producing one image per release to the [enterprise-gateway-demo docker repo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/) where the image's tag reflects the corresponding release.

To build a local image, run `make enterprise-gateway-demo`. Because this is a development build, the tag for this image will not reflect the value of the VERSION variable in the root `Makefile` but will be 'dev'.

================================================
FILE: docs/source/contributors/index.rst
================================================

Contributors Guide
==================

These pages target people who are interested in contributing directly to the Jupyter Enterprise Gateway Project.

.. admonition:: Use cases

   - *As a contributor, I want to learn more about kernel management within the Jupyter ecosystem.*
   - *As a contributor, I want to make Enterprise Gateway a more stable service for my organization and the community as a whole.*
   - *As a contributor, I'm interested in adding the ability for Enterprise Gateway to be highly available and fault tolerant.*

.. note::
   As a *contributor*, we encourage you to be familiar with all of the guides (Users, Developers, Operators) to best support Enterprise Gateway. This guide provides an overview of Enterprise Gateway along with instructions on how to get set up.

.. toctree::
   :maxdepth: 1
   :name: contributors

   contrib
   system-architecture
   docker
   devinstall
   sequence-diagrams
   debug
   roadmap

================================================
FILE: docs/source/contributors/roadmap.md
================================================

# Project Roadmap

We have plenty to do, now and in the future. Here's where we're headed:

## Completed in 3.x

- Spark 3.0 support (including pod template files)
- Spark Operator support via `SparkOperatorProcessProxy`
- Custom Resource Definition support via `CustomResourceProcessProxy`
- Session persistence (file-based and webhook-based)
- `KERNEL_VOLUMES` and `KERNEL_VOLUME_MOUNTS` for Kubernetes and Spark Operator kernels
- Authorizer class override support (`EG_AUTHORIZER_CLASS`)
- SSTI prevention in `KERNEL_POD_NAME` template substitution
- Python 3.9 and below dropped; Python 3.10+ required

## Planned for 4.0

- Kernel Provisioners
  - Provisioners will replace process proxies and enable Enterprise Gateway to remove its cap on `jupyter_client < 7` and `jupyter_server < 2`.
- Parameterized Kernels
  - Enable the ability to prompt for parameters
  - These will likely be based on kernel provisioners

## Wish list

- High Availability
  - Session persistence using a shared location (NoSQL DB) (file-based persistence has been implemented)
  - Active/active support
- Multi-gateway support on client-side
  - Enables the ability for a single Jupyter Server to be configured against multiple Gateway servers simultaneously. This work will primarily be in Jupyter Server.
- Pluggable load-balancers into `DistributedProcessProxy` (currently uses simple round-robin)
- Support for other resource managers
  - Slurm?
  - Mesos?
- User Environments
  - Improve the way user files are made available to remote kernels
- Administration UI
  - Dashboard with running kernels
  - Lifecycle management
  - Time running, stop/kill, Profile Management, etc.

We'd love to hear any other use cases you might have and look forward to your contributions to Jupyter Enterprise Gateway!

================================================
FILE: docs/source/contributors/sequence-diagrams.md
================================================

# Sequence Diagrams

The following consists of various sequence diagrams you might find helpful. We plan to add diagrams based on demand and contributions.

## Kernel launch: Jupyter Lab to Enterprise Gateway

This diagram depicts the interactions between components when a kernel start request is submitted from Jupyter Lab running against [Jupyter Server configured to use Enterprise Gateway](../users/connecting-to-eg.md). The diagram also includes the retrieval of kernel specifications (kernelspecs) prior to the kernel's initialization.

```{mermaid}
sequenceDiagram
    participant JupyterLab
    participant JupyterServer
    participant EnterpriseGateway
    participant ProcessProxy
    participant Kernel
    participant ResourceManager
    Note left of JupyterLab: fetch kernelspecs
    JupyterLab->>JupyterServer: https GET api/kernelspecs
    JupyterServer->>EnterpriseGateway: https GET api/kernelspecs
    EnterpriseGateway-->>JupyterServer: api/kernelspecs response
    JupyterServer-->>JupyterLab: api/kernelspecs response
    Note left of JupyterLab: kernel initialization
    JupyterLab->>JupyterServer: https POST api/sessions
    JupyterServer->>EnterpriseGateway: https POST api/kernels
    EnterpriseGateway->>ProcessProxy: launch_process()
    ProcessProxy->>Kernel: launch kernel
    ProcessProxy->>ResourceManager: confirm startup
    Kernel-->>ProcessProxy: connection info
    ResourceManager-->>ProcessProxy: state & host info
    ProcessProxy-->>EnterpriseGateway: complete connection info
    EnterpriseGateway->>Kernel: TCP socket requests
    Kernel-->>EnterpriseGateway: TCP socket handshakes
    EnterpriseGateway-->>JupyterServer: api/kernels response
    JupyterServer-->>JupyterLab: api/sessions response
    JupyterLab->>JupyterServer: ws GET api/kernels
    JupyterServer->>EnterpriseGateway: ws GET api/kernels
    EnterpriseGateway->>Kernel: kernel_info_request message
    Kernel-->>EnterpriseGateway: kernel_info_reply message
    EnterpriseGateway-->>JupyterServer: websocket upgrade response
    JupyterServer-->>JupyterLab: websocket upgrade response
```

================================================
FILE: docs/source/contributors/system-architecture.md
================================================

# System Architecture

Below are sections presenting details of the Enterprise Gateway internals and other related items. While we will attempt to maintain its consistency, the ultimate answers are in the code itself.

## Enterprise Gateway Process Proxy Extensions

Enterprise Gateway is a follow-on project to Jupyter Kernel Gateway with additional abilities to support remote kernel sessions on behalf of multiple users within resource-managed frameworks such as [Apache Hadoop YARN](https://apache.github.io/hadoop/hadoop-yarn/hadoop-yarn-site/YARN.html) or [Kubernetes](https://kubernetes.io/). Enterprise Gateway introduces these capabilities by extending the existing class hierarchies for `AsyncKernelManager` and `AsyncMultiKernelManager` classes, along with an additional abstraction known as a _process proxy_.

### Overview

At its basic level, a running kernel consists of two components for its communication - a set of ports and a process.

### Kernel Ports

The first component is a set of five ZeroMQ ports used to convey the Jupyter protocol between the Notebook and the underlying kernel. In addition to the five ports are an IP address, a key, and a signature scheme indicator used to interpret the key. These eight pieces of information are conveyed to the kernel via a JSON file, known as the connection file.

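To make this concrete, the following is an illustrative sketch of the information a connection file carries - the five ports plus the IP address, key, and signature scheme described above. It is expressed as a Python dictionary purely for readability; all values shown are placeholders.

```python
# Illustrative connection-file contents; every value below is a placeholder.
connection_info = {
    "shell_port": 52317,    # execution and introspection requests
    "iopub_port": 52318,    # broadcast of outputs and kernel status
    "stdin_port": 52319,    # input requests originating from the kernel
    "control_port": 52320,  # shutdown and interrupt requests
    "hb_port": 52321,       # heartbeat
    "ip": "127.0.0.1",      # must be local within the base framework
    "key": "6cb8497a-5e61-4826-a78a-1d35d1f2b72d",  # message-signing key
    "signature_scheme": "hmac-sha256",  # how the key is used to sign messages
}
```
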
Within the base framework, the IP address must be a local IP address, meaning that the kernel cannot be remote from the library launching the kernel. The enforcement of this restriction is down in the `jupyter_client` module - two levels below Enterprise Gateway.

This component is the core communication mechanism between the Notebook and the kernel. All aspects, including lifecycle management, can occur via this component. The kernel process (below) comes into play only when port-based communication becomes unreliable or additional information is required.

### Kernel Process

When a kernel is launched, one of the fields of the kernel's associated kernel specification is used to identify a command to invoke. In today's implementation, this command information, along with other environment variables (also described in the kernel specification), is passed to `popen()` which returns a process class. This class supports four basic methods following its creation:

1. `poll()` to determine if the process is still running
1. `wait()` to block the caller until the process has terminated
1. `send_signal(signum)` to send a signal to the process
1. `kill()` to terminate the process

As you can see, other forms of process communication can be achieved by abstracting the launch mechanism.

### Kernel Specifications

The primary vehicle for indicating a given kernel should be handled in a different manner is the kernel specification, otherwise known as the _kernel spec_. Enterprise Gateway leverages the natively extensible `metadata` stanza within the kernel specification to introduce a new stanza named `process_proxy`.

The `process_proxy` stanza identifies the class that provides the kernel's process abstraction (while allowing for future extensions). This class then provides the kernel's lifecycle management operations relative to the managed resource or functional equivalent.

Here's an example of a kernel specification that uses the `DistributedProcessProxy` class for its abstraction:

```json
{
  "language": "scala",
  "display_name": "Spark - Scala (YARN Client Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID}",
    "__TOREE_OPTS__": "",
    "LAUNCH_OPTS": "",
    "DEFAULT_INTERPRETER": "Scala"
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

See the [Process Proxy](#process-proxy) section for more details on process proxies and those provided as part of the Enterprise Gateway release.

## Remote Mapping Kernel Manager

`RemoteMappingKernelManager` is a subclass of Jupyter Server's [`AsyncMappingKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/services/kernels/kernelmanager.py#L633) and provides two functions.

1. It provides the vehicle for making the `RemoteKernelManager` class known and available.
1. It overrides `start_kernel` to look at the target kernel's kernel spec to see if it contains a remote process proxy class entry.
If so, it records the name of the class in its member variable to be made available to the kernel start logic.

## Remote Kernel Manager

`RemoteKernelManager` is a subclass of jupyter_client's [`AsyncIOLoopKernelManager` class](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/ioloop/manager.py#L62) and provides the primary integration points for remote process proxy invocations. It implements a number of methods which allow Enterprise Gateway to circumvent restrictions that would otherwise prevent remote operation. As a result, some of these overrides may not be necessary if lower layers of the Jupyter framework were modified. For example, some methods are required because Jupyter makes assumptions that the kernel process is local.

Its primary functionality, however, is to override the `_launch_kernel` method (which is the method closest to the process invocation) and instantiate the appropriate process proxy instance - which is then returned in place of the process instance used in today's implementation. Any interaction with the process then takes place via the process proxy.

Both `RemoteMappingKernelManager` and `RemoteKernelManager` class definitions can be found in [remotemanager.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/kernels/remotemanager.py)

## Process Proxy

Process proxy classes derive from the abstract base class `BaseProcessProxyABC` - which defines the four basic process methods. There are two immediate subclasses of `BaseProcessProxyABC` - `LocalProcessProxy` and `RemoteProcessProxy`.

`LocalProcessProxy` is essentially a pass-through to the current implementation. Kernel specifications that do not contain a `process_proxy` stanza will use `LocalProcessProxy`.

`RemoteProcessProxy` is an abstract base class representing remote kernel processes. Currently, there are seven built-in subclasses of `RemoteProcessProxy` ...

- `DistributedProcessProxy` - largely a proof of concept class, `DistributedProcessProxy` is responsible for the launch and management of kernels distributed across an explicitly defined set of hosts using ssh. Hosts are determined via a round-robin algorithm (that we should make pluggable someday).
- `YarnClusterProcessProxy` - is responsible for the discovery and management of kernels hosted as Hadoop YARN applications within a managed cluster.
- `KubernetesProcessProxy` - is responsible for the discovery and management of kernels hosted within a Kubernetes cluster.
- `DockerSwarmProcessProxy` - is responsible for the discovery and management of kernels hosted within a Docker Swarm cluster.
- `DockerProcessProxy` - is responsible for the discovery and management of kernels hosted within Docker configuration. Note: because these kernels will always run local to the corresponding Enterprise Gateway instance, these process proxies are of limited use.
- `ConductorClusterProcessProxy` - is responsible for the discovery and management of kernels hosted within an IBM Spectrum Conductor cluster.
- `SparkOperatorProcessProxy` - is responsible for the discovery and management of kernels hosted within a Kubernetes cluster but created as a `SparkApplication` instead of a Pod. The `SparkApplication` is a Kubernetes custom resource defined inside the project [spark-on-k8s-operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator), which organizes the various Spark-on-Kubernetes components and makes them easier to configure.

```{note}
Before you run a kernel associated with `SparkOperatorProcessProxy`, ensure that the [Kubernetes Operator for Apache Spark is installed](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation) in your Kubernetes cluster.
```

You might notice that the last six process proxies do not necessarily control the _launch_ of the kernel. This is because the native jupyter framework is utilized such that the script that is invoked by the framework is what launches the kernel against that particular resource manager. As a result, the _startup time_ actions of these process proxies are more about discovering where the kernel _landed_ within the cluster in order to establish a mechanism for determining lifetime. _Discovery_ typically consists of using the resource manager's API to locate the kernel whose name includes its kernel ID in some fashion.

On the other hand, the `DistributedProcessProxy` essentially wraps the kernel specification's argument vector (i.e., invocation string) in a remote shell since the host is determined by Enterprise Gateway, eliminating the discovery step from its implementation.

These class definitions can be found in the [processproxies package](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies). However, Enterprise Gateway is architected such that additional process proxy implementations can be provided and are not required to be located within the Enterprise Gateway hierarchy - i.e., we embrace a _bring your own process proxy_ model.

![Process Class Hierarchy](../images/process_proxy_hierarchy.png)

The complete process proxy class hierarchy is:

```text
BaseProcessProxyABC
├── LocalProcessProxy
└── RemoteProcessProxy
    ├── DistributedProcessProxy
    ├── YarnClusterProcessProxy
    ├── ConductorClusterProcessProxy
    └── ContainerProcessProxy
        ├── DockerSwarmProcessProxy
        ├── DockerProcessProxy
        └── KubernetesProcessProxy
            └── CustomResourceProcessProxy
                └── SparkOperatorProcessProxy
```

The process proxy constructor looks as follows:

```python
def __init__(self, kernel_manager, proxy_config):
```

where

- `kernel_manager` is an instance of a `RemoteKernelManager` class.
- `proxy_config` is a dictionary of configuration values present in the `kernel.json` file. These values can be used to override or amend various global configuration values on a per-kernel basis. See [Process Proxy Configuration](#process-proxy-configuration) for more information.

```python
@abstractmethod
def launch_process(self, kernel_cmd, **kw):
```

where

- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an artifact of the kernel manager `_launch_kernel()` method.
- `**kw` is a set of keyword arguments, which includes an `env` dictionary element consisting of the names and values of which environment variables to set at launch time.

The `launch_process()` method is the primary method exposed on the Process Proxy classes. It's responsible for performing the appropriate actions relative to the target type. The process must be in a running state prior to returning from this method - otherwise attempts to use the connections will not be successful since the (remote) kernel needs to have created the sockets.

All process proxy subclasses should ensure `BaseProcessProxyABC.launch_process()` is called - which will automatically place a variable named `KERNEL_ID` (consisting of the kernel's unique ID) into the corresponding kernel's environment variable list since `KERNEL_ID` is a primary mechanism for associating remote applications to a specific kernel instance.

```python
def poll(self):
```

The `poll()` method is used by the Jupyter framework to determine if the process is still alive. By default, the framework's heartbeat mechanism calls `poll()` every 3 seconds. This method returns `None` if the process is still running, `False` otherwise (per the `popen()` contract).

```python
def wait(self):
```

The `wait()` method is used by the Jupyter framework when terminating a kernel. Its purpose is to block return to the caller until the process has terminated. Since this could be a while, it's best to return control in a reasonable amount of time since the kernel instance is destroyed anyway. This method does not return a value.

```python
def send_signal(self, signum):
```

The `send_signal()` method is used by the Jupyter framework to send a signal to the process. Currently, `SIGINT (2)` (to interrupt the kernel) is the signal sent.

It should be noted that for normal processes - both local and remote - `poll()` and `kill()` functionality can be implemented via `send_signal` with `signum` values of `0` and `9`, respectively.

This method returns `None` if the process is still running, `False` otherwise.

```python
def kill(self):
```

The `kill()` method is used by the Jupyter framework to terminate the kernel process. This method is only necessary when the request to shutdown the kernel - sent via the control port of the ZeroMQ ports - does not respond in an appropriate amount of time.

This method returns `None` if the process is killed successfully, `False` otherwise.

### RemoteProcessProxy

As noted above, `RemoteProcessProxy` is an abstract base class that derives from `BaseProcessProxyABC`. Subclasses of `RemoteProcessProxy` must implement two methods - `confirm_remote_startup()` and `handle_timeout()`:

```python
@abstractmethod
def confirm_remote_startup(self, kernel_cmd, **kw):
```

where

- `kernel_cmd` is a list (argument vector) that should be invoked to launch the kernel. This parameter is an artifact of the kernel manager `_launch_kernel()` method.
- `**kw` is a set of keyword arguments.

`confirm_remote_startup()` is responsible for detecting that the remote kernel has been appropriately launched and is ready to receive requests. This can include gathering application status from the remote resource manager but is really a function of having received the connection information from the remote kernel launcher. (See [Kernel Launchers](#kernel-launchers))

```python
@abstractmethod
def handle_timeout(self):
```

`handle_timeout()` is responsible for detecting that the remote kernel has failed to start up in an acceptable time. It should be called from `confirm_remote_startup()`. If the timeout expires, `handle_timeout()` should throw HTTP Error 500 (`Internal Server Error`).

Kernel launch timeout expiration is expressed via the environment variable `KERNEL_LAUNCH_TIMEOUT`. If this value does not exist, it defaults to the Enterprise Gateway process environment variable `EG_KERNEL_LAUNCH_TIMEOUT` - which defaults to 30 seconds if unspecified. Since all `KERNEL_` environment variables "flow" from the Notebook server, the launch timeout can be specified as a client attribute of the Notebook session.

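To tie these pieces together, here is a minimal, hypothetical sketch of a _bring your own_ remote process proxy following the signatures described above. The import path mirrors the `class_name` entries shown earlier but should be verified against the code; `MyClusterProcessProxy` and its `_connection_info_received()` helper are invented stand-ins for a real resource manager integration, and Tornado's `HTTPError` is assumed for the HTTP 500 described above.

```python
# Hypothetical "bring your own process proxy" sketch - not a working integration.
import os
import time

from tornado.web import HTTPError

from enterprise_gateway.services.processproxies.processproxy import RemoteProcessProxy


class MyClusterProcessProxy(RemoteProcessProxy):
    """Illustrative proxy that would track kernels in a fictional resource manager."""

    def __init__(self, kernel_manager, proxy_config):
        super().__init__(kernel_manager, proxy_config)
        # Optional per-kernel override (see Process Proxy Configuration below).
        self.cluster_endpoint = proxy_config.get("cluster_endpoint")
        self.start_time = None

    def launch_process(self, kernel_cmd, **kw):
        super().launch_process(kernel_cmd, **kw)  # places KERNEL_ID into the kernel's env
        self.start_time = time.monotonic()
        # ... submit `kernel_cmd` to the resource manager here ...
        self.confirm_remote_startup(kernel_cmd, **kw)
        return self  # the proxy stands in for the process instance

    def confirm_remote_startup(self, kernel_cmd, **kw):
        # Poll until the remote launcher has returned connection information,
        # checking for timeout expiration on each pass.
        while not self._connection_info_received():
            self.handle_timeout()
            time.sleep(1)

    def handle_timeout(self):
        # Simplified: a real implementation resolves KERNEL_LAUNCH_TIMEOUT and
        # EG_KERNEL_LAUNCH_TIMEOUT as described above.
        timeout = int(os.environ.get("KERNEL_LAUNCH_TIMEOUT", 30))
        if time.monotonic() - self.start_time > timeout:
            raise HTTPError(500, "Kernel failed to start within the allotted time.")

    def _connection_info_received(self) -> bool:
        # Stand-in for receiving the encrypted connection info on the
        # response address (see Kernel Launchers below).
        return False
```
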
#### YarnClusterProcessProxy

As part of its base offering, Enterprise Gateway provides an implementation of a process proxy that communicates with the Hadoop YARN resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources.

Derived from `RemoteProcessProxy`, `YarnClusterProcessProxy` uses the `yarn-api-client` library to locate the kernel and monitor its lifecycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports.

This process proxy is reliant on the `--EnterpriseGatewayApp.yarn_endpoint` command line option or the `EG_YARN_ENDPOINT` environment variable to determine where the YARN resource manager is located. To accommodate increased flexibility, the endpoint definition can be defined within the process proxy stanza of the kernel specification, enabling the ability to direct specific kernels to different YARN clusters.

In cases where the YARN cluster is configured for high availability, the `--EnterpriseGatewayApp.alt_yarn_endpoint` command line option or the `EG_ALT_YARN_ENDPOINT` environment variable should also be defined. When set, the underlying `yarn-api-client` library will choose the active Resource Manager between the two.

```{note}
If Enterprise Gateway is running on an edge node of the cluster and has a valid `yarn-site.xml` file in HADOOP_CONF_DIR, neither of these values are required (default = None). In such cases, the `yarn-api-client` library will choose the active Resource Manager from the configuration files.
```

```{seealso}
[Hadoop YARN deployments](../operators/deploy-yarn-cluster.md) in the Operators Guide for details.
```

#### DistributedProcessProxy

Like `YarnClusterProcessProxy`, Enterprise Gateway also provides an implementation of a basic round-robin remoting mechanism that is part of the `DistributedProcessProxy` class. This class uses the `--EnterpriseGatewayApp.remote_hosts` command line option (or `EG_REMOTE_HOSTS` environment variable) to determine on which hosts a given kernel should be launched. It uses a basic round-robin algorithm to index into the list of remote hosts for selecting the target host. It then uses ssh to launch the kernel on the target host. As a result, all kernel specification files must reside on the remote hosts in the same directory structure as on the Enterprise Gateway server.

It should be noted that kernels launched with this process proxy run in YARN _client_ mode - so their resources (within the kernel process itself) are not managed by the Hadoop YARN resource manager.

Like the YARN endpoint parameter, the `remote_hosts` parameter can be specified within the process proxy configuration to override the global value - enabling finer-grained kernel distributions.

```{seealso}
[Distributed deployments](../operators/deploy-distributed.md) in the Operators Guide for details.
```

#### KubernetesProcessProxy

With the popularity of Kubernetes within the enterprise, Enterprise Gateway provides an implementation of a process proxy that communicates with the Kubernetes resource manager via the Kubernetes API. Unlike the other offerings, in the case of Kubernetes, Enterprise Gateway is itself deployed within the Kubernetes cluster as a _Service_ and _Deployment_.
The primary vehicle by which this is accomplished is via [Helm](https://helm.sh/) and Enterprise Gateway provides a set of [helm chart](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kubernetes/helm/enterprise-gateway) files to simplify deployment.

```{seealso}
[Kubernetes deployments](../operators/deploy-kubernetes.md) in the Operators Guide for details.
```

#### DockerSwarmProcessProxy

Enterprise Gateway provides an implementation of a process proxy that communicates with the Docker Swarm resource manager via the Docker API. When used, the kernels are launched as swarm services and can reside anywhere in the managed cluster. To leverage kernels configured in this manner, Enterprise Gateway can be deployed either as a Docker Swarm _service_ or a traditional Docker container.

A similar `DockerProcessProxy` implementation has also been provided. When used, the corresponding kernel will be launched as a traditional docker container that runs local to the launching Enterprise Gateway instance. As a result, its use has limited value.

```{seealso}
[Docker and Docker Swarm deployments](../operators/deploy-docker.md) in the Operators Guide for details.
```

#### ConductorClusterProcessProxy

Enterprise Gateway also provides an implementation of a process proxy that communicates with an IBM Spectrum Conductor resource manager that has been instructed to launch a kernel on one of its worker nodes. The node on which the kernel is launched is up to the resource manager - which enables an optimized distribution of kernel resources.

Derived from `RemoteProcessProxy`, `ConductorClusterProcessProxy` uses Conductor's RESTful API to locate the kernel and monitor its life-cycle. However, once the kernel has returned its connection information, the primary kernel operations naturally take place over the ZeroMQ ports.

This process proxy is reliant on the `--EnterpriseGatewayApp.conductor_endpoint` command line option or the `EG_CONDUCTOR_ENDPOINT` environment variable to determine where the Conductor resource manager is located.

```{seealso}
[IBM Spectrum Conductor deployments](../operators/deploy-conductor.md) in the Operators Guide for details.
```

#### CustomResourceProcessProxy

Enterprise Gateway also provides an implementation of a process proxy derived from `KubernetesProcessProxy` called `CustomResourceProcessProxy`. Instead of creating kernels based on a Kubernetes pod, `CustomResourceProcessProxy` manages kernels via a custom resource definition (CRD). For example, `SparkApplication` is a CRD that includes many components of a Spark-on-Kubernetes application.

If you are going to extend `CustomResourceProcessProxy`, just follow the steps below:

- Override the custom-resource-related variables (i.e., `group`, `version`, and `plural`) and the `get_container_status` method, with respect to [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py).
- Define a jinja template like [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2). As a generic design, the template file should be named `{crd_group}-{crd_version}` so that you can reuse [launch_kubernetes.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py) in the kernelspec.
- Define a kernel specification like [spark_python_operator/kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_operator/kernel.json).

### Process Proxy Configuration

Each `kernel.json`'s `process_proxy` stanza can specify an optional `config` stanza that is converted into a dictionary of name/value pairs and passed as an argument to each process-proxy constructor relative to the class identified by the `class_name` entry.

How each dictionary entry is interpreted is completely a function of the constructor relative to that process-proxy class or its superclass. For example, an alternate list of remote hosts has meaning to the `DistributedProcessProxy` but not to its superclasses. As a result, the superclass constructors will not attempt to interpret that value.

In addition, certain dictionary entries can override or amend system-level configuration values set on the command-line, thereby allowing administrators to tune behaviors down to the kernel level. For example, an administrator might want to constrain Python kernels configured to use specific resources to an entirely different set of hosts (and ports) that other remote kernels might be targeting in order to isolate valuable resources. Similarly, an administrator might want to only authorize specific users to a given kernel.

In such situations, one might find the following `process_proxy` stanza:

```json
{
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy",
      "config": {
        "remote_hosts": "priv_host1,priv_host2",
        "port_range": "40000..41000",
        "authorized_users": "bob,alice"
      }
    }
  }
}
```

In this example, the kernel associated with this `kernel.json` file is relegated to the hosts `priv_host1` and `priv_host2` where kernel ports will be restricted to a range between `40000` and `41000` and only users `bob` and `alice` can launch such kernels (provided neither appear in the global set of `unauthorized_users` since denial takes precedence).

For a current enumeration of which system-level configuration values can be overridden or amended on a per-kernel basis see [Per-kernel overrides](../operators/config-kernel-override.md).

## Kernel Launchers

As noted above, a kernel is considered started once the `launch_process()` method has conveyed its connection information back to the Enterprise Gateway server process. Conveyance of connection information from a remote kernel is the responsibility of the remote kernel _launcher_.

Kernel launchers provide a means of normalizing behaviors across kernels while avoiding kernel modifications. Besides providing a location where connection file creation can occur, they also provide a 'hook' for other kinds of behaviors - like establishing virtual environments or sandboxes, providing collaboration behavior, adhering to port range restrictions, etc.

There are four primary tasks of a kernel launcher:

1. Creation of the connection file and ZMQ ports on the remote (target) system along with a _gateway listener_ socket
1. Conveyance of the connection (and listener socket) information back to the Enterprise Gateway process
1. Invocation of the target kernel
1. Listen for interrupt and shutdown requests from Enterprise Gateway and carry out the action when appropriate

Kernel launchers are minimally invoked with three parameters (all of which are conveyed by the `argv` stanza of the corresponding `kernel.json` file) - the kernel's ID as created by the server and conveyed via the placeholder `{kernel_id}`, a response address consisting of the Enterprise Gateway server IP and port on which to return the connection information similarly represented by the placeholder `{response_address}`, and a public key used by the launcher to encrypt an AES key that encrypts the kernel's connection information back to the server and represented by the placeholder `{public_key}`.

The kernel's ID is identified by the parameter `--RemoteProcessProxy.kernel-id`. Its value (`{kernel_id}`) is essentially used to build a connection file to pass to the to-be-launched kernel, along with any other things - like log files, etc.

The response address is identified by the parameter `--RemoteProcessProxy.response-address`. Its value (`{response_address}`) consists of a string of the form `<IPv4 address>:<port>` where the IPv4 address points back to the Enterprise Gateway server - which is listening for a response on the provided port. The port's default value is `8877`, but can be specified via the environment variable `EG_RESPONSE_PORT`.

The public key is identified by the parameter `--RemoteProcessProxy.public-key`. Its value (`{public_key}`) is used to encrypt an AES key created by the launcher to encrypt the kernel's connection information. The server, upon receipt of the response, uses the corresponding private key to decrypt the AES key, which it then uses to decrypt the connection information. Both the public and private keys are ephemeral; created upon Enterprise Gateway's startup. They can be ephemeral because they are only needed during a kernel's startup and never again.

Here's a [kernel.json](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) file illustrating these parameters...

```json
{
  "language": "python",
  "display_name": "Spark - Python (YARN Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

Other options supported by launchers include:

- `--RemoteProcessProxy.port-range {port_range}` - passes configured port-range to launcher where launcher applies that range to kernel ports. The port-range may be configured globally or on a per-kernel specification basis, as previously described.
- `--RemoteProcessProxy.spark-context-initialization-mode [lazy|eager|none]` - indicates the _timeframe_ in which the spark context will be created.
  - `lazy` (default) attempts to defer initialization as late as possible - although this can vary depending on the underlying kernel and launcher implementation.
  - `eager` attempts to create the spark context as soon as possible.
  - `none` skips spark context creation altogether.

Note that some launchers may not be able to support all modes. For example, the scala launcher uses the Apache Toree kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply. Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context creation occurring immediately, but in the background, thereby minimizing the kernel's startup time. A sketch of how a launcher consumes these parameters appears below.

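For illustration only, here is a minimal, hypothetical sketch of how a custom launcher might declare the parameters described above. The real launchers (e.g., `launch_ipykernel.py`) are considerably more involved, and their exact flag handling may differ; the `dest` names here are invented.

```python
# Hypothetical sketch of a launcher's argument handling; not the actual
# implementation used by the launchers shipped with Enterprise Gateway.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--RemoteProcessProxy.kernel-id", dest="kernel_id",
                    help="the kernel's ID, used when building the connection file")
parser.add_argument("--RemoteProcessProxy.response-address", dest="response_address",
                    help="'<ip>:<port>' on which Enterprise Gateway awaits the connection info")
parser.add_argument("--RemoteProcessProxy.public-key", dest="public_key",
                    help="public key used to encrypt the AES key protecting the connection info")
parser.add_argument("--RemoteProcessProxy.port-range", dest="port_range", default=None,
                    help="optional '<lower>..<upper>' range to which kernel ports are restricted")
parser.add_argument("--RemoteProcessProxy.spark-context-initialization-mode",
                    dest="init_mode", default="lazy", choices=["lazy", "eager", "none"])

args = parser.parse_args()
# A real launcher would now create the ZMQ ports and connection file (honoring
# args.port_range), encrypt the connection info with an AES key wrapped by
# args.public_key, send it to args.response_address, and invoke the kernel.
```
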
Note that some launchers may not be able to support all modes. For example, the scala launcher uses the Apache Toree kernel - which currently assumes a spark context will exist. As a result, a mode of `none` doesn't apply. Similarly, the `lazy` and `eager` modes in the Python launcher are essentially the same, with the spark context creation occurring immediately, but in the background, thereby minimizing the kernel's startup time.

Kernel.json files also include a `LAUNCH_OPTS:` section in the `env` stanza to allow custom parameters to be conveyed in the launcher's environment. `LAUNCH_OPTS` are then referenced in the [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) script as the initial arguments to the launcher (see [launch_ipykernel.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py)) ...

```bash
eval exec \
     "${SPARK_HOME}/bin/spark-submit" \
     "${SPARK_OPTS}" \
     "${PROG_HOME}/scripts/launch_ipykernel.py" \
     "${LAUNCH_OPTS}" \
     "$@"
```

## Extending Enterprise Gateway

Theoretically speaking, enabling a kernel for use in other frameworks amounts to the following:

1. Build a kernel specification file that identifies the process proxy class to be used.
1. Implement the process proxy class such that it supports the four primitive functions of `poll()`, `wait()`, `send_signal(signum)` and `kill()` along with `launch_process()`.
1. If the process proxy corresponds to a remote process, derive the process proxy class from `RemoteProcessProxy` and implement `confirm_remote_startup()` and `handle_timeout()`.
1. Insert invocation of a launcher (if necessary) which builds the connection file and returns its contents on the `{response_address}` socket, following the encryption protocol set forth in the other launchers.

```{seealso}
This topic is covered in the [Developers Guide](../developers/index.rst).
```

================================================
FILE: docs/source/developers/custom-images.md
================================================

# Custom Kernel Images

This section presents the information needed to build a custom kernel image for your own uses with Enterprise Gateway. This is typically necessary if one desires to extend an existing image with additional supporting libraries, or to build an image that encapsulates a different set of functionality altogether.

## Extending Existing Kernel Images

A common form of customization occurs when the existing kernel image serves the fundamentals but the user wishes it to be extended with additional libraries, avoiding the need to install them within each notebook session. Since the image already meets the [basic requirements](#requirements-for-custom-kernel-images), this is really just a matter of referencing the existing image in the `FROM` statement and installing the additional libraries. Because the EG kernel images do not run as the `root` user, you may need to switch users to perform the update.

```dockerfile
FROM elyra/kernel-py:VERSION

# switch to root user to perform installation (if necessary)
USER root

RUN pip install my-libraries

# switch back to the jovyan user
USER $NB_UID
```

## Bringing Your Own Kernel Image

Users that do not wish to extend an existing kernel image must be cognizant of a couple of things.

1. Requirements of a kernel-based image to be used by Enterprise Gateway.
1. Is the base image one from [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks)?
### Requirements for Custom Kernel Images

Custom kernel images require some support files from the Enterprise Gateway repository. These are packaged into a tar file for each release, starting with `2.5.0`. This tar file (named `jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz`) is composed of a few files - one bootstrap script and a kernel launcher (one per kernel type).

#### Bootstrap-kernel.sh

Enterprise Gateway provides a single [bootstrap-kernel.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/bootstrap/bootstrap-kernel.sh) script that handles the three kernel languages supported out of the box - Python, R, and Scala. When a kernel image is started by Enterprise Gateway, parameters used within the bootstrap-kernel.sh script are conveyed via environment variables. The bootstrap script is then responsible for validating and converting those parameters to meaningful arguments to the appropriate launcher.

#### Kernel Launcher

The kernel launcher, as discussed [here](kernel-launcher.md), does a number of things. In particular, it creates the connection ports and conveys that connection information back to Enterprise Gateway via the socket identified by the response address parameter. Although not a requirement for container-based usage, it is recommended that the launcher be written in the same language as the kernel. (This is more of a requirement when used in applications like Hadoop YARN.)

### About Jupyter Docker-stacks Images

Most of what is presented assumes the base image for your custom image is derived from the [Jupyter Docker-stacks](https://github.com/jupyter/docker-stacks) repository. As a result, it's good to cover what makes up those assumptions so you can build your own image independently of the docker-stacks repository.

All images produced from the docker-stacks repository come with a certain user configured. This user is named `jovyan` and is mapped to a user id (UID) of `1000` and a group id (GID) of `100` - named `users`. The various startup scripts and commands typically reside in `/usr/local/bin`, and we recommend trying to adhere to that policy.

The base jupyter image, upon which almost all images from docker-stacks are built, also contains a `fix-permissions` script that is responsible for _gracefully_ adjusting permissions based on its given parameters. By only changing the necessary permissions, use of this script minimizes the size of the docker layer in which that command is invoked during the build of the docker image.

### Sample Dockerfiles for Custom Kernel Images

Below we provide two working Dockerfiles that produce custom kernel images: one based on an existing image from Jupyter docker-stacks, the other on an independent base image.

#### Custom Kernel Image Built on Jupyter Image

Here's an example Dockerfile that installs the minimally necessary items for a Python-based kernel image built on the docker-stack image `jupyter/scipy-notebook`. Note: the string `VERSION` must be replaced with the appropriate value.

```dockerfile
# Choose a base image. Preferably one from https://github.com/jupyter/docker-stacks
FROM jupyter/scipy-notebook:61d8aaedaeaf

# Switch user to root since, if from docker-stacks, it's probably jovyan
USER root

# Install any packages required for the kernel-wrapper. If the image
# does not contain the target kernel (e.g., IPython, IRkernel, etc.),
# it should be installed as well.
RUN pip install pycrypto

# Download and extract the enterprise gateway kernel launchers and bootstrap
# files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID.
RUN wget https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\
	tar -xvf jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz -C /usr/local/bin &&\
	rm -f jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz &&\
	fix-permissions /usr/local/bin

# Switch user back to jovyan and setup language and default CMD
USER $NB_UID
ENV KERNEL_LANGUAGE python
CMD /usr/local/bin/bootstrap-kernel.sh
```

#### Independent Custom Kernel Image

If your base image is not from docker-stacks, it is recommended that you NOT run the image as USER `root` and, instead, create an _image user_ that is not UID 0. For this example, we will create the `jovyan` user with UID `1000` and a primary group of `users`, GID `100`. Note that Enterprise Gateway makes no assumption relative to the user in which the kernel image is running. Aside from configuring the image user, all other aspects of customization are the same. In this case, we'll use the tensorflow-gpu image and convert it to be usable via Enterprise Gateway as a custom kernel image. Note that because this image didn't have `wget`, we used `curl` to download the supporting kernel-image files.

```dockerfile
FROM tensorflow/tensorflow:2.5.0-gpu-jupyter

USER root

# Install OS dependencies required for the kernel-wrapper. Missing
# packages can be installed later only if container is running as
# privileged user.
RUN apt-get update && apt-get install -yq --no-install-recommends \
    build-essential \
    libsm6 \
    libxext-dev \
    libxrender1 \
    netcat \
    python3-dev \
    tzdata \
    unzip \
    && rm -rf /var/lib/apt/lists/*

# Install any packages required for the kernel-wrapper. If the image
# does not contain the target kernel (e.g., IPython, IRkernel, etc.),
# it should be installed as well.
RUN pip install pycrypto

# Download and extract the enterprise gateway kernel launchers and bootstrap
# files and deploy to /usr/local/bin. Change permissions to NB_UID:NB_GID.
RUN curl -L https://github.com/jupyter-server/enterprise_gateway/releases/download/vVERSION/jupyter_enterprise_gateway_kernel_image_files-VERSION.tar.gz | \
    tar -xz -C /usr/local/bin

RUN adduser --system --uid 1000 --gid 100 jovyan && \
    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \
    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \
    chown -R jovyan:users /usr/local/bin/kernel-launchers

ENV NB_UID 1000
ENV NB_GID 100
USER jovyan
ENV KERNEL_LANGUAGE python
CMD /usr/local/bin/bootstrap-kernel.sh
```

## Deploying Your Custom Kernel Image

The final step in deploying a custom kernel image is creating a corresponding kernel specifications directory that is available to Enterprise Gateway. Since Enterprise Gateway is also running in a container, it's important that its kernel specifications directory either be mounted externally or that a new Enterprise Gateway image is created with the appropriate directory in place. For the purposes of this discussion, we'll assume the kernel specifications directory, `/usr/local/share/jupyter/kernels`, is externally mounted.

- Find a similar kernel specification directory from which to create your custom kernel specification. The most important aspect of this is matching the language of your kernel since it will use the same [kernel launcher](#kernel-launcher).
Another important question is whether your custom kernel uses Spark, because those kernel specifications will vary significantly since many of the spark options reside in the `kernel.json`'s `env` stanza. Since our examples use _vanilla_ (non-Spark) python kernels, we'll use the `python_kubernetes` kernel specification as our basis.

```bash
cd /usr/local/share/jupyter/kernels
cp -r python_kubernetes python_myCustomKernel
```

- Edit the `kernel.json` file and change the `display_name:`, `image_name:` and the path to the `launch_kubernetes.py` script.

```json
{
  "language": "python",
  "display_name": "My Custom Kernel",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "myDockerHub/myCustomKernelImage:myTag"
      }
    }
  },
  "env": {},
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_myCustomKernel/scripts/launch_kubernetes.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

- If using kernel filtering (`EG_ALLOWED_KERNELS`), be sure to update it with the new kernel specification directory name (e.g., `python_myCustomKernel`) and restart/redeploy Enterprise Gateway.
- Launch or refresh your Notebook session and confirm `My Custom Kernel` appears in the _new kernel_ drop-down.
- Create a new notebook using `My Custom Kernel`.

================================================
FILE: docs/source/developers/dev-process-proxy.md
================================================

# Implementing a process proxy

A process proxy implementation is necessary if you want to interact with a resource manager that is not currently supported or to extend some existing behaviors. For example, we've recently had [contributions](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/crd.py#L18) that interact with [Kubernetes Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions), which is an example of _extending_ the `KubernetesProcessProxy` to accomplish a slightly different task. Examples of resource managers in which there's been some interest include the [Slurm Workload Manager](https://slurm.schedmd.com/documentation.html) and [Apache Mesos](https://mesos.apache.org/).

In the end, it's really a matter of having access to an API and the ability to apply "tags" or "labels" in order to _discover_ where the kernel is running within the managed cluster. Once you have that information, it becomes a matter of implementing the appropriate methods to control the kernel's lifecycle.

```{admonition} Important!
:class: error
Before continuing, it is important to consider timeframes here. You may instead want to implement a [_Kernel Provisioner_](https://jupyter-client.readthedocs.io/en/latest/provisioning.html) rather than a Process Proxy since _provisioners_ are available to the general framework! The [Enterprise Gateway 4.0 release is slated to adopt Kernel Provisioners](../contributors/roadmap.md) but must remain on a down-level `jupyter_client` release (< 7.x) until that time, as Enterprise Gateway (and process proxies) are currently incompatible. That said, if you and your organization plan to stay on Enterprise Gateway 2.x or 3.x for the next couple of years, then implementing a process proxy may be in your best interest.
Fortunately, the two constructs are nearly identical since Kernel Provisioners are essentially Process Proxies _properly_ integrated into the Jupyter framework, thereby eliminating the need for various `KernelManager` hooks.
```

## General approach

Please refer to the [Process Proxy section](../contributors/system-architecture.md#process-proxy) in the System Architecture pages for descriptions and structure of existing process proxies. Here is the general guideline for implementing a process proxy.

1. Identify and understand how to _decorate_ your "job" within the resource manager. In Hadoop YARN, this is done by using the kernel's ID as the _application name_ by setting the [`--name` parameter to `${KERNEL_ID}`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json). In Kubernetes, we apply the kernel's ID to the [`kernel-id` label on the POD](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2).
1. Today, all invocations of kernels into resource managers use a shell or python script mechanism configured into the `argv` stanza of the kernelspec. If you take this approach, you need to apply the necessary changes to integrate with your resource manager.
1. Determine how to interact with the resource manager's API to _discover_ the kernel and determine on which host it's running. This interaction should occur immediately following Enterprise Gateway's receipt of the kernel's connection information in its response from the kernel launcher. This extra step, performed within `confirm_remote_startup()`, is necessary to get the appropriate host name as reflected in the resource manager's API.
1. Determine how to monitor the "job" using the resource manager API. This will become part of the `poll()` implementation to determine if the kernel is still running. This should be as quick as possible since it occurs every 3 seconds. If this is an expensive call, you may need to make some adjustments, like skipping the call every so often.
1. Determine how to terminate "jobs" using the resource manager API. This will become part of the termination sequence, but is probably only necessary if the message-based shutdown does not work (i.e., a last resort).

```{tip}
Because kernel IDs are globally unique, they serve as ideal identifiers for discovering where in the cluster the kernel is running.
```

You will likely need to provide implementations for `launch_process()`, `poll()`, `wait()`, `send_signal()`, and `kill()`, although, depending on where your process proxy resides in the class hierarchy, some implementations may be reused. For example, if your process proxy is going to service remote kernels, you should consider deriving your implementation from the [`RemoteProcessProxy` class](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/processproxy.py#L1070). If this is the case, then you'll need to implement `confirm_remote_startup()`. Likewise, if your process proxy is based on containers, you should consider deriving your implementation from the [`ContainerProcessProxy`](https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/services/processproxies/container.py#L39). If this is the case, then you'll need to implement `get_container_status()` and `terminate_container_resources()` rather than `confirm_remote_startup()`, etc.
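To make the overall shape concrete, here's a minimal sketch of a remote process proxy. The `my_rm` module and each of its calls are hypothetical stand-ins for your resource manager's client API, and the method signatures are simplified - consult `processproxy.py` in the release you target for the exact (possibly async) signatures and base-class helpers.

```python
# Hypothetical sketch only - 'my_rm' stands in for your resource manager's
# client API; signatures are simplified relative to the actual base classes.
import my_rm  # hypothetical resource manager client library

from enterprise_gateway.services.processproxies.processproxy import RemoteProcessProxy


class MyResourceManagerProcessProxy(RemoteProcessProxy):
    def launch_process(self, kernel_cmd, **kwargs):
        super().launch_process(kernel_cmd, **kwargs)
        # Submit the kernel as a "job", tagged with the kernel ID so it can
        # be discovered once the resource manager schedules it (step 1).
        self.job_id = my_rm.submit(kernel_cmd, tags={"kernel-id": self.kernel_id})
        self.confirm_remote_startup()
        return self

    def confirm_remote_startup(self):
        # Step 3: discover which host the resource manager assigned, then wait
        # for the launcher's connection-info response as the base class does.
        job = my_rm.lookup(tags={"kernel-id": self.kernel_id})
        self.assigned_host = job.host
        self.assigned_ip = job.ip

    def poll(self):
        # Step 4: called roughly every 3 seconds - keep this cheap.  Mimic
        # Popen.poll(): None while running, an "exit code" otherwise.
        return None if my_rm.is_running(self.job_id) else 0

    def kill(self):
        # Step 5: a last resort, used when message-based shutdown fails.
        my_rm.terminate(self.job_id)
```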
Once the process proxy has been implemented, construct an appropriate kernel specification that references your process proxy and iterate until you are satisfied with how your remote kernels behave.

================================================
FILE: docs/source/developers/index.rst
================================================

Developers Guide
================

These pages target *developers* writing applications against the REST API, authoring process proxies for other resource managers, or integrating applications with remote kernel functionality.

.. admonition:: Use cases

   - *As a developer, I want to explore supporting a different resource manager with Enterprise Gateway, by implementing a new `ProcessProxy` class such that I can easily take advantage of specific functionality provided by the resource manager.*
   - *As a developer, I want to extend the `nbclient` application to use a `KernelManager` that can leverage remote kernels spawned from Enterprise Gateway.*
   - *As a developer, I want to easily integrate the ability to launch remote kernels with existing platforms, so I can leverage my compute cluster in a customizable way.*
   - *As a developer, I am currently using Golang and need to implement a kernel launcher to allow the Go kernel I use to run remotely in my Kubernetes cluster.*
   - *As a developer, I'd like to extend some of the kernel container images and, eventually, create my own to better enable the data scientists I support.*
   - *As a developer, I want to author my own Kernel-as-a-Service application.*

.. toctree::
   :maxdepth: 1
   :name: developers

   dev-process-proxy
   kernel-launcher
   kernel-specification
   custom-images
   kernel-library
   kernel-manager
   rest-api

================================================
FILE: docs/source/developers/kernel-launcher.md
================================================

# Implementing a kernel launcher

A new implementation for a [_kernel launcher_](../contributors/system-architecture.md#kernel-launchers) becomes necessary when you want to introduce another kind of kernel to an existing configuration. Out of the box, Enterprise Gateway provides [kernel launchers](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers) that support the IPython kernel, the Apache Toree scala kernel, and the R kernel - IRKernel. There are other "language-agnostic kernel launchers" provided by Enterprise Gateway, but those are used in container environments to start the container or pod, where the "kernel image" uses one of the three _language-based_ launchers to start the kernel within the container.

It's generally recommended that the launcher be written in the language of the kernel, but that is not a requirement so long as the launcher can start and manage the kernel's lifecycle and issue interrupts (if the kernel does not support message-based interrupts itself).

To reiterate, the four tasks of a kernel launcher are:

1. Create the necessary connection information based on the 5 zero-mq ports, a signature key and algorithm specifier, along with a _gateway listener_ socket.
1. Convey the connection (and listener socket) information back to the Enterprise Gateway process after encrypting the information using AES, then encrypting the AES key using the provided public key (a sketch of these first two tasks follows this list).
1. Invoke the target kernel.
1. Listen for interrupt and shutdown requests from Enterprise Gateway on the communication socket and carry out the action when appropriate.
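Here's a minimal, illustrative sketch of the first two tasks (referenced from the list above). It is not the shipped implementation: the payload field names, the use of ECB-mode AES with space padding, and the base64-encoded form of `{public_key}` are assumptions patterned after the Python launcher, so verify the details against [launch_ipykernel.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py) for the release you target.

```python
"""Illustrative sketch of launcher tasks 1 and 2 - not the shipped code."""
import base64
import json
import os
import socket

from Crypto.Cipher import AES, PKCS1_v1_5  # provided by pycrypto/pycryptodome
from Crypto.PublicKey import RSA


def make_connection_info(ip, ports, kernel_id):
    # Task 1: five ZMQ ports plus an HMAC signing key; the sixth port is the
    # "communication port" used for listener traffic with the gateway.
    shell, iopub, stdin, control, hb, comm = ports
    return {
        "ip": ip,
        "shell_port": shell,
        "iopub_port": iopub,
        "stdin_port": stdin,
        "control_port": control,
        "hb_port": hb,
        "signature_scheme": "hmac-sha256",
        "key": os.urandom(16).hex(),
        "kernel_id": kernel_id,
        "comm_port": comm,  # field name is illustrative
    }


def send_connection_info(conn_info, public_key, response_address):
    # Task 2: AES-encrypt the connection info with a fresh 16-byte key, then
    # RSA-encrypt that AES key with the gateway's ephemeral public key.
    aes_key = os.urandom(16)
    raw = json.dumps(conn_info).encode()
    raw += b" " * (16 - len(raw) % 16)  # pad to the AES block size
    encrypted_info = AES.new(aes_key, AES.MODE_ECB).encrypt(raw)
    rsa_cipher = PKCS1_v1_5.new(RSA.importKey(base64.b64decode(public_key)))
    payload = {
        "version": 1,  # launcher version info; field names are illustrative
        "key": base64.b64encode(rsa_cipher.encrypt(aes_key)).decode(),
        "conn_info": base64.b64encode(encrypted_info).decode(),
    }
    host, _, port = response_address.partition(":")  # "<IPV4 address>:<port>"
    with socket.create_connection((host, int(port))) as sock:
        sock.sendall(json.dumps(payload).encode())
```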
## Creating the connection information

If your target kernel exists, then there is probably support for creating ZeroMQ ports. If this proves difficult, you may be able to take a _hybrid approach_ where the connection information, encryption, and listener portion of things is implemented in Python, while invocation takes place in the native language. This is how the [R kernel-launcher](https://github.com/jupyter-server/enterprise_gateway/tree/main/etc/kernel-launchers/R/scripts) support is implemented.

When creating the connection information, your kernel launcher should handle the possibility that the `--port-range` option has been specified, such that each port should reside within the specified range. The port used between Enterprise Gateway and the launcher, known as the _communication port_, should also adhere to the port range. It is not required that this port be ZeroMQ (and it is not a ZMQ port in existing implementations).

## Encrypting the connection information

The next task of the kernel launcher is sending the connection information back to the Enterprise Gateway server. Prior to doing this, the connection information, including the communication port, is encrypted using AES encryption and a 16-byte key. The AES key is then encrypted using the public key specified in the `public_key` parameter. These two fields (the AES-encrypted payload and the public-key-encrypted AES key) are then included in a JSON structure that also includes the launcher's version information, and base64 encoded. Here's such an example from the [Python kernel launcher](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L207). The payload is then [sent back on a socket](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/python/scripts/launch_ipykernel.py#L235) identified by the `--response-address` option.

## Invoking the target kernel

For the R kernel launcher, the kernel is started using [`IRKernel::main()`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/R/scripts/launch_IRkernel.R#L256) after the `SparkContext` is initialized based on the `spark-context-initialization-mode` parameter.

The scala kernel launcher works similarly in that the Apache Toree kernel provides an ["entrypoint" to start the kernel](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/ToreeLauncher.scala#L315); however, because the Toree kernel initializes a `SparkContext` itself, the need to do so is conveyed directly to the kernel.

The Python kernel launcher creates a namespace instance that contains the `SparkContext` information (if requested to do so via the `spark-context-initialization-mode` parameter), instantiates an `IPKernelApp` instance using the configured namespace, then calls the [`start()`](https://github.com/ipython/ipykernel/blob/6f448d280dadbff7245f4b28b5e210c899d79342/ipykernel/kernelapp.py#L694) method.

### Invoking subclasses of `ipykernel.kernelbase.Kernel`

Because the python kernel launcher uses `IPKernelApp`, any subclass of `ipykernel.kernelbase.Kernel` can be launched by EG's Python kernel launcher. To specify an alternate subclass, add `--kernel-class-name` (along with the specified dotted class string) to the `kernel.json` file's `argv` stanza. EG's Python launcher will import that class and pass it as a parameter to `IPKernelApp.initialize()`.
Here's an example `kernel.json` file that launches the "echo" kernel using the `DistributedProcessProxy`:

```JSON
{
  "display_name": "Echo",
  "language": "text",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/echo/scripts/launch_ipykernel.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "none",
    "--kernel-class-name",
    "echo_kernel.kernel.EchoKernel"
  ]
}
```

```{admonition} Important!
The referenced `kernel-class-name` package must first be properly installed on all nodes where the associated process-proxy will run.
```

## Listening for interrupt and shutdown requests

The last task that must be performed by a kernel launcher is to listen on the communication port for work. There are currently two requests sent on the port: a signal event and a shutdown request.

The signal event is of the form `{"signum": n}` where the string `'signum'` indicates a signal event and `'n'` is an integer specifying the signal number to send to the kernel. Typically, the value of `n` is `2`, representing `SIGINT`, and is used to interrupt any current processing. As more kernels adopt a message-based interrupt approach, this will not be as common. Enterprise Gateway also uses this event to perform its `poll()` implementation by sending `{"signum": 0}`. Raising a signal of 0 to a process is a common way to determine that the process is still alive.

The second request is a shutdown request. This is sent when the process proxy has (typically) terminated the kernel and is just performing its final cleanup. The form of this request is `{"shutdown": 1}`. This is what instructs the launcher to abandon listening on the communication socket and to exit.

## Other parameters

Besides `--port-range`, `--public-key`, and `--response-address`, the kernel launcher needs to support `--kernel-id`, which indicates the kernel's ID as known to the Gateway server. It should also tolerate the existence of `--spark-context-initialization-mode` but, unless applicable for Spark environments, should only support values of `"none"` for this option.

================================================
FILE: docs/source/developers/kernel-library.md
================================================

# Standalone Remote Kernel Execution

Remote kernels can be executed by using the `RemoteKernelManager` class directly. This enables running kernels using `ProcessProxy`s without requiring deployment of the Enterprise Gateway web application. This approach is also known as _Library Mode_.

This can be useful in niche situations, for example, using [nbconvert](https://nbconvert.readthedocs.io/) or [nbclient](https://nbclient.readthedocs.io/) to execute a kernel on a remote cluster.

Sample code using nbclient 0.2.0:

```python
import nbformat
from nbclient import NotebookClient
from enterprise_gateway.services.kernels.remotemanager import RemoteKernelManager

with open("my_notebook.ipynb") as fp:
    test_notebook = nbformat.read(fp, as_version=4)

client = NotebookClient(nb=test_notebook, kernel_manager_class=RemoteKernelManager)
client.execute(kernel_name='my_remote_kernel')
```

The above code will execute the notebook on a kernel named `my_remote_kernel` using its configured `ProcessProxy`.
Depending on the process proxy, the _hosting application_ (e.g., `nbclient`) will likely need to be configured to run on the same network as the remote kernel. So, for example, with Kubernetes, `nbclient` would need to be configured as a Kubernetes POD.

================================================
FILE: docs/source/developers/kernel-manager.md
================================================

# Using Jupyter Server's `GatewayKernelManager`

Another way to expose other Jupyter applications like `nbclient` or `papermill` to remote kernels is to use the [`GatewayKernelManager`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L317) (and, implicitly, [`GatewayKernelClient`](https://github.com/jupyter-server/jupyter_server/blob/745f5ba3f00280c1e1900326a7e08463d48a3912/jupyter_server/gateway/managers.py#L562)) classes that are embedded in Jupyter Server. These classes essentially emulate the lower-level [`KernelManager`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/manager.py#L84) and [`KernelClient`](https://github.com/jupyter/jupyter_client/blob/10decd25308c306b6005cbf271b96493824a83e8/jupyter_client/client.py#L75) classes but _forward_ their requests to/from a configured gateway server. The configuration necessary for interacting with the gateway server is set on the [`GatewayClient` configurable](../users/client-config.md#gateway-client-configuration). This allows the _hosting application_ to remain **outside** the resource-managed cluster, since the kernel is actually being managed by the target gateway server. So, using the previous example, one might have...

```python
import nbformat
from nbclient import NotebookClient
from jupyter_server.gateway.gateway_client import GatewayClient
from jupyter_server.gateway.managers import GatewayKernelManager

with open("my_notebook.ipynb") as fp:
    test_notebook = nbformat.read(fp, as_version=4)

# Set any other gateway-specific parameters on the GatewayClient (singleton) instance
gw_client = GatewayClient.instance()
gw_client.url = "http://my-gateway-server.com:8888"

client = NotebookClient(nb=test_notebook, kernel_manager_class=GatewayKernelManager)
client.execute(kernel_name='my_remote_kernel')
```

In this case, `my_remote_kernel`'s kernel specification file actually resides on the Gateway server. `NotebookClient` will _think_ it's talking to local `KernelManager` and `KernelClient` instances when, in actuality, they are forwarding requests to (and getting responses from) the Gateway server at `http://my-gateway-server.com:8888`.

================================================
FILE: docs/source/developers/kernel-specification.md
================================================

# Implementing a kernel specification

If you find yourself [implementing a kernel launcher](kernel-launcher.md), you'll need a way to make that kernel and kernel launcher available to applications. This is accomplished via the _kernel specification_ or _kernelspec_.

Kernelspecs reside in well-known directories. For Enterprise Gateway, we generally recommend they reside in `/usr/local/share/jupyter/kernels`, where each entry in this directory is a directory representing the name of the kernel.
The kernel specification is represented by the file `kernel.json`, the contents of which essentially indicate what environment variables should be present in the kernel process (via the `env` _stanza_) and which command (and arguments) should be issued to start the kernel process (via the `argv` _stanza_). The JSON also includes a `metadata` stanza that contains the process_proxy configuration, along with which process proxy class to instantiate to help manage the kernel process's lifecycle. One approach the sample Enterprise Gateway kernel specifications take is to include a shell script that actually issues the `spark-submit` request. It is this shell script (typically named `run.sh`) that is referenced in the `argv` stanza. Here's an example from the [`spark_python_yarn_cluster`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/kernel.json) kernel specification: ```JSON { "language": "python", "display_name": "Spark - Python (YARN Cluster Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" }, "debugger": true }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "PYSPARK_PYTHON": "/opt/conda/bin/python", "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip", "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ``` where [`run.sh`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh) issues `spark-submit` specifying the kernel launcher as the "application": ```bash eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${IMPERSONATION_OPTS}" \ "${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" ``` For container-based environments, the `argv` may instead reference a script that is meant to create the container pod (for Kubernetes). For these, we use a [template file](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) that operators can adjust to meet the needs of their environment. 
Here's how that `kernel.json` looks:

```json
{
  "language": "python",
  "display_name": "Python on Kubernetes",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "elyra/kernel-py:VERSION"
      }
    },
    "debugger": true
  },
  "env": {},
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

When using the `launch_ipykernel` launcher (aka the Python kernel launcher), subclasses of `ipykernel.kernelbase.Kernel` can be launched. By default, this launcher uses the classname `"ipykernel.ipkernel.IPythonKernel"`, but other subclasses of `ipykernel.kernelbase.Kernel` can be specified by adding a `--kernel-class-name` parameter to the `argv` stanza. See [Invoking subclasses of `ipykernel.kernelbase.Kernel`](kernel-launcher.md#invoking-subclasses-of-ipykernelkernelbasekernel) for more information.

As should be evident, kernel specifications are highly tuned to the runtime environment, so your needs may differ, but your approach _should_ resemble those we've taken so far.

================================================
FILE: docs/source/developers/rest-api.rst
================================================

Using the REST API
===============================

The REST API is used to author new applications that need to interact with Enterprise Gateway. Generally speaking, only the ``/api/kernels`` and ``/api/kernelspecs`` endpoints are used. The ``/api/sessions`` endpoint *can* be used to manage a kernel's lifecycle, but it is not necessary. For example, while the Jupyter Notebook and JupyterLab applications start kernels using ``/api/sessions``, the only interactions they perform with Enterprise Gateway are via ``/api/kernelspecs``, to retrieve a list of available kernel specifications, and ``/api/kernels``, to start, stop, interrupt and restart a kernel. The "session" remains on the client.

General sequence
----------------

Here's the general sequence of events to implement a REST-based application to *discover*, *start*, *execute code*, *interrupt*, and *shutdown* a kernel. To demonstrate each call, we'll use ``curl`` against a running Enterprise Gateway server at ``http://my-gateway-server.com:8888``.

Kernel discovery
~~~~~~~~~~~~~~~~

Issue a ``GET`` request against the ``/api/kernelspecs`` endpoint to discover available kernel specifications. Each entry corresponds to a ``kernel.json`` file located in a directory that corresponds to the kernel's name. This *name* is what will be used in the subsequent start request.

The response is a JSON object where ``default`` is a string specifying the name of the default kernel. This kernel specification will be used if the start request (e.g., ``POST /api/kernels``) does not specify a kernel name in its JSON body. The other key in the response is ``kernelspecs``, a JSON object indexed by kernel name whose values hold each kernel's ``kernel.json`` contents in addition to any *resources* associated with the kernel. These are typically the icon filenames to be used by the front-end application.

.. code-block:: console

   curl http://my-gateway-server.com:8888/api/kernelspecs

.. raw:: html
GET /api/kernelspecs response .. code-block:: json { "default": "python3", "kernelspecs": { "python3": { "name": "python3", "spec": { "argv": [ "/usr/bin/env", "/opt/anaconda2/envs/py3/bin/python", "-m", "ipykernel_launcher", "-f", "{connection_file}" ], "display_name": "Python 3", "language": "python", "interrupt_mode": "signal", "metadata": {} }, "resources": { "logo-32x32": "/kernelspecs/python3/logo-32x32.png", "logo-64x64": "/kernelspecs/python3/logo-64x64.png" } }, "ir": { "name": "ir", "spec": { "argv": [ "R", "--slave", "-e", "IRkernel::main()", "--args", "{connection_file}" ], "env": {}, "display_name": "R", "language": "R", "interrupt_mode": "signal", "metadata": {} }, "resources": { "kernel.js": "/kernelspecs/ir/kernel.js", "logo-64x64": "/kernelspecs/ir/logo-64x64.png" } }, "spark_r_yarn_client": { "name": "spark_r_yarn_client", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_R_yarn_client/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "display_name": "Spark - R (YARN Client Mode)", "language": "R", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" } } }, "resources": { "kernel.js": "/kernelspecs/spark_r_yarn_client/kernel.js", "logo-64x64": "/kernelspecs/spark_r_yarn_client/logo-64x64.png" } }, "spark_r_yarn_cluster": { "name": "spark_r_yarn_cluster", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "eager" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "display_name": "Spark - R (YARN Cluster Mode)", "language": "R", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" } } }, "resources": { "kernel.js": "/kernelspecs/spark_r_yarn_cluster/kernel.js", "logo-64x64": "/kernelspecs/spark_r_yarn_cluster/logo-64x64.png" } }, "spark_python_yarn_client": { "name": "spark_python_yarn_client", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", 
"PYSPARK_PYTHON": "/opt/conda/bin/python", "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip", "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "display_name": "Spark - Python (YARN Client Mode)", "language": "python", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" }, "debugger": true } }, "resources": { "logo-64x64": "/kernelspecs/spark_python_yarn_client/logo-64x64.png" } }, "spark_python_yarn_cluster": { "name": "spark_python_yarn_cluster", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "PYSPARK_PYTHON": "/opt/conda/bin/python", "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip", "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "display_name": "Spark - Python (YARN Cluster Mode)", "language": "python", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" }, "debugger": true } }, "resources": { "logo-64x64": "/kernelspecs/spark_python_yarn_cluster/logo-64x64.png" } }, "spark_scala_yarn_client": { "name": "spark_scala_yarn_client", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}", "__TOREE_OPTS__": "--alternate-sigint USR2", "LAUNCH_OPTS": "", "DEFAULT_INTERPRETER": "Scala" }, "display_name": "Spark - Scala (YARN Client Mode)", "language": "scala", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" } } }, "resources": { "logo-64x64": "/kernelspecs/spark_scala_yarn_client/logo-64x64.png" } }, "spark_scala_yarn_cluster": { "name": "spark_scala_yarn_cluster", "spec": { "argv": [ "/usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", 
"{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ], "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d ${KERNEL_EXTRA_SPARK_OPTS}", "__TOREE_OPTS__": "--alternate-sigint USR2", "LAUNCH_OPTS": "", "DEFAULT_INTERPRETER": "Scala" }, "display_name": "Spark - Scala (YARN Cluster Mode)", "language": "scala", "interrupt_mode": "signal", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" } } }, "resources": { "logo-64x64": "/kernelspecs/spark_scala_yarn_cluster/logo-64x64.png" } } } } .. raw:: html
Kernel start ~~~~~~~~~~~~~~~~ A kernel is started by issuing a ``POST`` request against the ``/api/kernels`` endpoint. The JSON body can take a ``name``, indicating the kernel to start, and an ``env`` JSON, corresponding to environment variables to set in the kernel's environment. In this example, we will start the ``spark_python_yarn_cluster`` kernel with a ``KERNEL_USERNAME`` environment variable of ``jovyan``. .. code-block:: console curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels' --data '{ "name": "spark_python_yarn_cluster", "env": { "KERNEL_USERNAME": "jovyan" }}' .. raw:: html
POST /api/kernels response .. code-block:: json { "id": "f88bdc84-04c6-4021-963d-6811a61eca18", "name": "spark_python_yarn_cluster", "last_activity": "2022-02-12T00:40:45.080107Z", "execution_state": "starting", "connections": 0 } .. raw:: html
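Programmatically, the same start request looks like the following sketch (again assuming the ``requests`` package). The returned ``id`` is the kernel ID used by the interrupt and shutdown requests below.

.. code-block:: python

    import requests

    # Start a kernel (POST /api/kernels) as user 'jovyan'
    resp = requests.post(
        "http://my-gateway-server.com:8888/api/kernels",
        json={"name": "spark_python_yarn_cluster", "env": {"KERNEL_USERNAME": "jovyan"}},
    )
    resp.raise_for_status()
    kernel = resp.json()
    print(kernel["id"], kernel["execution_state"])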
Kernel code execution
~~~~~~~~~~~~~~~~~~~~~

Upgrading the connection to a websocket and issuing code against that websocket is beyond the scope of this document. For this aspect of the discussion, we will refer you to our Python `GatewayClient class <https://github.com/jupyter-server/enterprise_gateway/blob/main/enterprise_gateway/client/gateway_client.py>`_ that we use in our integration tests.

.. note:: The name ``GatewayClient`` in our ``enterprise_gateway/client`` subdirectory is not to be confused with the ``GatewayClient`` class defined in the client applications in Jupyter Server and Notebook. In addition, the internal test class ``KernelClient`` is not to be confused with the ``KernelClient`` that lives in the ``jupyter_client`` package.

Kernel interrupt
~~~~~~~~~~~~~~~~

A kernel is interrupted by issuing a ``POST`` request against the ``/api/kernels/<kernel_id>/interrupt`` endpoint. In this example, we will interrupt the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.

.. note:: Restarting a kernel is nearly identical to interrupting a kernel; just replace ``interrupt`` in the endpoint with ``restart``.

.. code-block:: console

   curl -X POST -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18/interrupt'

An expected response with a ``Status Code`` of ``204`` (No Content) is returned. (The expected response for ``restart`` is ``200`` (OK).)

Kernel shutdown
~~~~~~~~~~~~~~~~

A kernel is shut down by issuing a ``DELETE`` request against the ``/api/kernels/<kernel_id>`` endpoint. In this example, we will shut down the ``spark_python_yarn_cluster`` kernel with ID ``f88bdc84-04c6-4021-963d-6811a61eca18`` that was started previously.

.. code-block:: console

   curl -X DELETE -i 'http://my-gateway-server.com:8888/api/kernels/f88bdc84-04c6-4021-963d-6811a61eca18'

An expected response with a ``Status Code`` of ``204`` (No Content) is returned.

OpenAPI Specification
~~~~~~~~~~~~~~~~~~~~~

Here's the current `OpenAPI `_ specification available from Enterprise Gateway. An interactive version is available `here `_.

.. openapi:: ../../../enterprise_gateway/services/api/swagger.yaml

================================================
FILE: docs/source/index.rst
================================================

Welcome to Jupyter Enterprise Gateway!
======================================

Jupyter Enterprise Gateway is a headless web server with a pluggable framework for anyone supporting multiple notebook users in a managed-cluster environment. Some of the core functionality it provides is better optimization of compute resources, improved multi-user support, and more granular security for your Jupyter notebook environment - making it suitable for enterprise, scientific, and academic implementations.

From a technical perspective, Jupyter Enterprise Gateway is a web server that enables the ability to launch kernels on behalf of remote notebooks. This leads to better resource management, as the web server is no longer the single location for kernel activity. It essentially exposes a *Kernel as a Service* model.

By default, the Jupyter framework runs kernels locally - potentially exhausting the server of resources. By leveraging the functionality of the underlying resource management applications like Hadoop YARN, Kubernetes, and others, Jupyter Enterprise Gateway distributes kernels across the compute cluster, dramatically increasing the number of simultaneously active kernels while leveraging the available compute resources.

.. figure:: images/Scalability-After-JEG.gif
   :align: center

Kernel Gateway vs. Enterprise Gateway
-------------------------------------

Jupyter Enterprise Gateway was formerly built directly on Jupyter Kernel Gateway. At that time, it had complete feature parity with Kernel Gateway. However, in order to address various roadmap items, Enterprise Gateway removed its dependency on Kernel Gateway, so now the question arises: when does one choose Enterprise Gateway over Kernel Gateway?

Use Enterprise Gateway if...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. You have a large compute cluster consisting of limited resources (GPUs, large memory, etc.) and users require those resources from notebooks
2. You have large numbers of users requiring access to a shared compute cluster
3. You require some amount of High Availability/Disaster Recovery such that another Gateway server can be spun up to service existing (and remote) kernels

Use Kernel Gateway if...
~~~~~~~~~~~~~~~~~~~~~~~~

1. You have a small user pool where the resources of the Gateway server can be shared amongst those users (no remote kernel support)
2. You wish to configure the `notebook-http mode `_ functionality, where a specific Notebook provides HTTP endpoints

Who's this for?
---------------

Jupyter Enterprise Gateway is a highly technical piece of the Jupyter Stack, so we've separated documentation to help specific personas:

1. `Users `_: people using Jupyter web applications that wish to connect to an Enterprise Gateway instance.
2. `Operators `_: people deploying or serving Jupyter Enterprise Gateway to others.
3. `Developers `_: people writing applications or deploying kernels for other resource managers.
4. `Contributors `_: people contributing directly to the Jupyter Enterprise Gateway project.

If you find gaps in our documentation, please open an issue (or better yet, a pull request) on the Jupyter Enterprise Gateway `Github repo `_.

Table of Contents
-----------------

.. toctree::
   :maxdepth: 2

   Users <users/index>
   Operators <operators/index>
   Developers <developers/index>
   Contributors <contributors/index>
   Other <other/index>

================================================
FILE: docs/source/operators/config-add-env.md
================================================

# Additional environment variables

Besides those environment variables associated with configurable options, the following environment variables can also be used to influence functionality:

```text
EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME=default
  Kubernetes only. This value indicates the default service account name to use for kernel namespaces when the Enterprise Gateway needs to create the kernel's namespace and KERNEL_SERVICE_ACCOUNT_NAME has not been provided.

EG_DOCKER_NETWORK=enterprise-gateway or bridge
  Docker only. Used by the docker deployment and launch scripts, this indicates the name of the docker network to use. The start scripts default this value to 'enterprise-gateway' because they create the network. The docker kernel launcher (launch_docker.py) defaults this value to 'bridge' only in cases where it wasn't previously set by the deployment script.

EG_ENABLE_TUNNELING=False
  Indicates whether tunneling (via ssh) of the kernel and communication ports is enabled (True) or not (False).

EG_KERNEL_CLUSTER_ROLE=kernel-controller or cluster-admin
  Kubernetes only. The role to use when binding with the kernel service account. The eg-clusterrole.yaml file creates the cluster role 'kernel-controller' and conveys that name via EG_KERNEL_CLUSTER_ROLE. Should the deployment script not set this value, Enterprise Gateway will then use 'cluster-admin'. It is recommended this value be set to something other than 'cluster-admin'.
EG_KERNEL_LAUNCH_TIMEOUT=30
  The time (in seconds) Enterprise Gateway will wait for a kernel's startup completion status before deeming the startup a failure, at which time a second startup attempt will take place. If a second timeout occurs, Enterprise Gateway will report a failure to the client.

EG_KERNEL_INFO_TIMEOUT=60
  The time (in seconds) Enterprise Gateway will wait for a kernel info response before deeming the request a failure.

EG_SENSITIVE_ENV_KEYS=""
  A comma-separated list (e.g., "secret,pwd,auth") of sensitive environment variables. Any environment variable whose name contains any of the words from this list will have its value logged as EG_REDACTION_MASK.

EG_REDACTION_MASK=********
  The redaction mask used if EG_SENSITIVE_ENV_KEYS is set. Sensitive environment variables will be logged as this redaction mask instead.

EG_KERNEL_LOG_DIR=/tmp
  The directory used during remote kernel launches of DistributedProcessProxy kernels. Files in this directory will be of the form kernel-<kernel_id>.log.

EG_KERNEL_SESSION_PERSISTENCE=False
  **Experimental** Enables kernel session persistence. Currently, this is purely experimental and writes kernel session information to a local file. Should Enterprise Gateway terminate with running kernels, a subsequent restart of Enterprise Gateway will attempt to reconnect to the persisted kernels. See also EG_KERNEL_SESSION_LOCATION and --KernelSessionManager.enable_persistence.

EG_KERNEL_SESSION_LOCATION=
  **Experimental** The location in which the kernel session information is persisted. By default, this is located in the configured JupyterDataDir. See also EG_KERNEL_SESSION_PERSISTENCE.

EG_MAX_PORT_RANGE_RETRIES=5
  The number of attempts made to locate an available port within the specified port range. Only applies when --EnterpriseGatewayApp.port_range (or EG_PORT_RANGE) has been specified or is in use for the given kernel.

EG_MIN_PORT_RANGE_SIZE=1000
  The minimum port range size permitted when --EnterpriseGatewayApp.port_range (or EG_PORT_RANGE) is specified or is in use for the given kernel. Port ranges reflecting smaller sizes will result in a failure to launch the corresponding kernel (since port-range can be specified within individual kernel specifications).

EG_MIRROR_WORKING_DIRS=False
  Containers only. If True, kernel creation requests that specify KERNEL_WORKING_DIR will set the kernel container's working directory to that value. See also KERNEL_WORKING_DIR.

EG_NAMESPACE=enterprise-gateway or default
  Kubernetes only. Used during Kubernetes deployment, this indicates the name of the namespace in which the Enterprise Gateway service is deployed. The namespace is created prior to deployment, and is set into the EG_NAMESPACE env via the deployment.yaml script. This value is then used within Enterprise Gateway to coordinate kernel configurations. Should this value not be set during deployment, Enterprise Gateway will default its value to namespace 'default'.

EG_PROHIBITED_GIDS=0
  Containers only. A comma-separated list of group ids (GID) whose values are not allowed to be referenced by KERNEL_GID. This defaults to the root group id (0). Attempts to launch a kernel where KERNEL_GID's value is in this list will result in an exception indicating error 403 (Forbidden). See also EG_PROHIBITED_UIDS.

EG_PROHIBITED_LOCAL_IPS=''
  A comma-separated list of local IPv4 addresses (or regular expressions) that should not be used when determining the response address used to convey connection information back to Enterprise Gateway from a remote kernel.
In some cases, other network interfaces (e.g., docker with 172.17.0.*) can interfere - leading to connection failures during kernel startup. Example: EG_PROHIBITED_LOCAL_IPS=172.17.0.*,192.168.0.27 will eliminate the use of all addresses in the 172.17.0.* range as well as 192.168.0.27. EG_PROHIBITED_UIDS=0 Containers only. A comma-separated list of user ids (UID) whose values are not allowed to be referenced by KERNEL_UID. This defaults to the root user id (0). Attempts to launch a kernel where KERNEL_UID's value is in this list will result in an exception indicating error 403 (Forbidden). See also EG_PROHIBITED_GIDS. EG_RESPONSE_IP=None Experimental. The IP address to use to formulate the response address (with `EG_RESPONSE_PORT`). By default, the server's IP is used. However, it may be necessary to use a different IP in cases where the target kernels are external to the Enterprise Gateway server, for example. Its value may also need to be set in cases where the computed (default) value is not correct for the current topology. EG_RESPONSE_PORT=8877 The single response port used to receive connection information from launched kernels. EG_RESPONSE_PORT_RETRIES=10 The number of retries to attempt when the original response port (EG_RESPONSE_PORT) is found to be in use. This value should be set to 0 (zero) if no port retries are desired. EG_SHARED_NAMESPACE=False Kubernetes only. This value indicates whether (True) or not (False) all kernel pods should reside in the same namespace as Enterprise Gateway. This is not a recommended configuration. EG_SSH_PORT=22 The port number used for ssh operations for installations choosing to configure the ssh server on a port other than the default 22. EG_REMOTE_PWD=None The password to use to ssh to remote hosts. EG_REMOTE_USER=None The username to use when connecting to remote hosts (defaults to `getpass.getuser()` when not set). EG_REMOTE_GSS_SSH=False Use gss instead of EG_REMOTE_USER and EG_REMOTE_PWD to connect to the remote host via SSH. Case insensitive. 'True' to enable; 'False', '' or unset to disable. Any other value will error. EG_YARN_CERT_BUNDLE= The path to a .pem or any other custom truststore used as a CA bundle in yarn-api-client. EG_ZMQ_IO_THREADS=1 The size of the ZMQ thread pool used to handle I/O operations. Applies only to shared contexts, which are enabled by default but can be specified via `RemoteMappingKernelManager.shared_context = True`. EG_ZMQ_MAX_SOCKETS=1023 Specifies the maximum number of sockets to allow on the ZMQ context. Applies only to shared contexts, which are enabled by default but can be specified via `RemoteMappingKernelManager.shared_context = True`. ``` ================================================ FILE: docs/source/operators/config-availability.md ================================================ # Availability modes Enterprise Gateway can be optionally configured in one of two "availability modes": _standalone_ or _replication_. When configured, Enterprise Gateway can recover from failures and reconnect to any active remote kernels that were previously managed by the terminated EG instance. As such, both modes require that kernel session persistence also be enabled via `KernelSessionManager.enable_persistence=True`. ```{note} Kernel session persistence will be automatically enabled whenever availability mode is configured. ``` ```{caution} **Availability modes and kernel session persistence should be considered experimental!** Known issues include: 1.
Culling configurations do not account for different nodes and therefore could result in the incorrect culling of kernels. 2. Each "node switch" requires a manual reconnect to the kernel. We hope to address these in future releases (depending on demand). ``` ## Standalone availability _Standalone availability_ assumes that, upon failure of the original EG instance, another EG instance will be started. Upon startup of the second instance (following the termination of the first), EG will attempt to load and reconnect to all kernels that were deemed active when the previous instance terminated. This mode is somewhat analogous to the classic HA/DR mode of _active-passive_ and is typically used when node resources are at a premium or the number of replicas (in the Kubernetes sense) must remain at 1. To enable Enterprise Gateway for 'standalone' availability, configure `EnterpriseGatewayApp.availability_mode=standalone` or set env `EG_AVAILABILITY_MODE=standalone`. Here's an example for starting Enterprise Gateway with standalone availability: ```bash #!/bin/bash LOG=/var/log/enterprise_gateway.log PIDFILE=/var/run/enterprise_gateway.pid jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \ --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 & if [ "$?" -eq 0 ]; then echo $! > $PIDFILE else exit 1 fi ``` ## Replication availability With _replication availability_, multiple EG instances (or replicas) are operating at the same time, fronted by some kind of reverse proxy or load balancer. Because state still resides within each `KernelManager` instance executing within a given EG instance, we strongly suggest configuring some form of _client affinity_ (a.k.a. "sticky sessions") to avoid node switches wherever possible, since each node switch requires manual reconnection of the front-end (today). ```{tip} Configuring client affinity is **strongly recommended**, otherwise functionality that relies on state within the servicing node (e.g., culling) can be affected upon node switches, resulting in incorrect behavior. ``` In this mode, when one node goes down, the subsequent request will be routed to a different node that doesn't know about the kernel. Prior to returning a `404` (not found) status code, EG will check its persisted store to determine if the kernel was managed and, if so, attempt to "hydrate" a `KernelManager` instance associated with the remote kernel. (Of course, if the kernel was running local to the downed server, chances are it cannot be _revived_.) Upon successful "hydration" the request continues as if on the originating node. Because _client affinity_ is in place, subsequent requests should continue to be routed to the "servicing node". To enable Enterprise Gateway for 'replication' availability, configure `EnterpriseGatewayApp.availability_mode=replication` or set env `EG_AVAILABILITY_MODE=replication`. ```{attention} To preserve backwards compatibility, if only kernel session persistence is enabled via `KernelSessionManager.enable_persistence=True`, the availability mode will be automatically configured to 'replication' if `EnterpriseGatewayApp.availability_mode` is not configured. ``` Here's an example for starting Enterprise Gateway with replication availability: ```bash #!/bin/bash LOG=/var/log/enterprise_gateway.log PIDFILE=/var/run/enterprise_gateway.pid jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \ --EnterpriseGatewayApp.availability_mode=replication > $LOG 2>&1 & if [ "$?" -eq 0 ]; then echo $!
> $PIDFILE else exit 1 fi ``` # Kernel Session Persistence Enabling kernel session persistence allows Jupyter Notebooks to reconnect to kernels when Enterprise Gateway is restarted and forms the basis for the _availability modes_ described above. Enterprise Gateway provides two ways of persisting kernel sessions: _File Kernel Session Persistence_ and _Webhook Kernel Session Persistence_, although others can be provided by subclassing `KernelSessionManager` (see below). ```{attention} Due to its experimental nature, kernel session persistence is disabled by default. To enable this functionality, you must configure `KernelSessionManager.enable_persistence=True` or configure `EnterpriseGatewayApp.availability_mode` to either `standalone` or `replication`. ``` As noted above, the availability modes rely on the persisted information relative to the kernel. This information consists of the arguments and options used to launch the kernel, along with its connection information. In essence, it consists of any information necessary to re-establish communication with the kernel. ## File Kernel Session Persistence File Kernel Session Persistence stores kernel sessions as files in a specified directory. To enable this form of persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `FileKernelSessionManager.enable_persistence=True`. To change the directory in which the kernel session file is being saved, either set the environment variable `EG_PERSISTENCE_ROOT` or configure `FileKernelSessionManager.persistence_root` to the directory. By default, the directory used to store a given kernel's session information is the `JUPYTER_DATA_DIR`. ```{note} Enterprise Gateway handles corrupted or invalid session files gracefully. If a persisted session file contains invalid JSON or cannot be read, the error is logged and that session is skipped rather than preventing Enterprise Gateway from starting. ``` ```{note} Because `FileKernelSessionManager` is the default class for kernel session persistence, configuring `EnterpriseGatewayApp.kernel_session_manager_class` to `enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager` is not necessary. ``` ## Webhook Kernel Session Persistence Webhook Kernel Session Persistence stores all kernel sessions in any database. In order for this to work, an API must be created. The API must include four endpoints: - A `GET` that will retrieve a list of all kernel sessions from a database - A `GET` that will take the kernel id as a path variable and retrieve that information from a database - A `DELETE` that will delete all kernel sessions, where the body of the request is a list of kernel ids - A `POST` that will take the kernel id as a path variable and the kernel session in the body of the request and save it to a database where the object being saved is: ``` { kernel_id: UUID string, kernel_session: JSON } ``` To enable the webhook kernel session persistence, set the environment variable `EG_KERNEL_SESSION_PERSISTENCE=True` or configure `WebhookKernelSessionManager.enable_persistence=True`. To connect the API, set the environment variable `EG_WEBHOOK_URL` or configure `WebhookKernelSessionManager.webhook_url` to the API endpoint.
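For illustration, here's a rough sketch of those four endpoints expressed as `curl` requests. The base URL, paths, and payload shapes below are hypothetical placeholders inferred from the description above, not a definitive contract:

```bash
# Hypothetical webhook API; the base URL and kernel id are placeholders.
WEBHOOK=https://example.com/kernelsessions
KID=my-kernel-id

curl -X GET "$WEBHOOK"                        # retrieve all kernel sessions
curl -X GET "$WEBHOOK/$KID"                   # retrieve one session by kernel id
curl -X DELETE "$WEBHOOK" \
     -H 'Content-Type: application/json' \
     -d "[\"$KID\"]"                          # delete the sessions listed in the body
curl -X POST "$WEBHOOK/$KID" \
     -H 'Content-Type: application/json' \
     -d "{\"kernel_id\": \"$KID\", \"kernel_session\": {}}"   # persist a session
```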
Because `WebhookKernelSessionManager` is not the default kernel session persistence class, an additional configuration step must be taken to instruct EG to use this class: `EnterpriseGatewayApp.kernel_session_manager_class = enterprise_gateway.services.sessions.kernelsessionmanager.WebhookKernelSessionManager`. ### Enabling Authentication Enabling authentication is an option if the API requires it for requests. Set the environment variable `EG_AUTH_TYPE` or configure `WebhookKernelSessionManager.auth_type` to be either `Basic` or `Digest`. If it is set to an empty string, authentication won't be enabled. Then set the environment variables `EG_WEBHOOK_USERNAME` and `EG_WEBHOOK_PASSWORD` or configure `WebhookKernelSessionManager.webhook_username` and `WebhookKernelSessionManager.webhook_password` to provide the username and password for authentication. ## Bring Your Own Kernel Session Persistence To introduce a different implementation, you must configure the kernel session manager class. Here's an example for starting Enterprise Gateway using a custom `KernelSessionManager` and 'standalone' availability. Note that setting `--MyCustomKernelSessionManager.enable_persistence=True` is not necessary because an availability mode is specified, but it is displayed here for completeness: ```bash #!/bin/bash LOG=/var/log/enterprise_gateway.log PIDFILE=/var/run/enterprise_gateway.pid jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \ --EnterpriseGatewayApp.kernel_session_manager_class=custom.package.MyCustomKernelSessionManager \ --MyCustomKernelSessionManager.enable_persistence=True \ --EnterpriseGatewayApp.availability_mode=standalone > $LOG 2>&1 & if [ "$?" -eq 0 ]; then echo $! > $PIDFILE else exit 1 fi ``` Alternative persistence implementations using SQL and NoSQL databases would be ideal and, as always, contributions are welcome! ## Testing Kernel Session Persistence Once kernel session persistence has been enabled and configured, create a kernel by opening up a Jupyter Notebook. Save some variable in that notebook and shut down Enterprise Gateway using `kill -9 PID`, where `PID` is the PID of the gateway. Restart Enterprise Gateway and refresh your notebook tab. If all worked correctly, the variable should be loaded without the need to rerun the cell. If you are using docker, ensure the container isn't tied to the PID of Enterprise Gateway. The container should still run after killing that PID. ================================================ FILE: docs/source/operators/config-cli.md ================================================ # Command-line options In some cases, it may be easier to use command line options. These can also be used for _static_ values that should not be targeted for [_dynamic configurables_](config-dynamic.md/#dynamic-configurables). To see the same configuration options at the command line, run the following: ```bash jupyter enterprisegateway --help-all ``` A snapshot of this help appears below for ease of reference. The options for the superclass `EnterpriseGatewayConfigMixin` have been omitted. As with the `--generate-config` option, each option includes its corresponding environment variable, if applicable. ```text Jupyter Enterprise Gateway Provisions remote Jupyter kernels and proxies HTTP/Websocket traffic to them. Options ------- Arguments that take values are actually convenience aliases to full Configurables, whose aliases are listed on the help line. For more information on full configurables, see '--help-all'.
--debug set log level to logging.DEBUG (maximize logging output) --generate-config generate default config file -y Answer yes to any questions instead of prompting. --log-level= (Application.log_level) Default: 30 Choices: (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL') Set the log level by value or name. --config= (JupyterApp.config_file) Default: '' Full path of a config file. --ip= (EnterpriseGatewayApp.ip) Default: '127.0.0.1' IP address on which to listen (EG_IP env var) --port= (EnterpriseGatewayApp.port) Default: 8888 Port on which to listen (EG_PORT env var) --port_retries= (EnterpriseGatewayApp.port_retries) Default: 50 Number of ports to try if the specified port is not available (EG_PORT_RETRIES env var) --keyfile= (EnterpriseGatewayApp.keyfile) Default: None The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var) --certfile= (EnterpriseGatewayApp.certfile) Default: None The full path to an SSL/TLS certificate file. (EG_CERTFILE env var) --client-ca= (EnterpriseGatewayApp.client_ca) Default: None The full path to a certificate authority certificate for SSL/TLS client authentication. (EG_CLIENT_CA env var) Class parameters ---------------- Parameters are set from command-line arguments of the form: `--Class.trait=value`. This line is evaluated in Python, so simple expressions are allowed, e.g.:: `--C.a='range(3)'` For setting C.a=[0,1,2]. EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp) options ---------------------------------------------------------------------- --EnterpriseGatewayApp.allow_credentials= Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var) Default: '' --EnterpriseGatewayApp.allow_headers= Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var) Default: '' --EnterpriseGatewayApp.allow_methods= Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var) Default: '' --EnterpriseGatewayApp.allow_origin= Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var) Default: '' --EnterpriseGatewayApp.alt_yarn_endpoint= The http url specifying the alternate YARN Resource Manager. This value should be set when YARN Resource Managers are configured for high availability. Note: If both YARN endpoints are NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the active resource manager. (EG_ALT_YARN_ENDPOINT env var) Default: None --EnterpriseGatewayApp.answer_yes= Answer yes to any prompts. Default: False --EnterpriseGatewayApp.auth_token= Authorization token required for all requests (EG_AUTH_TOKEN env var) Default: '' --EnterpriseGatewayApp.authorized_users=... Comma-separated list of user names (e.g., ['bob','alice']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will allow the kernel's launch, otherwise an HTTP 403 (Forbidden) error will be raised. The set of unauthorized users takes precedence. This option should be used carefully as it can dramatically limit who can launch kernels. (EG_AUTHORIZED_USERS env var - non-bracketed, just comma-separated) Default: set() --EnterpriseGatewayApp.authorized_origin= Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match against the request's SSL certificate. An HTTP 403 (Forbidden) error will be raised on a failed match. This option requires TLS to be enabled. It does not support IP addresses. (EG_AUTHORIZED_ORIGIN env var) Default: '' --EnterpriseGatewayApp.availability_mode= Specifies the type of availability. 
Values must be one of "standalone" or "replication". (EG_AVAILABILITY_MODE env var) Choices: any of ['standalone', 'replication'] (case-insensitive) or None Default: None --EnterpriseGatewayApp.base_url= The base path for mounting all API resources (EG_BASE_URL env var) Default: '/' --EnterpriseGatewayApp.certfile= The full path to an SSL/TLS certificate file. (EG_CERTFILE env var) Default: None --EnterpriseGatewayApp.client_ca= The full path to a certificate authority certificate for SSL/TLS client authentication. (EG_CLIENT_CA env var) Default: None --EnterpriseGatewayApp.client_envs=... Environment variables allowed to be set when a client requests a new kernel. (EG_CLIENT_ENVS env var) Default: [] --EnterpriseGatewayApp.conductor_endpoint= The http url for accessing the Conductor REST API. (EG_CONDUCTOR_ENDPOINT env var) Default: None --EnterpriseGatewayApp.config_file= Full path of a config file. Default: '' --EnterpriseGatewayApp.config_file_name= Specify a config file to load. Default: '' --EnterpriseGatewayApp.default_kernel_name= Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var) Default: '' --EnterpriseGatewayApp.dynamic_config_interval= Specifies the number of seconds configuration files are polled for changes. A value of 0 or less disables dynamic config updates. (EG_DYNAMIC_CONFIG_INTERVAL env var) Default: 0 --EnterpriseGatewayApp.env_process_whitelist=... DEPRECATED, use inherited_envs Default: [] --EnterpriseGatewayApp.env_whitelist=... DEPRECATED, use client_envs. Default: [] --EnterpriseGatewayApp.expose_headers= Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var) Default: '' --EnterpriseGatewayApp.generate_config= Generate default config file. Default: False --EnterpriseGatewayApp.impersonation_enabled= Indicates whether impersonation will be performed during kernel launch. (EG_IMPERSONATION_ENABLED env var) Default: False --EnterpriseGatewayApp.inherited_envs=... Environment variables allowed to be inherited from the spawning process by the kernel. (EG_INHERITED_ENVS env var) Default: [] --EnterpriseGatewayApp.ip= IP address on which to listen (EG_IP env var) Default: '127.0.0.1' --EnterpriseGatewayApp.kernel_headers=... Request headers to make available to kernel launch framework. (EG_KERNEL_HEADERS env var) Default: [] --EnterpriseGatewayApp.kernel_manager_class= The kernel manager class to use. Must be a subclass of `enterprise_gateway.services.kernels.RemoteMappingKernelManager`. Default: 'enterprise_gateway.services.kernels.remotemanager.RemoteMapp... --EnterpriseGatewayApp.kernel_session_manager_class= The kernel session manager class to use. Must be a subclass of `enterprise_gateway.services.sessions.KernelSessionManager`. Default: 'enterprise_gateway.services.sessions.kernelsessionmanager.Fi... --EnterpriseGatewayApp.kernel_spec_cache_class= The kernel spec cache class to use. Must be a subclass of `enterprise_gateway.services.kernelspecs.KernelSpecCache`. Default: 'enterprise_gateway.services.kernelspecs.kernelspec_cache.Ker... --EnterpriseGatewayApp.kernel_spec_manager_class= The kernel spec manager class to use. Must be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. Default: 'jupyter_client.kernelspec.KernelSpecManager' --EnterpriseGatewayApp.keyfile= The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var) Default: None --EnterpriseGatewayApp.list_kernels= Permits listing of the running kernels using API endpoints /api/kernels and /api/sessions. 
(EG_LIST_KERNELS env var) Note: Jupyter Notebook allows this by default but Jupyter Enterprise Gateway does not. Default: False --EnterpriseGatewayApp.load_balancing_algorithm= Specifies which load balancing algorithm DistributedProcessProxy should use. Must be one of "round-robin" or "least-connection". (EG_LOAD_BALANCING_ALGORITHM env var) Default: 'round-robin' --EnterpriseGatewayApp.log_datefmt= The date format used by logging formatters for %(asctime)s Default: '%Y-%m-%d %H:%M:%S' --EnterpriseGatewayApp.log_format= The Logging format template Default: '[%(name)s]%(highlevel)s %(message)s' --EnterpriseGatewayApp.log_level= Set the log level by value or name. Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'] Default: 30 --EnterpriseGatewayApp.max_age= Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var) Default: '' --EnterpriseGatewayApp.max_kernels= Limits the number of kernel instances allowed to run by this gateway. Unbounded by default. (EG_MAX_KERNELS env var) Default: None --EnterpriseGatewayApp.max_kernels_per_user= Specifies the maximum number of kernels a user can have active simultaneously. A value of -1 disables enforcement. (EG_MAX_KERNELS_PER_USER env var) Default: -1 --EnterpriseGatewayApp.port= Port on which to listen (EG_PORT env var) Default: 8888 --EnterpriseGatewayApp.port_range= Specifies the lower and upper port numbers from which ports are created. The bounded values are separated by '..' (e.g., 33245..34245 specifies a range of 1000 ports to be randomly selected). A range of zero (e.g., 33245..33245 or 0..0) disables port-range enforcement. (EG_PORT_RANGE env var) Default: '0..0' --EnterpriseGatewayApp.port_retries= Number of ports to try if the specified port is not available (EG_PORT_RETRIES env var) Default: 50 --EnterpriseGatewayApp.remote_hosts=... Bracketed comma-separated list of hosts on which DistributedProcessProxy kernels will be launched e.g., ['host1','host2']. (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated) Default: ['localhost'] --EnterpriseGatewayApp.show_config= Instead of starting the Application, dump configuration to stdout Default: False --EnterpriseGatewayApp.show_config_json= Instead of starting the Application, dump configuration to stdout (as JSON) Default: False --EnterpriseGatewayApp.ssl_version= Sets the SSL version to use for the web socket connection. (EG_SSL_VERSION env var) Default: None --EnterpriseGatewayApp.trust_xheaders= Use x-* header values for overriding the remote-ip, useful when application is behind a proxy. (EG_TRUST_XHEADERS env var) Default: False --EnterpriseGatewayApp.unauthorized_users=... Comma-separated list of user names (e.g., ['root','admin']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will prevent the kernel's launch and result in an HTTP 403 (Forbidden) error. (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated) Default: {'root'} --EnterpriseGatewayApp.ws_ping_interval= Specifies the ping interval (in seconds) that should be used by the zmq port associated with spawned kernels. Set this variable to 0 to disable the ping mechanism. (EG_WS_PING_INTERVAL_SECS env var) Default: 30 --EnterpriseGatewayApp.yarn_endpoint= The http url specifying the YARN Resource Manager. Note: If this value is NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the active resource manager.
(EG_YARN_ENDPOINT env var) Default: None --EnterpriseGatewayApp.yarn_endpoint_security_enabled= Is YARN Kerberos/SPNEGO Security enabled (True/False). (EG_YARN_ENDPOINT_SECURITY_ENABLED env var) Default: False KernelSpecCache(SingletonConfigurable) options ---------------------------------------------- --KernelSpecCache.cache_enabled= Enable Kernel Specification caching. (EG_KERNELSPEC_CACHE_ENABLED env var) Default: False FileKernelSessionManager(KernelSessionManager) options ------------------------------------------------------ --FileKernelSessionManager.enable_persistence= Enable kernel session persistence (True or False). Default = False (EG_KERNEL_SESSION_PERSISTENCE env var) Default: False --FileKernelSessionManager.persistence_root= Identifies the root 'directory' under which the 'kernel_sessions' node will reside. This directory should exist. (EG_PERSISTENCE_ROOT env var) Default: '' WebhookKernelSessionManager(KernelSessionManager) options --------------------------------------------------------- --WebhookKernelSessionManager.enable_persistence= Enable kernel session persistence (True or False). Default = False (EG_KERNEL_SESSION_PERSISTENCE env var) Default: False --WebhookKernelSessionManager.persistence_root= Identifies the root 'directory' under which the 'kernel_sessions' node will reside. This directory should exist. (EG_PERSISTENCE_ROOT env var) Default: None --WebhookKernelSessionManager.webhook_url= URL endpoint for webhook kernel session manager Default: None --WebhookKernelSessionManager.auth_type= Authentication type for webhook kernel session manager API. Either basic, digest or None Default: None --WebhookKernelSessionManager.webhook_username= Username for webhook kernel session manager API auth Default: None --WebhookKernelSessionManager.webhook_password= Password for webhook kernel session manager API auth Default: None RemoteMappingKernelManager(AsyncMappingKernelManager) options ------------------------------------------------------------- --RemoteMappingKernelManager.allowed_message_types=... White list of allowed kernel message types. When the list is empty, all message types are allowed. Default: [] --RemoteMappingKernelManager.buffer_offline_messages= Whether messages from kernels whose frontends have disconnected should be buffered in-memory. When True (default), messages are buffered and replayed on reconnect, avoiding lost messages due to interrupted connectivity. Disable if long-running kernels will produce too much output while no frontends are connected. Default: True --RemoteMappingKernelManager.cull_busy= Whether to consider culling kernels which are busy. Only effective if cull_idle_timeout > 0. Default: False --RemoteMappingKernelManager.cull_connected= Whether to consider culling kernels which have one or more connections. Only effective if cull_idle_timeout > 0. Default: False --RemoteMappingKernelManager.cull_idle_timeout= Timeout (in seconds) after which a kernel is considered idle and ready to be culled. Values of 0 or lower disable culling. Very short timeouts may result in kernels being culled for users with poor network connections. Default: 0 --RemoteMappingKernelManager.cull_interval= The interval (in seconds) on which to check for idle kernels exceeding the cull timeout value. Default: 300 --RemoteMappingKernelManager.default_kernel_name= The name of the default kernel to start Default: 'python3' --RemoteMappingKernelManager.kernel_info_timeout= Timeout for giving up on a kernel (in seconds). 
On starting and restarting kernels, we check whether the kernel is running and responsive by sending kernel_info_requests. This sets the timeout in seconds for how long the kernel can take before being presumed dead. This affects the MappingKernelManager (which handles kernel restarts) and the ZMQChannelsHandler (which handles the startup). Default: 60 --RemoteMappingKernelManager.kernel_manager_class= The kernel manager class. This is configurable to allow subclassing of the AsyncKernelManager for customized behavior. Default: 'jupyter_client.ioloop.AsyncIOLoopKernelManager' --RemoteMappingKernelManager.root_dir= Default: '' --RemoteMappingKernelManager.shared_context= Share a single zmq.Context to talk to all my kernels Default: True ``` ================================================ FILE: docs/source/operators/config-culling.md ================================================ # Culling idle kernels With the adoption of notebooks and interactive development for data science, a new "resource utilization" pattern has arisen, where kernel resources are locked for a given notebook but may sit idle for long periods during interactive development, starving the cluster of resources. One way to work around this problem is to enable the culling of idle kernels after a specific timeout period. Idle kernel culling is set to “off” by default. It’s enabled by setting `--RemoteKernelManager.cull_idle_timeout` to a positive value representing the number of seconds a kernel must remain idle to be culled (default: 0, recommended: 43200, 12 hours). ```{tip} When managing large clusters with limited resources, we recommend enabling the culling of idle kernels. ``` You can also configure the interval at which kernels are checked for their idle timeouts by adjusting the setting `--RemoteKernelManager.cull_interval` to a positive value. If the interval is not set or is set to a non-positive value, the system uses the default of 300 seconds. There are use-cases where we would like to cull only those idle kernels that have no connections (e.g., the notebook browser was closed without stopping the kernel first). This can be configured by adjusting the setting `--RemoteKernelManager.cull_connected` (default: False). Here's an updated start script that provides some default configuration to enable the culling of idle kernels: ```bash #!/bin/bash LOG=/var/log/enterprise_gateway.log PIDFILE=/var/run/enterprise_gateway.pid jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \ --RemoteKernelManager.cull_idle_timeout=43200 --MappingKernelManager.cull_interval=60 > $LOG 2>&1 & if [ "$?" -eq 0 ]; then echo $! > $PIDFILE else exit 1 fi ``` ================================================ FILE: docs/source/operators/config-dynamic.md ================================================ # Dynamic configurables Enterprise Gateway also supports the ability to update configuration variables without having to restart Enterprise Gateway. This enables things like enabling debug logging or adjusting the maximum number of kernels per user on a running server. To enable dynamic configurables, configure `EnterpriseGatewayApp.dynamic_config_interval` to a positive value (default is 0, or disabled). Since this is the number of seconds to poll Enterprise Gateway's configuration files, a value greater than 60 (1 minute) is recommended.
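As a minimal sketch, the polling interval itself is best supplied on the command line or environment (see the caveats below), while the values you intend to update dynamically live in the configuration file:

```bash
# A minimal sketch: poll the configuration files every 5 minutes (300 seconds).
# Values you intend to update dynamically (e.g., max_kernels_per_user) belong
# in the configuration file, not on the command line.
jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 \
  --EnterpriseGatewayApp.dynamic_config_interval=300
```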
This functionality works for most configuration values, but does have the following caveats: 1. Any configuration variables set on the command line (CLI) or via environment variables are NOT eligible for dynamic updates. This is because Jupyter gives those values priority over file-based configuration variables. 1. Any configuration variables tied to background processing may not reflect their update if the variable is not _observed_ for changes. For example, the code behind `RemoteKernelManager.cull_idle_timeout` may not reflect changes to the timeout period if that variable is not monitored (i.e., observed) for changes. 1. Only `Configurables` registered by Enterprise Gateway are eligible for dynamic updates. Currently, that list consists of the following (and their subclasses): EnterpriseGatewayApp, RemoteKernelManager, KernelSpecManager, and KernelSessionManager. As a result, operators and administrators are encouraged to configure Enterprise Gateway via configuration files with only static values configured via the command line or environment. Note that if `EnterpriseGatewayApp.dynamic_config_interval` is configured with a positive value via the configuration file (i.e., is eligible for updates) and is subsequently set to 0, then dynamic configuration updates will be disabled until Enterprise Gateway is restarted with a positive value. Therefore, we recommend `EnterpriseGatewayApp.dynamic_config_interval` be configured via the command line or environment. ================================================ FILE: docs/source/operators/config-env-debug.md ================================================ # Environment variables that assist in troubleshooting The following environment variables may be useful for troubleshooting: ```text EG_DOCKER_LOG_LEVEL=WARNING By default, the docker client library is too verbose for its logging. This value can be adjusted in situations where docker troubleshooting may be warranted. EG_KUBERNETES_LOG_LEVEL=WARNING By default, the kubernetes client library is too verbose for its logging. This value can be adjusted in situations where kubernetes troubleshooting may be warranted. EG_LOG_LEVEL=10 Used by remote launchers and gateway listeners (where the kernel runs), this indicates the level of logging used by those entities. Level 10 (DEBUG) is recommended since they don't do verbose logging. EG_MAX_POLL_ATTEMPTS=10 Polling is used in various places during life-cycle management operations - like determining if a kernel process is still alive, stopping the process, waiting for the process to terminate, etc. As a result, it may be useful to adjust this value during those kinds of troubleshooting scenarios, although that should rarely be necessary. EG_POLL_INTERVAL=0.5 The interval (in seconds) to wait before checking poll results again. EG_RESTART_STATUS_POLL_INTERVAL=1.0 The interval (in seconds) to wait before polling for the restart status again when a duplicate restart request for the same kernel is received, or when a shutdown request is received while the kernel is still restarting. EG_REMOVE_CONTAINER=True Used by launch_docker.py, indicates whether the kernel's docker container should be removed following its shutdown. Set this value to 'False' if you want the container to be left around in order to troubleshoot issues. Remember to set back to 'True' to restore normal operation. EG_SOCKET_TIMEOUT=5.0 The time (in seconds) the enterprise gateway will wait on its connection file socket waiting on return from a remote kernel launcher.
Upon timeout, the operation will be retried immediately, until the overall time limit has been exceeded. EG_SSH_LOG_LEVEL=WARNING By default, the paramiko ssh library is too verbose for its logging. This value can be adjusted in situations where ssh troubleshooting may be warranted. EG_YARN_LOG_LEVEL=WARNING By default, the yarn-api-client library is too verbose for its logging. This value can be adjusted in situations where YARN troubleshooting may be warranted. ``` ================================================ FILE: docs/source/operators/config-file.md ================================================ # Configuration file options Placing configuration options into the configuration file `jupyter_enterprise_gateway_config.py` is recommended because this will enable the use of the [_dynamic configurables_](config-dynamic.md/#dynamic-configurables) functionality. To generate a template configuration file, run the following: ```bash jupyter enterprisegateway --generate-config ``` This command will produce a `jupyter_enterprise_gateway_config.py` file, typically located in the invoking user's `$HOME/.jupyter` directory. The file contains python code, including comments, for each available configuration option. The actual option itself will also be commented out. To enable that option, set its value and uncomment the code. ```{Note} Some options may appear duplicated. For example, the `remote_hosts` trait appears on both `c.EnterpriseGatewayConfigMixin` and `c.EnterpriseGatewayApp`. This is due to how configurable traits appear in the class hierarchy. Since `EnterpriseGatewayApp` derives from `EnterpriseGatewayConfigMixin` and both are configurable classes, the output contains duplicated values. If both values are set, the value _closest_ to the derived class will be used (in this case, `EnterpriseGatewayApp`). ``` Here's an example entry. Note that its default value, when defined, is also displayed, along with the corresponding environment variable name: ```python ## Bracketed comma-separated list of hosts on which DistributedProcessProxy # kernels will be launched e.g., ['host1','host2']. # (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated) # Default: ['localhost'] # c.EnterpriseGatewayConfigMixin.remote_hosts = ['localhost'] ``` ================================================ FILE: docs/source/operators/config-kernel-override.md ================================================ # Per-kernel overrides As mentioned in the overview of [Process Proxy Configuration](../contributors/system-architecture.md#process-proxy-configuration) capabilities, it's possible to override or amend specific system-level configuration values on a per-kernel basis. These capabilities can be implemented with the kernel specification's process-proxy `config` stanza or via environment variables. ## Per-kernel configuration overrides The following enumerates the set of per-kernel configuration overrides: - `remote_hosts`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.remote_hosts`. Any values specified in the config dictionary override the globally defined values. These apply to all `DistributedProcessProxy` kernels. - `yarn_endpoint`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.yarn_endpoint`. Any values specified in the config dictionary override the globally defined values. These apply to all `YarnClusterProcessProxy` kernels.
Note that you'll likely be required to specify a different `HADOOP_CONF_DIR` setting in the kernel.json's `env` stanza in order for the `spark-submit` command to target the appropriate YARN cluster. - `authorized_users`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.authorized_users`. Any values specified in the config dictionary override the globally defined values. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. Note that the typical use-case for this value is to not set `--EnterpriseGatewayApp.authorized_users` at the global level, but then restrict access at the kernel level. - `unauthorized_users`: This process proxy configuration entry can be used to **_amend_** `--EnterpriseGatewayApp.unauthorized_users`. Any values specified in the config dictionary are **added** to the globally defined values. As a result, once a user is denied access at the global level, they will _always be denied access at the kernel level_. These values apply to **all** process-proxy kernels, including the default `LocalProcessProxy`. - `port_range`: This process proxy configuration entry can be used to override `--EnterpriseGatewayApp.port_range`. Any values specified in the config dictionary override the globally defined values. These apply to all `RemoteProcessProxy` kernels. ## Per-kernel environment overrides In some cases, it is useful to allow specific values that exist in a kernel.json `env` stanza to be overridden on a per-kernel basis. For example, if the kernel.json supports resource limitations, you may want to allow some requests to have access to more memory or GPUs than others. Enterprise Gateway enables this capability by honoring environment variables provided in the json request over those same-named variables in the kernel.json `env` stanza. Environment variables for which this can occur are any variables prefixed with `KERNEL_` as well as any variables listed in the `EnterpriseGatewayApp.client_envs` configurable trait (or via the `EG_CLIENT_ENVS` variable). Likewise, environment variables of the Enterprise Gateway server process listed in the `EnterpriseGatewayApp.inherited_envs` configurable trait (or via the `EG_INHERITED_ENVS` variable) are also available for replacement in the kernel process' environment. See [Kernel Environment Variables](../users/kernel-envs.md) in the Users documentation section for a complete set of recognized `KERNEL_` variables. ================================================ FILE: docs/source/operators/config-security.md ================================================ # Configuring security Jupyter Enterprise Gateway does not currently perform user _authentication_ but, instead, assumes that all users issuing requests have been previously authenticated. Recommended applications for this are [Apache Knox](https://knox.apache.org/) or [Jupyter Hub](https://jupyterhub.readthedocs.io/en/latest/) (e.g., if gateway-enabled notebook servers were spawned targeting an Enterprise Gateway cluster). This section introduces some security features inherent in Enterprise Gateway (with more to come). ## KERNEL_USERNAME In order to convey the name of the authenticated user, `KERNEL_USERNAME` should be sent in the kernel creation request via the `env:` entry. This will occur automatically within the gateway-enabled Notebook server since it propagates all environment variables prefixed with `KERNEL_`.
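For clients that issue REST requests directly, here's a hedged sketch of such a kernel-start request — the host, port, and kernel name are illustrative placeholders:

```bash
# Start a kernel, conveying the authenticated user via the env: entry.
# Host, port, and kernel name are illustrative placeholders.
curl -X POST http://localhost:8888/api/kernels \
     -H 'Content-Type: application/json' \
     -d '{"name": "spark_python_yarn_client", "env": {"KERNEL_USERNAME": "alice"}}'
```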
If the request does not include a `KERNEL_USERNAME` entry, one will be added to the kernel's launch environment with the value of the gateway user. This value is then used within the _authorization_ and _impersonation_ functionality. ## Authorization By default, all users are authorized to start kernels. This behavior can be adjusted when situations arise where more control is required. Basic authorization can be expressed in two ways. ### Authorized Users The command-line or configuration file option: `EnterpriseGatewayApp.authorized_users` can be specified to contain a list of user names indicating which users are permitted to launch kernels within the current gateway server. On each kernel launched, the authorized users list is searched for the value of `KERNEL_USERNAME` (case-sensitive). If the user is found in the list, the kernel's launch sequence continues; otherwise, HTTP Error 403 (Forbidden) is raised and the request fails. ```{warning} Since the `authorized_users` option must be exhaustive, it should be used only in situations where a small and limited set of users are allowed access and empty otherwise. ``` ### Unauthorized Users The command-line or configuration file option: `EnterpriseGatewayApp.unauthorized_users` can be specified to contain a list of user names indicating which users are **NOT** permitted to launch kernels within the current gateway server. The `unauthorized_users` list is always checked prior to the `authorized_users` list. If the value of `KERNEL_USERNAME` appears in the `unauthorized_users` list, the request is immediately failed with the same 403 (Forbidden) HTTP Error. From a system security standpoint, privileged users (e.g., `root` and any users allowed `sudo` privileges) should be added to this option. ### Authorization Failures It should be noted that the corresponding messages logged when each of the above authorization failures occur are slightly different. This allows the administrator to discern from which authorization list the failure was generated. Failures stemming from _inclusion_ in the `unauthorized_users` list will include text similar to the following: ``` User 'bob' is not authorized to start kernel 'Spark - Python (YARN Client Mode)'. Ensure KERNEL_USERNAME is set to an appropriate value and retry the request. ``` Failures stemming from _exclusion_ from a non-empty `authorized_users` list will include text similar to the following: ``` User 'bob' is not in the set of users authorized to start kernel 'Spark - Python (YARN Client Mode)'. Ensure KERNEL_USERNAME is set to an appropriate value and retry the request. ``` ## User Impersonation The Enterprise Gateway server leverages other technologies to implement user impersonation when launching kernels. This option is configured via two pieces of information: `EG_IMPERSONATION_ENABLED` and `KERNEL_USERNAME`. `EG_IMPERSONATION_ENABLED` indicates the intention that user impersonation should be performed and can also be conveyed via the command-line boolean option `EnterpriseGatewayApp.impersonation_enabled` (default = False). `KERNEL_USERNAME` is also conveyed within the environment of the kernel launch sequence, where its value is used to indicate the user that should be impersonated. ### Impersonation in Hadoop YARN clusters In a cluster managed by the Hadoop YARN resource manager, impersonation is implemented by leveraging Kerberos, which is thus required as a prerequisite for user impersonation.
When user impersonation is enabled, kernels are launched with the `--proxy-user ${KERNEL_USERNAME}` option, which tells YARN to launch the kernel in a container used by the provided user name. ```{admonition} Important! :class: warning When using Kerberos in a YARN managed cluster, the gateway user (`elyra` by default) needs to be set up as a `proxyuser` superuser in the hadoop configuration. Please refer to the [Hadoop documentation](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html) regarding the proper configuration steps. ``` ### SPNEGO Authentication to YARN APIs When Kerberos is enabled in a YARN managed cluster, the administration UIs can be configured to require authentication/authorization via SPNEGO. When running Enterprise Gateway in an environment configured this way, we need to convey an extra configuration to enable the proper authorization when communicating with YARN via the YARN APIs. `YARN_ENDPOINT_SECURITY_ENABLED` indicates the requirement to use SPNEGO authentication/authorization when connecting with the YARN APIs and can also be conveyed via the command-line boolean option `EnterpriseGatewayApp.yarn_endpoint_security_enabled` (default = False). ### Impersonation in Standalone or YARN Client Mode Impersonation performed in standalone or YARN client modes tends to take the form of using `sudo` to perform the kernel launch as the target user. This can also be configured within the [run.sh](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernelspecs/spark_python_yarn_client/bin/run.sh) script and requires the following: 1. The gateway user (i.e., the user in which Enterprise Gateway is running) must be enabled to perform sudo operations on each potential host. This enablement must also be done to prevent password prompts since Enterprise Gateway runs in the background. Refer to your operating system documentation for details. 1. Each user identified by `KERNEL_USERNAME` must be associated with an actual operating system user on each host. 1. Once the gateway user is configured for `sudo` privileges, it is **strongly recommended** that the user be included in the set of `unauthorized_users`. Otherwise, kernels not configured for impersonation, or those requests that do not include `KERNEL_USERNAME`, will run as the, now, highly privileged gateway user! ```{warning} Should impersonation be disabled after granting the gateway user elevated privileges, it is **strongly recommended** those privileges be revoked (on all hosts) prior to starting kernels, since those kernels will run as the gateway user **regardless of the value of KERNEL_USERNAME**. ``` ## SSH Tunneling Jupyter Enterprise Gateway is configured to perform SSH tunneling on the five ZeroMQ kernel sockets as well as the communication socket created within the launcher and used to perform remote and cross-user signalling functionality. SSH tunneling is NOT enabled by default. Tunneling can be enabled/disabled via the environment variable `EG_ENABLE_TUNNELING=False`. Note, there is no command-line or configuration file support for this variable. Note that SSH by default validates host keys before connecting to remote hosts and the connection will fail for invalid or unknown hosts. Enterprise Gateway honors this requirement, and invalid or unknown hosts will cause tunneling to fail.
Please perform the necessary steps to validate all hosts before enabling SSH tunneling, such as: - SSH to each cluster node and accept the host key properly - Configure SSH to disable `StrictHostKeyChecking` ## Using Generic Security Service (Kerberos) Jupyter Enterprise Gateway has support for SSH connections using GSS (for example Kerberos), which enables its deployment without the use of an ssh key. The `EG_REMOTE_GSS_SSH` environment variable can be used to control this behavior. ```{seealso} The list of [additional supported environment variables](config-add-env.md#additional-environment-variables). ``` ## Securing Enterprise Gateway Server ### Using SSL for encrypted communication Enterprise Gateway supports Secure Sockets Layer (SSL) communication with its clients. With SSL enabled, all the communication between the server and client is encrypted and highly secure. 1. You can start Enterprise Gateway to communicate via a secure protocol mode by setting the `certfile` and `keyfile` options with the command: ``` jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --certfile=mycert.pem --keyfile=mykey.key ``` As the server starts up, the log should reflect the following: ``` [EnterpriseGatewayApp] Jupyter Enterprise Gateway at https://localhost:8888 ``` Note: the Enterprise Gateway server is started with `HTTPS` instead of `HTTP`, meaning server-side SSL is enabled. ````{tip} A self-signed certificate can be generated with openssl. For example, the following command will create a certificate valid for 365 days with both the key and certificate data written to the same file: ```bash openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout mykey.key -out mycert.pem ``` ```` 1. With Enterprise Gateway server SSL enabled, you now need to configure the client-side SSL, which is accomplished via the Gateway configuration options embedded in the Notebook server. During Jupyter Notebook server startup, export the following environment variables where the gateway-enabled server has access during runtime: ```bash export JUPYTER_GATEWAY_CLIENT_CERT=${PATH_TO_PEM_FILE} export JUPYTER_GATEWAY_CLIENT_KEY=${PATH_TO_KEY_FILE} export JUPYTER_GATEWAY_CA_CERTS=${PATH_TO_SELFSIGNED_CA} ``` ```{note} If using a self-signed certificate, you can set `JUPYTER_GATEWAY_CA_CERTS` to the same value as `JUPYTER_GATEWAY_CLIENT_CERT`. ``` ### Using Enterprise Gateway configuration file You can also utilize the [Enterprise Gateway configuration file](config-file.md#configuration-file-options) to set static configurations for the server. To enable SSL from the configuration file, modify the corresponding parameters to the appropriate values. ``` c.EnterpriseGatewayApp.certfile = '/absolute/path/to/your/certificate/fullchain.pem' c.EnterpriseGatewayApp.keyfile = '/absolute/path/to/your/certificate/privatekey.key' ``` Using the configuration file achieves the same result as starting the server with `--certfile` and `--keyfile`, while providing better readability and maintainability. After configuring the above, the communication between the gateway-enabled Notebook server and Enterprise Gateway is SSL enabled. ================================================ FILE: docs/source/operators/config-sys-env.md ================================================ # System-owned environment variables The following environment variables are managed by Enterprise Gateway and listed here for completeness. ```{warning} Manually setting these variables could adversely affect operations. ``` ```text EG_DOCKER_MODE Docker only.
Used by launch_docker.py to determine if the kernel container should be created using the swarm service API or the regular docker container API. Enterprise Gateway sets this value depending on whether the kernel is using the DockerSwarmProcessProxy or DockerProcessProxy. EG_RESPONSE_ADDRESS This value is set during each kernel launch and resides in the environment of the kernel launch process. Its value represents the address to which the remote kernel's connection information should be sent. Enterprise Gateway is listening on that socket and will associate that connection information with the responding kernel. ``` ================================================ FILE: docs/source/operators/deploy-conductor.md ================================================ # IBM Spectrum Conductor deployments This information will be added shortly. The configuration is similar to that of [Hadoop YARN deployments](deploy-yarn-cluster.md) with the `ConductorClusterProcessProxy` used in place of `YARNClusterProcessProxy`. The following sample kernel specifications are currently available on IBM Spectrum Conductor: - spark_R_conductor_cluster - spark_python_conductor_cluster - spark_scala_conductor_cluster ================================================ FILE: docs/source/operators/deploy-distributed.md ================================================ # Distributed deployments This section describes how to deploy Enterprise Gateway to manage kernels across a distributed set of hosts. In this case, a resource manager is not used, but, rather, SSH is used to distribute the kernels. This functionality is accomplished via the [`DistributedProcessProxy`](../contributors/system-architecture.md#distributedprocessproxy). Steps required to complete deployment on a distributed cluster are: 1. [Install Enterprise Gateway](installing-eg.md) on the "primary node" of the cluster. 1. [Install the desired kernels](installing-kernels.md) 1. Install and configure the server and desired kernel specifications (see below) 1. [Launch Enterprise Gateway](launching-eg.md) The `DistributedProcessProxy` uses a fixed set of host names and, by default, selects the _next_ host using a simple round-robin algorithm (see the [Roadmap](../contributors/roadmap.md) for making this pluggable). In this case, you can still experience bottlenecks on a given node that receives requests to start "large" kernels, but otherwise, you will be better off compared to when all kernels are started on a single node or as local processes, which is the default for Jupyter Notebook and JupyterLab when not configured to use Enterprise Gateway. The following sample kernelspecs are configured to use the `DistributedProcessProxy`: - python_distributed - spark_python_yarn_client - spark_scala_yarn_client - spark_R_yarn_client ```{admonition} Important! :class: warning The `DistributedProcessProxy` utilizes SSH between the Enterprise Gateway server and the remote host. As a result, you must ensure passwordless SSH is configured between hosts. ``` The set of remote hosts used by the `DistributedProcessProxy` is derived from two places. - The configuration option `EnterpriseGatewayApp.remote_hosts`, whose default value comes from the env variable EG_REMOTE_HOSTS - which, itself, defaults to 'localhost'. - The config option can be [overridden on a per-kernel basis](config-kernel-override.md#per-kernel-configuration-overrides) if the process_proxy stanza contains a config stanza where there's a `remote_hosts` entry. If present, this value will be used instead.
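For example, here's a minimal sketch of configuring the global set of remote hosts via the environment prior to launch (host names are illustrative):

```bash
# A minimal sketch: configure the global remote hosts, then launch.
# Host names are illustrative; use the FQDNs of your own cluster nodes.
export EG_REMOTE_HOSTS=host1.acme.com,host2.acme.com
jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0
```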
```{tip} Entries in the remote hosts configuration should be fully qualified domain names (FQDN). For example, `host1.acme.com, host2.acme.com` ``` ```{admonition} Important! :class: warning All the kernel *specifications* configured to use the `DistributedProcessProxy` must be on all nodes to which there's a reference in the remote hosts configuration! With YARN cluster mode, only the Python and R kernel _packages_ are required on each node, not the entire kernel specification. ``` The following installs the sample `python_distributed` kernel specification relative to the 3.2.3 release on the given node. This step must be repeated for each node and each kernel specification. ```Bash wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz KERNELS_FOLDER=/usr/local/share/jupyter/kernels tar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/python_distributed/ python_distributed/ ``` ```{tip} You may find it easier to install all kernel specifications on each node, then remove the directories corresponding to specifications you're not interested in using. ``` ## Specifying a load-balancing algorithm Jupyter Enterprise Gateway provides two ways to configure how kernels are distributed across the configured set of hosts: round-robin or least-connection. ### Round-robin The round-robin algorithm simply uses an index into the set of configured hosts, incrementing the index on each kernel startup so that it points to the next host in the configured set. To specify the use of round-robin, use one of the following: _Command-line_: ```bash --EnterpriseGatewayApp.load_balancing_algorithm=round-robin ``` _Configuration_: ```python c.EnterpriseGatewayApp.load_balancing_algorithm="round-robin" ``` _Environment_: ```bash export EG_LOAD_BALANCING_ALGORITHM=round-robin ``` Since _round-robin_ is the default load-balancing algorithm, this option is not necessary. ### Least-connection The least-connection algorithm tracks the hosts that are currently servicing kernels spawned by the Enterprise Gateway instance. Using this information, Enterprise Gateway selects the host with the least number of kernels. It does not consider other information, or whether there is _another_ Enterprise Gateway instance using the same set of hosts. To specify the use of least-connection, use one of the following: _Command-line_: ```bash --EnterpriseGatewayApp.load_balancing_algorithm=least-connection ``` _Configuration_: ```python c.EnterpriseGatewayApp.load_balancing_algorithm="least-connection" ``` _Environment_: ```bash export EG_LOAD_BALANCING_ALGORITHM=least-connection ``` ### Pinning a kernel to a host A kernel's start request can target a specific remote host by specifying that host in the `KERNEL_REMOTE_HOST` environment variable within the request's body. When specified, the configured load-balancing algorithm will be bypassed and the kernel will be started on the specified host. ## YARN Client Mode YARN client mode kernel specifications can be considered _distributed mode kernels_. They just happen to use `spark-submit` from different nodes in the cluster but use the `DistributedProcessProxy` to manage their lifecycle.
YARN Client kernel specifications require the following environment variable to be set within their `env` entries:

- `SPARK_HOME` must point to the Apache Spark installation path

```
SPARK_HOME:/usr/hdp/current/spark2-client  # For HDP distribution
```

In addition, they will leverage the aforementioned remote hosts configuration. After that, you should have a `kernel.json` that looks similar to the one below:

```json
{
  "language": "python",
  "display_name": "Spark - Python (YARN Client Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "PYSPARK_PYTHON": "/opt/conda/bin/python",
    "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
    "SPARK_YARN_USER_ENV": "PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH",
    "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

Make any necessary adjustments, such as updating `SPARK_HOME` or other environment- and path-specific configurations.

```{tip}
Each node of the cluster will typically be configured in the same manner relative to directory hierarchies and environment variables. As a result, you may find it easier to get kernel specifications working on one node, then, after confirming their operation, copy them to other nodes and update the remote hosts configuration to include the other nodes. You will still need to _install_ the kernels themselves on each node.
```

## Spark Standalone

Although Enterprise Gateway does not provide sample kernelspecs for Spark standalone, here are the steps necessary to convert a `yarn_client` kernelspec to standalone:

- Make a copy of the source `yarn_client` kernelspec into an applicable `standalone` directory.
- Edit the `kernel.json` file:
  - Update the `display_name` with, e.g., `Spark - Python (Spark Standalone)`.
  - Update the `--master` option in `SPARK_OPTS` to point to the spark master node rather than indicate `--deploy-mode client`.
  - Update `SPARK_OPTS` and remove `spark.yarn.submit.waitAppCompletion=false`.
  - Update the `argv` stanza to reference `run.sh` in the appropriate directory.
After that, you should have a `kernel.json` that looks similar to the one below:

```json
{
  "language": "python",
  "display_name": "Spark - Python (Spark Standalone)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "PYSPARK_PYTHON": "/opt/conda/bin/python",
    "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
    "SPARK_YARN_USER_ENV": "PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH",
    "SPARK_OPTS": "--master spark://127.0.0.1:7077 --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID}",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_spark_standalone/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

================================================
FILE: docs/source/operators/deploy-docker.md
================================================

# Docker and Docker Swarm deployments

This section describes how to deploy Enterprise Gateway into an existing Docker or Docker Swarm cluster. The two deployments are nearly identical and any differences will be noted.

The base Enterprise Gateway image is [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) and can be found in the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/), along with other images. See [Docker Images](../contributors/docker.md) for image details.

The following sample kernelspecs are currently available on Docker and Docker Swarm deployments:

- R_docker
- python_docker
- python_tf_docker
- python_tf_gpu_docker
- scala_docker

## Docker Swarm deployment

Enterprise Gateway manifests itself as a Docker Swarm service. It is identified by the name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have a label of `app=enterprise-gateway` applied.

The current deployment uses a compose stack definition, [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml), which creates an overlay network intended for use solely by Enterprise Gateway and any kernel-based services it launches. To deploy the stack to a swarm cluster from a manager node, use:

```bash
docker stack deploy -c docker-compose.yml enterprise-gateway
```

More information about deploying and managing stacks can be found [here](https://docs.docker.com/engine/reference/commandline/stack_deploy/).

Since Swarm's support for session-based affinity has not been investigated at this time, the deployment script configures a single replica. Once session affinity is available, the number of replicas can be increased.

```{note}
Once session affinity has been figured out, we can (theoretically) configure Enterprise Gateway for high availability by increasing the replicas. However, HA support cannot be fully realized until Enterprise Gateway has finalized its persistent sessions functionality.
```

## Docker deployment

An alternative deployment of Enterprise Gateway in docker environments is to deploy Enterprise Gateway as a traditional docker container.
This can be accomplished via the [docker-compose.yml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml) file. However, keep in mind that in choosing this deployment approach, one loses the ability to leverage swarm's monitoring & restart capabilities. That said, choosing this approach does not preclude one from leveraging swarm's scheduling capabilities for launching kernels. As noted below, how kernel instances manifest as docker-based entities (i.e., a swarm service or a docker container) is purely a function of the process proxy class with which they're associated.

To start the stack using compose:

```bash
docker-compose up
```

The documentation for managing a compose stack can be found [here](https://docs.docker.com/compose/overview/).

## Kernelspec Modifications

One of the more common areas of customization we see occurs within the kernelspec files located in `/usr/local/share/jupyter/kernels`. To customize the kernel definitions, the kernels directory can be exposed as a mounted volume, thereby making it available to all containers within the swarm cluster.

As an example, we have included the necessary commands to mount these volumes, both in the deployment script and in the [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/docker/scripts/launch_docker.py) file used to launch docker-based kernels. By default, these references are commented out as they require the system administrator to ensure the directories are available throughout the cluster.

Note that because the kernel launch script, [launch_docker.py](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/docker/scripts/launch_docker.py), resides in the kernelspecs hierarchy, updates or modifications to docker-based kernel instances can now also take place.

## Docker and Docker Swarm Kernel Instances

Enterprise Gateway currently supports launching of _vanilla_ (i.e., non-spark) kernels within a Docker Swarm cluster. When kernels are launched, Enterprise Gateway is responsible for creating the appropriate entity. The kind of entity created is a function of the corresponding process proxy class.

When the process proxy class is `DockerSwarmProcessProxy`, the `launch_docker.py` script will create a Docker Swarm _service_. This service uses a restart policy of `none`, meaning that it's configured to go away upon failures or completion. In addition, because the kernel is launched as a swarm service, the kernel can "land" on any node of the cluster.

When the process proxy class is `DockerProcessProxy`, the `launch_docker.py` script will create a traditional docker _container_. As a result, the kernel will always reside on the same host as the corresponding Enterprise Gateway.

Items worth noting:

1. The Swarm service or Docker container name will be composed of the launching username (`KERNEL_USERNAME`) and kernel-id.
1. The service/container will have 3 labels applied: "kernel_id=", "component=kernel", and "app=enterprise-gateway" - similar to Kubernetes.
1. The service/container will be launched within the same docker network as Enterprise Gateway.

## DockerSwarmProcessProxy

To indicate that a given kernel should be launched as a Docker Swarm service into a swarm cluster, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `DockerSwarmProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Docker Swarm environment.
Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the docker image to associate with the kernel's service container. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`.

```{note}
_The use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. A full list of available image tags can be found in the dockerhub repository corresponding to each image._
```

```json
{
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy",
      "config": {
        "image_name": "elyra/kernel-py:VERSION"
      }
    }
  }
}
```

As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching kernels in a docker environment, what gets invoked isn't the kernel's launcher, but, instead, a python script that is responsible for using the [Docker Python API](https://docker-py.readthedocs.io/en/stable/) to create the corresponding instance.

```json
{
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

## DockerProcessProxy

Running containers in Docker Swarm versus traditional Docker is different enough to warrant having separate process proxy implementations. As a result, the kernel.json file could reference the `DockerProcessProxy` class and, accordingly, a traditional docker container (as opposed to a swarm _service_) will be created. The rest of the kernel.json file - image name, argv stanza, etc. - is identical.

```json
{
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerProcessProxy",
      "config": {
        "image_name": "elyra/kernel-py:VERSION"
      }
    }
  },
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

Upon invocation, the process proxy will set a "docker mode" environment variable (`EG_DOCKER_MODE`) to either `swarm` or `docker`, depending on the process proxy instance; the `launch_docker.py` script uses this value to determine whether a _service_ or _container_ should be created, respectively.

It should be noted that each of these forms of process proxy usage does **NOT** need to match the way in which the Enterprise Gateway instance was deployed. For example, if Enterprise Gateway was deployed as a Docker Swarm service and a `DockerProcessProxy` is used, that corresponding kernel will be launched as a traditional docker container and will reside on the same host as wherever the Enterprise Gateway (swarm) service is running.
Similarly, if Enterprise Gateway was deployed as a standard Docker container and a `DockerSwarmProcessProxy` is used (and assuming a swarm configuration is present), that corresponding kernel will be launched as a docker swarm service and will reside on whatever host the Docker Swarm scheduler decides is best. That is, the kernel container's lifecycle will be managed by the corresponding process proxy, and the Enterprise Gateway's deployment has no bearing.

================================================
FILE: docs/source/operators/deploy-kubernetes.md
================================================

# Kubernetes deployments

## Overview

This section describes how to deploy Enterprise Gateway into an existing Kubernetes cluster. Enterprise Gateway is provisioned as a Kubernetes _deployment_ and exposed as a Kubernetes _service_. Enterprise Gateway can leverage load balancing and high availability functionality provided by Kubernetes (although HA cannot be fully realized until Enterprise Gateway supports persistent sessions).

The following sample kernel specifications apply to Kubernetes deployments:

- R_kubernetes
- python_kubernetes
- python_tf_gpu_kubernetes
- python_tf_kubernetes
- scala_kubernetes
- spark_R_kubernetes
- spark_python_kubernetes
- spark_scala_kubernetes
- spark_python_operator

Enterprise Gateway deployments use the [elyra/enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) image from the Enterprise Gateway dockerhub organization [elyra](https://hub.docker.com/r/elyra/) along with other kubernetes-based images. See [Docker Images](../contributors/docker.md) for image details.

When deployed within a [spark-on-kubernetes](https://spark.apache.org/docs/latest/running-on-kubernetes.html) cluster, Enterprise Gateway can easily support cluster-managed kernels distributed across the cluster. Enterprise Gateway also provides standalone (i.e., _vanilla_) kernel invocation (where spark contexts are not automatically created), which also benefits from distribution across the cluster.

````{note}
If you plan to use kernel specifications derived from the `spark_python_operator` sample, ensure that the [Kubernetes Operator for Apache Spark is installed](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation) in your Kubernetes cluster.

```{tip}
To ensure the proper flow of environment variables to your spark application, make sure the webhook server is enabled when deploying the helm chart:
`helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true`
```
````

We are using helm templates to manage Kubernetes resource configurations, which allows an end-user to easily customize their Enterprise Gateway deployment.

There are two main deployment scenarios if RBAC is enabled in your Kubernetes cluster:

1. Deployment user has **_Cluster Administrator Access_**. In this scenario, you have full access to the cluster and can deploy all components as needed.
1. Deployment user has **_Namespace Administrator Access_**. This is typical for shared multi-tenant environments where each team has control over their namespace, but not the cluster. In this scenario, your cluster Administrator can deploy the RBAC resources and Kernel Image Puller, and you can deploy Enterprise Gateway.

## Prerequisites

- Install and configure [kubectl](https://kubernetes.io/docs/tasks/tools/) and [helm3](https://helm.sh/docs/intro/install/) on your workstation.
- Create the kubernetes namespace where you want to deploy Enterprise Gateway, for example:

```sh
kubectl create namespace enterprise-gateway
```

- If you use RBAC, you will need cluster Admin access to configure RBAC resources
- If you plan to use a private Docker registry, you will need to have credentials (see configuration steps below)

Once the Kubernetes cluster is configured and `kubectl` is demonstrated to be working, it is time to deploy Enterprise Gateway. There are a couple of different deployment options - using helm or kubectl.

## Deploying with helm

Choose this option if you want to deploy via a [helm](https://helm.sh/) chart. You can customize your deployment using value files - review the configuration section below for details.

### Create the Enterprise Gateway kubernetes service and deployment

You can execute the helm command from the checked-out release of the Enterprise Gateway git [repository](https://github.com/jupyter-server/enterprise_gateway.git):

```bash
helm upgrade --install enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name]
```

Alternatively, the helm chart tarball is also accessible as an asset on our [release](https://github.com/jupyter-server/enterprise_gateway/releases) page; replace \[VERSION\] with the specific release version you want to use:

```bash
helm upgrade --install enterprise-gateway \
  https://github.com/jupyter-server/enterprise_gateway/releases/download/v[VERSION]/jupyter_enterprise_gateway_helm-[VERSION].tar.gz \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name]
```

### Access to Enterprise Gateway from outside the cluster

Take a look at the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster-services/#ways-to-connect) on how you can access the Kubernetes service from outside the cluster. A Kubernetes Ingress is the most user-friendly way of interacting with the service and that is what we will cover in this section. If you do not have a Kubernetes Ingress configured on your cluster, the easiest way to get access will be using the NodePort service.

#### Kubernetes Ingress Setup

##### Prerequisites

- Ingress controller deployed on your Kubernetes cluster. Review the Kubernetes [documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for available options.
- Wildcard DNS record is configured to point to the IP of the LoadBalancer, which frontends your ingress controller
- Review specific Ingress controller configuration to enable wildcard path support if you are using Kubernetes version \< v1.18
- With Kubernetes v1.18, Ingress uses the `PathType` parameter, which is set to `Prefix` in the helm chart by default, so no additional configuration is required
- Refer to your ingress controller documentation on how to set up TLS with your ingress

##### Update Helm deployment to enable ingress

Create file `values-ingress.yaml` with the following content:

```yaml
ingress:
  enabled: true
  # Ingress resource host
  hostName: "[unique-fully-qualified-domain-name]"
```

Add this file to your helm command and apply to the cluster, replacing \[PLACEHOLDER\] with appropriate values for your environment:

```bash
helm upgrade --install enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name] \
  -f values-ingress.yaml
```

## Basic Full Configuration Example of Enterprise Gateway Deployment
### Option 1. Use Kubernetes Ingress

Create file `values-full.yaml` with the following content:

```yaml
service:
  type: "ClusterIP"
  ports:
    # The primary port on which Enterprise Gateway is servicing requests.
    - name: "http"
      port: 8888
      targetPort: 8888
    # The port on which Enterprise Gateway will receive kernel connection info responses.
    - name: "http-response"
      port: 8877
      targetPort: 8877

deployment:
  # Update CPU/Memory as needed
  resources:
    limits:
      cpu: 2
      memory: 10Gi
    requests:
      cpu: 1
      memory: 2Gi
  # Update to deploy multiple replicas of EG.
  replicas: 2
  # Give Enterprise Gateway some time to gracefully shutdown
  terminationGracePeriodSeconds: 60

kip:
  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator

ingress:
  enabled: true
  # Ingress resource host
  hostName: "[unique-fully-qualified-domain-name]"
```

### Option 2. Use NodePort Service

Create file `values-full.yaml` with the following content; you can set the node port value or have Kubernetes allocate a random port:

```yaml
service:
  type: "NodePort"
  ports:
    # The primary port on which Enterprise Gateway is servicing requests.
    - name: "http"
      port: 8888
      targetPort: 8888
      # nodePort: 32652 # optional nodePort
    # The port on which Enterprise Gateway will receive kernel connection info responses.
    - name: "http-response"
      port: 8877
      targetPort: 8877
      # nodePort: 30481 # optional nodePort

deployment:
  # Update CPU/Memory as needed
  resources:
    limits:
      cpu: 2
      memory: 10Gi
    requests:
      cpu: 1
      memory: 2Gi
  # Update to deploy multiple replicas of EG.
  replicas: 2
  # Give Enterprise Gateway some time to gracefully shutdown
  terminationGracePeriodSeconds: 60

kip:
  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator

ingress:
  enabled: false
```

### Option 3. Use NodePort Service with Private Docker Registry

Create file `values-full.yaml` with the following content; you can set the node port value or have Kubernetes allocate a random port:

```yaml
global:
  # Create RBAC resources
  rbac: true
  # ImagePullSecrets for a ServiceAccount, list of secrets in the same namespace
  # to use for pulling any images in pods that reference this ServiceAccount.
  # Must be set for any cluster configured with private docker registry.
  imagePullSecrets:
    - private-registry-key # provide the name of the secret to use

# You can optionally create imagePull Secrets
imagePullSecretsCreate:
  enabled: false
  annotations: {}
  # this annotation allows you to keep the secret even if the helm release is deleted
  # "helm.sh/resource-policy": "keep"
  secrets:
    - private-registry-key # provide the name of the secret to create

service:
  type: "NodePort"
  ports:
    # The primary port on which Enterprise Gateway is servicing requests.
    - name: "http"
      port: 8888
      targetPort: 8888
      # nodePort: 32652 # optional nodePort
    # The port on which Enterprise Gateway will receive kernel connection info responses.
    - name: "http-response"
      port: 8877
      targetPort: 8877
      # nodePort: 30481 # optional nodePort

# Enterprise Gateway image name and tag to use from private registry.
image: private.io/elyra/enterprise-gateway:dev

deployment:
  # Update CPU/Memory as needed
  resources:
    limits:
      cpu: 2
      memory: 10Gi
    requests:
      cpu: 1
      memory: 2Gi
  # Update to deploy multiple replicas of EG.
  replicas: 2
  # Give Enterprise Gateway some time to gracefully shutdown
  terminationGracePeriodSeconds: 60

kip:
  enabled: false # turn this off, if running DaemonSets is restricted by your cluster Administrator
  # Kernel Image Puller image name and tag to use from private registry.
  image: private.io/elyra/kernel-image-puller:dev

ingress:
  enabled: false
```

### Deploy with helm

Add the values file to your helm command and apply to the cluster, replacing \[PLACEHOLDER\] with appropriate values for your environment:

```bash
helm upgrade --install enterprise-gateway etc/kubernetes/helm/enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name] \
  -f values-full.yaml
```

If you are using a private registry, add the base64-encoded secret value to your command:
`--set imagePullSecretsCreate.secrets[0].data="UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K"`

### Deploy with kubectl

Choose this deployment option if you want to deploy directly from Kubernetes template files with kubectl, rather than using a package manager like helm. Add the values file to your helm command and generate the `yaml` files, replacing \[PLACEHOLDER\] with appropriate values for your environment:

```bash
helm template \
  --output-dir [/tmp/mydeployment] \
  enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --namespace [namespace-name] \
  -f values-full.yaml
```

If you are using a private registry, add the base64-encoded secret value to your command:
`--set imagePullSecretsCreate.secrets[0].data="UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K"`

Now you can review the generated `yaml` files and apply them to your Kubernetes cluster:

```bash
kubectl apply -f /tmp/mydeployment/enterprise-gateway/templates/
```

```{important}
Never store secrets in your source code control files!
```

### Validation

You can start a Jupyter notebook server that connects to the configured endpoint, e.g. `http://jupyter-e-gw.example.com`.

## Advanced Configuration Example of Enterprise Gateway Deployment

If you need to deploy Enterprise Gateway to a restricted Kubernetes cluster with _RBAC_ and _PodSecurityPolicies_ enabled, you may want to consider deploying Enterprise Gateway components as separate helm releases:

### 1. Helm release which will configure required RBAC, PSP, and service accounts

- Typically, this will be done by the Cluster Administrator.

Create `values-rbac.yaml` file with the following content:

```yaml
global:
  # Create RBAC resources
  rbac: true
  serviceAccountName: 'enterprise-gateway-sa'

deployment:
  enabled: false

ingress:
  enabled: false

kip:
  enabled: false
  serviceAccountName: 'kernel-image-puller-sa'
  podSecurityPolicy:
    create: true
```

Run helm deploy:

```bash
helm upgrade --install enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name] \
  -f values-rbac.yaml
```

### 2. Helm release to deploy Kernel Image Puller

- Typically, this will be done by the Cluster Administrator.

Create `values-kip.yaml` file with the following content:

```yaml
global:
  # Create RBAC resources
  rbac: true

deployment:
  enabled: false

ingress:
  enabled: false

# Kernel Image Puller (daemonset)
kip:
  enabled: true
  serviceAccountName: 'kernel-image-puller-sa'
  podSecurityPolicy:
    create: false
  resources:
    limits:
      cpu: 100m
      memory: 200Mi
    requests:
      cpu: 50m
      memory: 100Mi
```

Run helm deploy:

```bash
helm upgrade --install enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name] \
  -f values-kip.yaml
```

### 3. Helm release to deploy Enterprise Gateway

- This can be done by the namespace Administrator.

Create `values-eg.yaml` file with the following content:

```yaml
global:
  rbac: false

service:
  type: "ClusterIP"
  ports:
    # The primary port on which Enterprise Gateway is servicing requests.
- name: "http" port: 8888 targetPort: 8888 # nodePort: 32652 # optional nodePort # The port on which Enterprise Gateway will receive kernel connection info responses. - name: "http-response" port: 8877 targetPort: 8877 # nodePort: 30481 # optional nodePort deployment: enabled: true resources: limits: cpu: 2 memory: 10Gi requests: cpu: 500m memory: 2Gi # Update to deploy multiple replicas of EG. replicas: 1 # Give Enteprise Gateway some time to gracefully shutdown terminationGracePeriodSeconds: 60 ingress: enabled: true # Ingress resource host hostName: "[unique-fully-qualified-domain-name]" kip: enabled: false ``` Run helm deploy: ```bash helm upgrade --install enterprise-gateway \ etc/kubernetes/helm/enterprise-gateway \ --kube-context [mycluster-context-name] \ --namespace [namespace-name] \ -f values-eg.yaml ``` ## Helm Configuration Parameters Here are the values that you can set when deploying the helm chart. You can override them with helm's `--set` or `--values` options. Always use `--set` to configure secrets. | **Parameter** | **Description** | **Default** | | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | | `global.rbac` | Create Kubernetes RBAC resources | `true` | | `global.commonLabels` | Common labels to apply to daemonset and deployment resources | `{}` | | `global.imagePullSecrets` | Optional array of image pull secrets for Service Account for pulling images from private service registries | \[\] | | `imagePullSecretsCreate.enabled` | Optional enable creation of the Kubernetes secrets to access private registries. | 'false' | | `imagePullSecretsCreate.annotations` | Annotations for Kubernetes secrets | '{}' | | `imagePullSecretsCreate.secrets` | Array of Kubernetes secrets to create with the following structure: `name` - secret name and `data` - base64 encoded Secret value. Example: `{ name: "myregistrykey", data: "SGVsbG8gc2VjcmV0Cg==" }` | '\[\]' | | `image` | Enterprise Gateway image name and tag to use. Ensure the tag is updated to the version of Enterprise Gateway you wish to run. | `elyra/enterprise-gateway:VERSION`, where `VERSION` is the release being used | | `imagePullPolicy` | Enterprise Gateway image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific this policy should be sufficient for them as well. | `IfNotPresent` | | `service.type` | Kubernetes Service Type - Nodeport,ClusterIP,LoadBalancer | `ClusterIP` | | `service.externalIPs.k8sMasterPublicIP` | Master public IP on which to expose EG. | nil | | `service.ports` | An array of service ports for Kubernetes Service | see below | | `service.ports[0].name` | The primary port name for Enterprise Gateway is servicing requests. | `http` | | `service.ports[0].port` | The primary port on which Enterprise Gateway is servicing requests. | `8888` | | `service.ports[1].name` | The port name on which Enterprise Gateway will receive kernel connection info responses. | `http-response` | | `service.ports[1].port` | The port on which Enterprise Gateway will receive kernel connection info responses. 
| `deployment.enabled` | Flag to enable the Enterprise Gateway deployment | `true` |
| `deployment.serviceAccountName` | Kubernetes Service Account to run Enterprise Gateway | `enterprise-gateway-sa` |
| `deployment.tolerations` | Kubernetes tolerations for Enterprise Gateway pods to ensure that pods are not scheduled onto inappropriate nodes | `[]` |
| `deployment.affinity` | Kubernetes affinity for Enterprise Gateway pods to keep pods scheduled onto appropriate nodes | `{}` |
| `deployment.nodeSelector` | Kubernetes nodeSelector for Enterprise Gateway pods to keep pods scheduled onto appropriate nodes - a simpler alternative to tolerations and affinity | `{}` |
| `deployment.terminationGracePeriodSeconds` | Time to wait for Enterprise Gateway to gracefully shut down. | `30` |
| `deployment.resources` | Set Enterprise Gateway container resources. | valid Yaml resources, see values file for example |
| `deployment.replicas` | Update to deploy multiple replicas of EG. | `1` |
| `deployment.extraEnv` | Additional environment variables to set for Enterprise Gateway. | `{}` |
| `logLevel` | Log output level. | `DEBUG` |
| `mirrorWorkingDirs` | Whether to mirror working directories. NOTE: This requires appropriate volume mounts to make the notebook dir accessible. | `false` |
| `authToken` | Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token) | `nil` |
| `kernel.clusterRole` | Kernel cluster role created by this chart. Used if no KERNEL_NAMESPACE is provided by the client. | `kernel-controller` |
| `kernel.shareGatewayNamespace` | Will start kernels in the same namespace as EG if True. | `false` |
| `kernel.defaultServiceAccountName` | Service account name to use for kernel pods when no service account is specified. This service account should exist in the namespace where kernel pods are launched. | `default` |
| `kernel.launchTimeout` | Timeout for kernel launching in seconds. | `60` |
| `kernel.cullIdleTimeout` | Idle timeout in seconds. Default is 1 hour. | `3600` |
| `kernel.cullConnected` | Whether to cull idle kernels that still have clients connected. | `false` |
| `kernel.allowedKernels` | List of kernel names that are available for use. | `{r_kubernetes,...}` (see `values.yaml`) |
| `kernel.defaultKernelName` | Default kernel name; should be something from the allowedKernels | `python_kubernetes` |
| `kernelspecs.image` | Optional custom data image containing kernelspecs to use. Cannot be used with NFS enabled. | `nil` |
| `kernelspecs.imagePullPolicy` | Kernelspecs image pull policy. | `Always` |
| `nfs.enabled` | Whether NFS-mounted kernelspecs are enabled. Cannot be used with `kernelspecs.image` set. | `false` |
| `nfs.internalServerIPAddress` | IP address of the NFS server. Required if NFS is enabled. | `nil` |
| `kernelspecsPvc.enabled` | Use a persistent volume claim to store kernelspecs in a persistent volume | `false` |
| `kernelspecsPvc.name` | PVC name. Required if you want to mount kernelspecs without NFS. The PVC should be created in the same namespace before EG is deployed. | `nil` |
| `ingress.enabled` | Whether to include an EG ingress resource during deployment. | `false` |
| `ingress.ingressClassName` | Specify a Kubernetes ingress class name for the Enterprise Gateway deployment ingress. | `""` |
| `ingress.hostName` | Kubernetes Ingress hostname, required. | `nil` |
| `ingress.pathType` | Kubernetes Ingress PathType (`ImplementationSpecific`, `Prefix`). | `Prefix` |
| `ingress.path` | Kubernetes Ingress Path. | `/` |
| `ingress.annotations` | Use annotations to configure ingress. See examples for Traefik and nginx. NOTE: A traefik or nginx controller must be installed and `ingress.enabled` must be set to `true`. | see values file for examples |
| `kip.enabled` | Whether the Kernel Image Puller should be used | `true` |
| `kip.podSecurityPolicy.create` | Enable creation of a PSP for the Image Puller; requires `global.rbac: true` and a non-empty KIP service account | `false` |
| `kip.podSecurityPolicy.annotatons` | Annotations for the Image Puller PSP | `{}` |
| `kip.tolerations` | Kubernetes tolerations for Kernel Image Puller pods to ensure that pods are not scheduled onto inappropriate nodes | `[]` |
| `kip.affinity` | Kubernetes affinity for Kernel Image Puller pods to keep pods scheduled onto appropriate nodes | `{}` |
| `kip.nodeSelector` | Kubernetes nodeSelector for Kernel Image Puller pods to keep pods scheduled onto appropriate nodes - a simpler alternative to tolerations and affinity | `{}` |
| `kip.serviceAccountName` | Kubernetes Service Account to run the Kernel Image Puller | `kernel-image-puller-sa` |
| `kip.resources` | Set Kernel Image Puller container resources. | valid Yaml resources, see values file for example |
| `kip.image` | Kernel Image Puller image name and tag to use. Ensure the tag is updated to the version of the Enterprise Gateway release you wish to run. | `elyra/kernel-image-puller:VERSION`, where `VERSION` is the release being used |
| `kip.imagePullPolicy` | Kernel Image Puller image pull policy. Use `IfNotPresent` policy so that dev-based systems don't automatically update. This provides more control. Since formal tags will be release-specific, this policy should be sufficient for them as well. | `IfNotPresent` |
| `kip.interval` | The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images. | `300` |
| `kip.pullPolicy` | Determines whether the Kernel Image Puller will pull kernel images it has previously pulled (`Always`) or only those it hasn't yet pulled (`IfNotPresent`) | `IfNotPresent` |
| `kip.criSocket` | The container runtime interface socket; use `/run/containerd/containerd.sock` for containerd installations | `/var/run/docker.sock` |
| `kip.defaultContainerRegistry` | Prefix to use if a registry is not already specified on the image name (e.g., elyra/kernel-py:VERSION) | `docker.io` |
| `kip.fetcher` | Fetcher to fetch image names; defaults to `KernelSpecsFetcher` | `KernelSpecsFetcher` |
| `kip.images` | If `StaticListFetcher` is used, `KIP_IMAGES` defines the list of images pullers will fetch | `[]` |
| `kip.internalFetcher` | If `CombinedImagesFetcher` is used, `KIP_INTERNAL_FETCHERS` defines the fetchers that get used internally | `KernelSpecsFetcher` |

## Uninstalling Enterprise Gateway

When using helm, you can uninstall Enterprise Gateway with the following command:

```bash
helm uninstall enterprise-gateway \
  --kube-context [mycluster-context-name] \
  --namespace [namespace-name]
```

## Enterprise Gateway Deployment Details

Enterprise Gateway is deployed as a Kubernetes deployment and exposed by a Kubernetes service. It can be accessed by the service name `enterprise-gateway` within the cluster. In addition, all objects related to Enterprise Gateway, including kernel instances, have the kubernetes label of `app=enterprise-gateway` applied.
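Since every Enterprise Gateway object carries this label, a label-based query is a quick way to inspect the deployment and any running kernels - a minimal sketch (namespaces and output will vary by installation):

```bash
# All Enterprise Gateway-related objects, across namespaces (kernels run in
# their own namespaces by default - see the Namespaces section below).
kubectl get all --all-namespaces -l app=enterprise-gateway

# Kernel pods only; the component=kernel label is applied to kernel instances.
kubectl get pods --all-namespaces -l app=enterprise-gateway,component=kernel
```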
The Enterprise Gateway Kubernetes service _type_ can be:

- `NodePort`: allows access to Enterprise Gateway via `http://[worker IP]:[NodePort]` or by having a load balancer route traffic to `http://[worker IP's]:[NodePort]`
- `LoadBalancer`: requires an appropriate network plugin to be available
- `ClusterIP`: requires a Kubernetes Ingress Controller

Kernels are stateful, therefore the service is configured with a `sessionAffinity` of `ClientIP`. As a result, kernel creation requests will be routed to the same pod. Increase the number of `replicas` of the Enterprise Gateway deployment to improve availability; however, because of the `sessionAffinity` of `ClientIP`, traffic from the same client will be sent to the same Enterprise Gateway pod, and if that pod goes down, the client will get an error and will need to re-establish its connection to another Enterprise Gateway pod.

### Namespaces

A best practice for Kubernetes applications running in an enterprise is to isolate applications via namespaces. Since Enterprise Gateway also requires isolation at the kernel level, it makes sense to use a namespace for each kernel, by default.

The primary namespace is created prior to the initial Helm deployment (e.g., `enterprise-gateway`). This value is communicated to Enterprise Gateway via the env variable `EG_NAMESPACE`. All Enterprise Gateway components reside in this namespace.

By default, kernel namespaces are created when the respective kernel is launched. At that time, the kernel namespace name is computed from the kernel username (`KERNEL_USERNAME`) and its kernel ID (`KERNEL_ID`), just like the kernel pod name. Upon a kernel's termination, this namespace - provided it was created by Enterprise Gateway - will be deleted.

Installations wishing to pre-create the kernel namespace can do so by conveying the name of the kernel namespace via `KERNEL_NAMESPACE` in the `env` portion of the kernel creation request. (They must also provide the namespace's service account name via `KERNEL_SERVICE_ACCOUNT_NAME` - see next section.) When `KERNEL_NAMESPACE` is set, Enterprise Gateway will not attempt to create a kernel-specific namespace, nor will it attempt its deletion. As a result, kernel namespace lifecycle management is the user's responsibility.

```{tip}
If you need to associate resources to users, one suggestion is to create a namespace per user and set `KERNEL_NAMESPACE = KERNEL_USERNAME` on the client (see [Kernel Environment Variables](../users/kernel-envs.md)).
```

Although **not recommended**, installations requiring everything in the same namespace - Enterprise Gateway and all its kernels - can do so by setting the helm chart value `kernel.shareGatewayNamespace` to `true`, which is then set into the `EG_SHARED_NAMESPACE` env. When set, all kernels will run in the Enterprise Gateway namespace, essentially eliminating all aspects of isolation between kernel instances (and resources).

### Role-Based Access Control (RBAC)

Another best practice of Kubernetes applications is to define the minimally viable set of permissions for the application. Enterprise Gateway does this by defining role-based access control (RBAC) objects for both Enterprise Gateway and kernels. Because the Enterprise Gateway pod must create kernel namespaces, pods, services (for Spark support), and role bindings, a cluster-scoped role binding is required.
The cluster role binding `enterprise-gateway-controller` also references the subject, `enterprise-gateway-sa`, which is the service account associated with the Enterprise Gateway namespace and also created by [eg-clusterrolebinding.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrolebinding.yaml).

The [`eg-clusterrole.yaml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrole.yaml) defines the minimally viable roles for a kernel pod - most of which are required for Spark support. Since kernels, by default, reside within their own namespace created upon their launch, a cluster role is used within a namespace-scoped role binding created when the kernel's namespace is created. The name of the kernel cluster role is `kernel-controller` and, when Enterprise Gateway creates the namespace and role binding, it is also the name of the role binding instance.

#### Kernel Service Account Name

As noted above, installations wishing to pre-create their own kernel namespaces should provide the name of the service account associated with the namespace via `KERNEL_SERVICE_ACCOUNT_NAME` in the `env` portion of the kernel creation request (along with `KERNEL_NAMESPACE`). If not provided, the built-in namespace service account, `default`, will be referenced. In such circumstances, Enterprise Gateway will **not** create a role binding for that service account, so it is the user's responsibility to ensure that the service account has the capability to perform equivalent operations as defined by the `kernel-controller` role.

#### Example Custom Namespace

Here's an example of the creation of a custom namespace (`kernel-ns`) with its own service account (`kernel-sa`) and role binding (`kernel-controller`) that references the cluster-scoped role (`kernel-controller`) and includes appropriate labels to help with administration and analysis:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kernel-ns
  labels:
    app: enterprise-gateway
    component: kernel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kernel-sa
  namespace: kernel-ns
  labels:
    app: enterprise-gateway
    component: kernel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kernel-controller
  namespace: kernel-ns
  labels:
    app: enterprise-gateway
    component: kernel
subjects:
  - kind: ServiceAccount
    name: kernel-sa
    namespace: kernel-ns
roleRef:
  kind: ClusterRole
  name: kernel-controller
  apiGroup: rbac.authorization.k8s.io
```

### Kernel Image Puller

Kernel docker images can be big, and their download from a container repository (e.g., docker.io or quay.io) may cause slow kernel pod startup whenever the kernel image is first accessed on any given node. To mitigate this issue, the Enterprise Gateway deployment includes the `kernel-image-puller` (or `KIP`) Kubernetes DaemonSet. This DaemonSet is responsible for polling Enterprise Gateway for the current set of configured kernelspecs, picking out any configured image name references, and pulling those images to the node on which KIP is running. Because it's a daemon set, this will also address the case when new nodes are added to a configuration (although spinning up new nodes on a kernel start request will likely time out anyway).
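To confirm that a puller pod is running on each node, a simple status check can be used - a sketch assuming the default `enterprise-gateway` namespace:

```bash
# One KIP pod should be scheduled (and Running) per schedulable node.
kubectl get daemonsets -n enterprise-gateway
kubectl get pods -n enterprise-gateway -o wide
```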
#### KIP Configuration

KIP uses the same Kubernetes Service Account as Enterprise Gateway itself, so it will use the same credentials to access a private docker registry - see the helm configuration section for details.

The Kernel Image Puller can be configured for the interval at which it checks for new kernelspecs (`KIP_INTERVAL`), the number of puller threads it will utilize per node (`KIP_NUM_PULLERS`), the number of retries it will attempt for a given image (`KIP_NUM_RETRIES`), and the pull policy (`KIP_PULL_POLICY`) - which essentially dictates whether it will attempt to pull images that it has already encountered (`Always`) vs. only pulling the image if it hasn't seen it yet (`IfNotPresent`).

If Enterprise Gateway defines an authentication token (`EG_AUTH_TOKEN`), then that same token should be configured here (`KIP_AUTH_TOKEN`) so that the puller can correctly authenticate its requests.

#### KIP Container Runtime

The Kernel Image Puller also supports multiple container runtimes since Docker is no longer configured by default in Kubernetes. KIP currently supports Docker and Containerd runtimes. If another runtime is encountered, KIP will try to proceed using the Containerd client `crictl` against the configured socket. As a result, it is important that the `criSocket` value be appropriately configured relative to the container runtime. If the runtime is something other than Docker or Containerd and `crictl` isn't able to pull images, it may be necessary to manually pre-seed images or incur kernel start timeouts the first time a given node is asked to start a kernel associated with a non-resident image.

KIP also supports the notion of a _default container registry_ whereby, for image names that do not specify a registry (e.g., `docker.io` or `quay.io`), KIP will apply the configured default. Ideally, the image name should be fully qualified.

### Kernelspec Modifications

One of the more common areas of customization we see occurs within the kernelspec files located in `/usr/local/share/jupyter/kernels`. To accommodate the ability to customize the kernel definitions, you have two different options: NFS mounts, or custom container images. The two options are mutually exclusive, because they mount kernelspecs into the same location in the Enterprise Gateway pod.

#### Via NFS

The kernels directory can be mounted as an NFS volume into the Enterprise Gateway pod, thereby making the kernelspecs available to all EG pods within the Kubernetes cluster (provided the NFS mounts exist on all applicable nodes).

As an example, we have included the necessary entries for mounting an existing NFS mount point into the Enterprise Gateway pod. By default, these references are commented out as they require the operator to configure the appropriate NFS mounts and server IP. If you are deploying Enterprise Gateway via the helm chart, you can enable NFS directly via helm values, as shown in the sketch below.
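The helm parameters `nfs.enabled` and `nfs.internalServerIPAddress` (described in the configuration table above) control this; a sketch of enabling them at deployment time (the server IP is illustrative):

```bash
helm upgrade --install enterprise-gateway \
  etc/kubernetes/helm/enterprise-gateway \
  --namespace enterprise-gateway \
  --set nfs.enabled=true \
  --set nfs.internalServerIPAddress=10.20.30.40
```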
Here you can see how `deployment.yaml` references use of the volume, via `volumeMounts` for the container specification and `volumes` in the pod specification (non-applicable entries have been omitted):

```yaml
spec:
  containers:
    # Uncomment to enable NFS-mounted kernelspecs
    volumeMounts:
      - name: kernelspecs
        mountPath: '/usr/local/share/jupyter/kernels'
  volumes:
    - name: kernelspecs
      nfs:
        server:
        path: '/usr/local/share/jupyter/kernels'
```

```{tip}
Because the kernel pod definition file, [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2), resides in the kernelspecs hierarchy, customizations to the deployments of future kernel instances can now also take place. In addition, these same entries can be added to the kernel-pod.yaml definitions if access to the same or other NFS mount points are desired within kernel pods. (We'll be looking at ways to make modifications to per-kernel configurations more manageable.)
```

Use of more formal persistent volume types must include the [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes) and corresponding Persistent Volume Claim stanzas.

#### Via Custom Container Image

If you are deploying Enterprise Gateway via the helm chart, then instead of using NFS, you can build your custom kernelspecs into a container image that Enterprise Gateway consumes. Here's an example Dockerfile for such a container:

```
FROM alpine:3.9
COPY kernels /kernels
```

This assumes that your source contains a `kernels/` directory with all the kernelspecs you'd like to end up in the image, e.g. `kernels/python_kubernetes/kernel.json` and any associated files.

Once you build your custom kernelspecs image and push it to a container registry, you can refer to it from your helm deployment. For instance:

```bash
helm upgrade --install --atomic --namespace enterprise-gateway enterprise-gateway etc/kubernetes/helm --set kernelspecs.image=your-custom-image:latest
```

...where `your-custom-image:latest` is the image name and tag of your kernelspecs image. Once deployed, the helm chart copies the data from the `/kernels` directory of your container into the `/usr/local/share/jupyter/kernels` directory of the Enterprise Gateway pod. Note that when this happens, the built-in kernelspecs are no longer available. So include all kernelspecs that you want to be available in your container image. Also, you should update the helm chart `kernel.allowedKernels` (often referred to as the kernel whitelist) value with the name(s) of your custom kernelspecs.

## Kubernetes Kernel Instances

There are essentially two kinds of kernels (independent of language) launched within an Enterprise Gateway Kubernetes cluster - _vanilla_ and _spark-on-kubernetes_ (if available).

When _vanilla_ kernels are launched, Enterprise Gateway is responsible for creating the corresponding pod. On the other hand, _spark-on-kubernetes_ kernels are launched via `spark-submit` with a specific `master` URI - which then creates the corresponding pod(s) (including executor pods). Images can be launched using both forms provided they have the appropriate support for Spark installed.

Here's the yaml configuration used when _vanilla_ kernels are launched.
As noted in the `KubernetesProcessProxy` section below, this file ([kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2)) serves as a template where each of the tags surrounded with `{{` and `}}` represents a variable that is substituted at the time of the kernel's launch. All `{{ kernel_xxx }}` parameters correspond to `KERNEL_XXX` environment variables that can be specified from the client in the kernel creation request's json body.

```yaml+jinja
apiVersion: v1
kind: Pod
metadata:
  name: "{{ kernel_pod_name }}"
  namespace: "{{ kernel_namespace }}"
  labels:
    kernel_id: "{{ kernel_id }}"
    app: enterprise-gateway
    component: kernel
spec:
  restartPolicy: Never
  serviceAccountName: "{{ kernel_service_account_name }}"
  {% if kernel_uid is defined or kernel_gid is defined %}
  securityContext:
    {% if kernel_uid is defined %}
    runAsUser: {{ kernel_uid | int }}
    {% endif %}
    {% if kernel_gid is defined %}
    runAsGroup: {{ kernel_gid | int }}
    {% endif %}
    fsGroup: 100
  {% endif %}
  containers:
    - env:
        - name: EG_RESPONSE_ADDRESS
          value: "{{ eg_response_address }}"
        - name: EG_PUBLIC_KEY
          value: "{{ eg_public_key }}"
        - name: KERNEL_LANGUAGE
          value: "{{ kernel_language }}"
        - name: KERNEL_SPARK_CONTEXT_INIT_MODE
          value: "{{ kernel_spark_context_init_mode }}"
        - name: KERNEL_NAME
          value: "{{ kernel_name }}"
        - name: KERNEL_USERNAME
          value: "{{ kernel_username }}"
        - name: KERNEL_ID
          value: "{{ kernel_id }}"
        - name: KERNEL_NAMESPACE
          value: "{{ kernel_namespace }}"
      image: "{{ kernel_image }}"
      name: "{{ kernel_pod_name }}"
```

There are a number of items worth noting:

1. Kernel pods can be identified in three ways using `kubectl`:
   1. By the global label `app=enterprise-gateway` - useful when needing to identify all related objects (e.g., `kubectl get all -l app=enterprise-gateway`)
   1. By the _kernel_id_ label `kernel_id=` - useful when only needing specifics about a given kernel. This label is used internally by enterprise-gateway when performing its discovery and lifecycle management operations.
   1. By the _component_ label `component=kernel` - useful when needing to identify only kernels and not other enterprise-gateway components. (Note, the latter can be isolated via `component=enterprise-gateway`.)

   Note that since kernels run in isolated namespaces by default, it's often helpful to include the clause `--all-namespaces` on commands that will span namespaces. To isolate commands to a given namespace, you'll need to add the namespace clause `--namespace `.

1. Each kernel pod is named by the invoking user (via the `KERNEL_USERNAME` env) and its kernel_id (env `KERNEL_ID`). This identifier also applies to those kernels launched within `spark-on-kubernetes`.
1. Kernel pods use the specified `securityContext`. If env `KERNEL_UID` is not specified in the kernel creation request, a default value of `1000` (the jovyan user) will be used. Similarly for `KERNEL_GID`, whose default is `100` (the users group). In addition, Enterprise Gateway enforces a list of prohibited UID and GID values. By default, this list is initialized to the 0 (root) UID and GID. Administrators can configure the `EG_PROHIBITED_UIDS` and `EG_PROHIBITED_GIDS` environment variables via the `deployment.yaml` file with comma-separated values to alter the set of user and group ids to be prevented.
1. As noted above, if `KERNEL_NAMESPACE` is not provided in the request, Enterprise Gateway will create a namespace using the same naming algorithm for the pod.
   In addition, the `kernel-controller` cluster role will be bound to a namespace-scoped role binding of the same name using the namespace's default service account as its subject. Users wishing to use their own kernel namespaces must provide **both** `KERNEL_NAMESPACE` and `KERNEL_SERVICE_ACCOUNT_NAME`, as these are both used in the `kernel-pod.yaml.j2` as `{{ kernel_namespace }}` and `{{ kernel_service_account_name }}`, respectively.

1. Kernel pods have restart policies of `Never`. This is because the Jupyter framework already has built-in logic for auto-restarting failed kernels and any other restart policy would likely interfere with the built-in behaviors.
1. The parameters to the launcher that is built into the image are communicated via environment variables as noted in the `env:` section above.

## Unconditional Volume Mounts

Unconditional volume mounts can be added in the `kernel-pod.yaml.j2` template. An example of these unconditional volume mounts can be found when extending docker shared memory. For some I/O jobs, the pod will need more than the default `64mb` of shared memory on the `/dev/shm` path:

```yaml+jinja
volumeMounts:
  # Define any "unconditional" mounts here, followed by "conditional" mounts that vary per client
  {% if kernel_volume_mounts %}
  {% for volume_mount in kernel_volume_mounts %}
  - {{ volume_mount }}
  {% endfor %}
  {% endif %}
volumes:
  # Define any "unconditional" volumes here, followed by "conditional" volumes that vary per client
  {% if kernel_volumes %}
  {% for volume in kernel_volumes %}
  - {{ volume }}
  {% endfor %}
  {% endif %}
```

The conditional volumes are handled by the loops inside the yaml file. Any unconditional volumes can be added before these conditions. In the scenario where `/dev/shm` needs to be expanded, the following mount has to be added:

```yaml+jinja
volumeMounts:
  # Define any "unconditional" mounts here, followed by "conditional" mounts that vary per client
  - mountPath: /dev/shm
    name: dshm
  {% if kernel_volume_mounts %}
  {% for volume_mount in kernel_volume_mounts %}
  - {{ volume_mount }}
  {% endfor %}
  {% endif %}
volumes:
  # Define any "unconditional" volumes here, followed by "conditional" volumes that vary per client
  - name: dshm
    emptyDir:
      medium: Memory
  {% if kernel_volumes %}
  {% for volume in kernel_volumes %}
  - {{ volume }}
  {% endfor %}
  {% endif %}
```

## Kubernetes Resource Quotas

When deploying kernels on a Kubernetes cluster, a best practice is to define request and limit quotas for CPUs, GPUs, and memory. These quotas can be defined from the client via KERNEL\_-prefixed environment variables which are passed through to the kernel at startup:

- `KERNEL_CPUS` - CPU request by kernel
- `KERNEL_MEMORY` - memory request by kernel
- `KERNEL_GPUS` - GPU request by kernel
- `KERNEL_CPUS_LIMIT` - CPU limit
- `KERNEL_MEMORY_LIMIT` - memory limit
- `KERNEL_GPUS_LIMIT` - GPU limit

Memory and CPU units are based on the [Kubernetes Official Documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/), while GPU uses the NVIDIA `nvidia.com/gpu` parameter. The desired units should be included in the variable's value. When defined, these variables are then substituted into the appropriate location of the corresponding kernel-pod.yaml.j2 template.
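As an illustration, a client could convey these quotas in the `env` stanza of the kernel start request - a hypothetical sketch using curl (the endpoint, kernel name, and values are illustrative):

```bash
# Start a python_kubernetes kernel with explicit CPU/memory requests and limits.
curl -X POST http://localhost:8888/api/kernels \
  -H 'Content-Type: application/json' \
  -d '{
    "name": "python_kubernetes",
    "env": {
      "KERNEL_USERNAME": "jovyan",
      "KERNEL_CPUS": "1",
      "KERNEL_CPUS_LIMIT": "2",
      "KERNEL_MEMORY": "2Gi",
      "KERNEL_MEMORY_LIMIT": "4Gi"
    }
  }'
```

The template section that receives these substitutions is shown below.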
```yaml+jinja
{% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined or kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}
resources:
  {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined %}
  requests:
    {% if kernel_cpus is defined %}
    cpu: "{{ kernel_cpus }}"
    {% endif %}
    {% if kernel_memory is defined %}
    memory: "{{ kernel_memory }}"
    {% endif %}
    {% if kernel_gpus is defined %}
    nvidia.com/gpu: "{{ kernel_gpus }}"
    {% endif %}
  {% endif %}
  {% if kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %}
  limits:
    {% if kernel_cpus_limit is defined %}
    cpu: "{{ kernel_cpus_limit }}"
    {% endif %}
    {% if kernel_memory_limit is defined %}
    memory: "{{ kernel_memory_limit }}"
    {% endif %}
    {% if kernel_gpus_limit is defined %}
    nvidia.com/gpu: "{{ kernel_gpus_limit }}"
    {% endif %}
  {% endif %}
{% endif %}
```

## KubernetesProcessProxy

To indicate that a given kernel should be launched into a Kubernetes configuration, the kernel.json file's `metadata` stanza must include a `process_proxy` stanza indicating a `class_name:` of `KubernetesProcessProxy`. This ensures the appropriate lifecycle management will take place relative to a Kubernetes environment.

Along with the `class_name:` entry, this process proxy stanza should also include a proxy configuration stanza which specifies the container image to associate with the kernel's pod. If this entry is not provided, the Enterprise Gateway implementation will use a default entry of `elyra/kernel-py:VERSION`. In either case, this value is made available to the rest of the parameters used to launch the kernel by way of an environment variable: `KERNEL_IMAGE`.

_(Please note that the use of `VERSION` in docker image tags is a placeholder for the appropriate version-related image tag. When kernelspecs are built via the Enterprise Gateway Makefile, `VERSION` is replaced with the appropriate version denoting the target release. A full list of available image tags can be found in the dockerhub repository corresponding to each image.)_

```json
{
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "elyra/kernel-py:VERSION"
      }
    }
  }
}
```

As always, kernels are launched by virtue of the `argv:` stanza in their respective kernel.json files. However, when launching _vanilla_ kernels in a kubernetes environment, what gets invoked isn't the kernel's launcher but, instead, a python script that is responsible for using the [Kubernetes Python API](https://github.com/kubernetes-client/python) to create the corresponding pod instance. The pod is _configured_ by applying the values to each of the substitution parameters into the [kernel-pod.yaml](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file previously displayed.
This file resides in the same `scripts` directory as the kubernetes launch script - `launch_kubernetes.py` - which is referenced by the kernel.json's `argv:` stanza:

```json
{
  "argv": [
    "python",
    "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

By default, _vanilla_ kernels use a value of `none` for the spark context initialization mode, so no context will be created automatically.

When the kernel is intended to target _Spark-on-kubernetes_, its launch is very much like kernels launched in YARN _cluster mode_, albeit with a completely different set of parameters. Here's an example `SPARK_OPTS` string value which best conveys the idea:

```
"SPARK_OPTS": "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.docker.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor-py:v2.5.0-kubernetes-0.5.0 --conf spark.kubernetes.submission.waitAppCompletion=false",
```

Note that each of the labels previously discussed are also applied to the _driver_ and _executor_ pods.

For these invocations, the `argv:` is nearly identical to non-kubernetes configurations, invoking a `run.sh` script which essentially holds the `spark-submit` invocation that takes the aforementioned `SPARK_OPTS` as its primary parameter:

```json
{
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_kubernetes/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "lazy"
  ]
}
```

### Confirming deployment and the service port mapping

```bash
kubectl get all --all-namespaces -l app=enterprise-gateway

NAME                        DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/enterprise-gateway   1         1         1            1           2h

NAME                               DESIRED   CURRENT   READY   AGE
rs/enterprise-gateway-74c46cb7fc   1         1         1       2h

NAME                                     READY   STATUS    RESTARTS   AGE
po/enterprise-gateway-74c46cb7fc-jrkl7   1/1     Running   0          2h

NAME                     TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
svc/enterprise-gateway   NodePort   10.110.253.220   <none>        8888:32422/TCP   2h
```

Of particular importance is the mapping to port `8888` (e.g., `32422`). If you are performing this on the same host as where the notebook will run, then you will need to note the cluster-ip entry (e.g., `10.110.253.220`). (Note: if the number of replicas is > 1, then you will see two pods listed with different five-character suffixes.)

```{tip}
You can avoid the need to point at a different port each time EG is launched by adding an `externalIPs:` entry to the `spec:` section of the `service.yaml` file. This entry can be specified in the `values.yaml` via the `service.externalIPs.k8sMasterPublicIP` entry.
```

The value of the `JUPYTER_GATEWAY_URL` used by the gateway-enabled Notebook server will vary depending on whether you choose to define an external IP or not. If an external IP is defined, you'll set `JUPYTER_GATEWAY_URL=<external-ip>:8888`; otherwise, you'll set `JUPYTER_GATEWAY_URL=<k8s-master>:32422` **but also need to restart clients each time Enterprise Gateway is started.** As a result, use of the `externalIPs:` value is highly recommended.
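As a sketch (the host names are placeholders), a gateway-enabled client would then be pointed at the service like so:

```bash
# With an external IP defined on the service (stable port 8888):
jupyter lab --gateway-url=http://<external-ip>:8888

# Without an external IP, use the NodePort mapping noted above (e.g., 32422):
jupyter lab --gateway-url=http://<k8s-master>:32422
```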
## Kubernetes Tips

The following items illustrate some useful commands for navigating Enterprise Gateway within a kubernetes environment.

- All objects created on behalf of Enterprise Gateway can be located using the label `app=enterprise-gateway`. You'll probably see duplicated entries for the deployments (deploy) and replica sets (rs) - we didn't include the duplicates here.

  ```bash
  kubectl get all -l app=enterprise-gateway --all-namespaces

  NAME                        DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
  deploy/enterprise-gateway   1         1         1            1           3h

  NAME                               DESIRED   CURRENT   READY   AGE
  rs/enterprise-gateway-74c46cb7fc   1         1         1       3h

  NAME                                            READY   STATUS    RESTARTS   AGE
  pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62   1/1     Running   0          8s
  pod/enterprise-gateway-74c46cb7fc-jrkl7          1/1     Running   0          3h
  ```

- All objects related to a given kernel can be located using the label `kernel_id=<kernel-id>`

  ```bash
  kubectl get all -l kernel_id=5e755458-a114-4215-96b7-bcb016fc7b62 --all-namespaces

  NAME                                            READY   STATUS    RESTARTS   AGE
  pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62   1/1     Running   0          28s
  ```

  Note: because kernels are, by default, isolated to their own namespace, you could also find all objects of a given kernel using only the `--namespace <kernel-namespace>` clause.

- To enter into a given pod (i.e., container) in order to get a better idea of what might be happening within the container, use the exec command with the pod name:

  ```bash
  kubectl exec -it enterprise-gateway-74c46cb7fc-jrkl7 -- /bin/bash
  ```

- Logs can be accessed against the pods or deployment (requires the object type prefix, e.g., `pod/`):

  ```bash
  kubectl logs -f pod/alice-5e755458-a114-4215-96b7-bcb016fc7b62
  ```

  Note that if using multiple replicas, commands against each pod are required.

- The Kubernetes dashboard is useful as well. It's located at port `3.2.3` of the master node:

  ```bash
  https://elyra-kube1.foo.bar.com:3.2.3/dashboard/#!/overview?namespace=default
  ```

  From there, logs can be accessed by selecting the `Pods` option in the left-hand pane followed by the _lined_ icon on the far right.

- User "system:serviceaccount:default:default" cannot list pods in the namespace "default"

  On a recent deployment, Enterprise Gateway was not able to create or list kernel pods. The following command was found to be necessary. (Kubernetes security relative to Enterprise Gateway is still under construction.)

  ```bash
  kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:default
  ```

================================================
FILE: docs/source/operators/deploy-single.md
================================================

# Single-server deployments

Single-server deployment can be useful for development and is not meant to be run in production environments, as it subjects the gateway server to resource exhaustion. Steps to deploy a single server are:

1. [Install Enterprise Gateway](installing-eg.md)
1. [Install the desired kernels](installing-kernels.md)
1. Install and configure the server and desired kernel specifications (see below)
1. [Launch Enterprise Gateway](launching-eg.md)
If you just want to try Enterprise Gateway in a single-server setup, you can use the following kernel specification (no need for a kernel launcher since the kernel runs locally):

```json
{
  "display_name": "Python 3 Local",
  "language": "python",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy"
    }
  },
  "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"]
}
```

`process_proxy` is optional (if Enterprise Gateway encounters a kernel specification without the `process_proxy` stanza, it will treat that specification as if it contained `LocalProcessProxy`). See the installation sketch at the end of this section.

```{tip}
You can run a local kernel in [Distributed mode](./deploy-distributed.md) by setting `remote_hosts` to the localhost. Why would you do that?

1. One reason is that it decreases the window in which a port conflict can occur, since the 5 kernel ports are created by the launcher (within the same process and therefore closer to the actual invocation of the kernel) rather than by the server prior to the launch of the kernel process.
2. The second reason is that auto-restarted kernels - when an issue occurs, say due to a port conflict - will create a new set of ports rather than try to re-use the same set that produced the failure in the first place.

In this case, you'd want to use the [per-kernel configuration](config-kernel-override.md#per-kernel-configuration-overrides) approach and set `remote_hosts` in the config stanza of the `process_proxy` stanza (using the stanza instead of the global `EG_REMOTE_HOSTS` allows you to not interfere with the configuration of other resource managers, e.g. Spark Standalone or YARN Client kernels - those other kernels need to be able to continue leveraging the full cluster nodes).
```
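As a minimal sketch (the directory name `python3_local` is arbitrary), the specification above can be installed by placing it, as `kernel.json`, in a directory on the Jupyter kernel search path:

```bash
# Hypothetical location - any directory on the Jupyter kernel search path works
mkdir -p /usr/local/share/jupyter/kernels/python3_local
# Save the kernel specification shown above as kernel.json in that directory,
# then confirm it is visible:
jupyter kernelspec list
```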
================================================
FILE: docs/source/operators/deploy-yarn-cluster.md
================================================

# Hadoop YARN deployments

To leverage the full distributed capabilities of Jupyter Enterprise Gateway, there is a need to provide additional configuration options in a cluster deployment. The following sample kernelspecs are currently available on YARN cluster:

- spark_R_yarn_cluster
- spark_python_yarn_cluster
- spark_scala_yarn_cluster

Steps required to complete deployment on a Hadoop YARN cluster are:

1. [Install Enterprise Gateway](installing-eg.md) on the primary node of the Hadoop YARN cluster. Note, this location is not a hard requirement, but is recommended. If installed remotely, some extra configuration will be necessary relative to the Hadoop configuration.
1. [Install the desired kernels](installing-kernels.md)
1. Install and configure the server and desired kernel specifications (see below)
1. [Launch Enterprise Gateway](launching-eg.md)

The distributed capabilities are currently based on an Apache Spark cluster utilizing Hadoop YARN as the resource manager and thus require the following environment variables to be set to facilitate the integration between Apache Spark and Hadoop YARN components:

- `SPARK_HOME` must point to the Apache Spark installation path

```
SPARK_HOME=/usr/hdp/current/spark2-client  # For HDP distribution
```

- `EG_YARN_ENDPOINT` must point to the YARN resource manager endpoint if remote from the YARN cluster

```
EG_YARN_ENDPOINT=http://${YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster
```

```{note}
If Enterprise Gateway is using an applicable `HADOOP_CONF_DIR` that contains a valid `yarn-site.xml` file, then this config value can remain unset (default = None) and the YARN client library will locate the appropriate resource manager from the configuration. This is also true in cases where the YARN cluster is configured for high availability.
```

If Enterprise Gateway is remote from the YARN cluster (i.e., no `HADOOP_CONF_DIR`) and the YARN cluster is configured for high availability, then the alternate endpoint should also be specified...

```
EG_ALT_YARN_ENDPOINT=http://${ALT_YARN_RESOURCE_MANAGER_FQDN}:8088/ws/v1/cluster  # Common to YARN deployment
```

## Configuring Kernels for YARN Cluster mode

For each supported kernel (IPyKernel for Python, Apache Toree for Scala, and IRkernel for R), we have provided sample kernel configurations and launchers as assets associated with each [Enterprise Gateway release](https://github.com/jupyter-server/enterprise_gateway/releases). For Hadoop YARN configurations, you can access those specific kernel specifications within the `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` file. (Replace `VERSION` with the desired release number.)

```{note}
The sample kernel specifications in `jupyter_enterprise_gateway_kernelspecs_yarn-VERSION.tar.gz` also contain specifications for YARN client mode (in addition to cluster mode). Both are usable in this situation.
```

```{tip}
We recommend installing kernel specifications into a shared folder like `/usr/local/share/jupyter/kernels`. This is the location in which they reside within container images and where many of the document references assume they'll be located.
```

### Python Kernel (IPython kernel)

Considering we would like to enable the IPython kernel to run on YARN Cluster and Client mode, we would have to copy the sample configuration folder **spark_python_yarn_cluster** to where the Jupyter kernels are installed (see `jupyter kernelspec list`):

```bash
wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz
KERNELS_FOLDER=/usr/local/share/jupyter/kernels
tar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_python_yarn_cluster/ spark_python_yarn_cluster/
```

For more information about the IPython kernel, please visit the [IPython kernel](https://ipython.readthedocs.io/en/stable/) page.
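To verify the extraction landed where expected, you can list the resulting folder (a sketch; exact contents may vary by release):

```bash
ls -R $KERNELS_FOLDER/spark_python_yarn_cluster
# Expect at least a kernel.json and a bin/run.sh referenced by the argv: stanza
```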
### Scala Kernel (Apache Toree)

Considering we would like to enable the Scala kernel to run on YARN Cluster and Client mode, we would have to copy the sample configuration folder **spark_scala_yarn_cluster** to where the Jupyter kernels are installed (see `jupyter kernelspec list`):

```bash
wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz
KERNELS_FOLDER=/usr/local/share/jupyter/kernels
tar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_scala_yarn_cluster/ spark_scala_yarn_cluster/
```

For more information about the Scala kernel, please visit the [Apache Toree](https://toree.apache.org/) page.

### R Kernel (IRkernel)

Considering we would like to enable the IRkernel to run on YARN Cluster and Client mode, we would have to copy the sample configuration folder **spark_R_yarn_cluster** to where the Jupyter kernels are installed (see `jupyter kernelspec list`):

```bash
wget https://github.com/jupyter-server/enterprise_gateway/releases/download/v3.2.3/jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz
KERNELS_FOLDER=/usr/local/share/jupyter/kernels
tar -zxvf jupyter_enterprise_gateway_kernelspecs-3.2.3.tar.gz --strip 1 --directory $KERNELS_FOLDER/spark_R_yarn_cluster/ spark_R_yarn_cluster/
```

For more information about the IRkernel, please visit the [IRkernel](https://irkernel.github.io/) page.

### Adjusting the kernel specifications

After installing the kernel specifications, you should have a `kernel.json` that resembles the following (this one is relative to the Python kernel):

```json
{
  "language": "python",
  "display_name": "Spark - Python (YARN Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "PYSPARK_PYTHON": "/opt/conda/bin/python",
    "PYTHONPATH": "${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
    "SPARK_YARN_USER_ENV": "PYTHONUSERBASE=/home/yarn/.local,PYTHONPATH=${HOME}/.local/lib/python3.10/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip,PATH=/opt/conda/bin:$PATH",
    "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}"
  ]
}
```

The `metadata` and `argv` entries for each kernel specification should be nearly identical and not require changes. You will need to adjust the `env` entries to apply to your specific configuration.

You should also check the same kinds of environment and path settings in the corresponding `bin/run.sh` file - although changes are not typically necessary.

After making any necessary adjustments, such as updating `SPARK_HOME` or other environment-specific configuration and paths, you should now have a new kernel available to execute your notebook cell code distributed on a Hadoop YARN Spark cluster.

================================================
FILE: docs/source/operators/index.rst
================================================

Operators Guide
===============

These pages are targeted at *operators* that need to deploy and configure a Jupyter Enterprise Gateway instance.
.. admonition:: Use cases

   - *As an operator, I want to fix the bottleneck on the Jupyter Kernel Gateway server due to the large number of kernels running on it and the size of each kernel (spark driver) process, by deploying the Enterprise Gateway, such that kernels can be launched as managed resources within a Hadoop YARN cluster, distributing the resource-intensive driver processes across the cluster, while still allowing multiple data analysts to leverage the compute power of a large cluster.*
   - *As an operator, I want to constrain applications to specific port ranges so I can more easily identify issues and manage network configurations that adhere to my corporate policy.*
   - *As an operator, I want to constrain the number of active kernels that each of my users can have at any given time.*

Deploying Enterprise Gateway
----------------------------

The deployment of Enterprise Gateway consists of several items, depending on the nature of the target environment. Because this topic differs depending on whether the runtime environment is targeting containers or traditional servers, we've separated the discussions accordingly.

Container-based deployments
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Enterprise Gateway includes support for two forms of container-based environments, Kubernetes and Docker.

.. toctree::
   :maxdepth: 1
   :name: container-deployments

   deploy-kubernetes
   deploy-docker

Server-based deployments
~~~~~~~~~~~~~~~~~~~~~~~~

Tasks for traditional server deployments are nearly identical with respect to Enterprise Gateway's installation and invocation, differing slightly with how the kernel specifications are configured. As a result, we marked those topics as "common" relative to the others.

.. toctree::
   :maxdepth: 1
   :name: node-deployments

   installing-eg
   installing-kernels
   launching-eg
   deploy-yarn-cluster
   deploy-conductor
   deploy-distributed
   deploy-single

Configuring Enterprise Gateway
------------------------------

Jupyter Enterprise Gateway adheres to `Jupyter's common configuration approach `_ . You can configure an instance of Enterprise Gateway using a configuration file (recommended), via command-line parameters, or by setting the corresponding environment variables.

.. toctree::
   :maxdepth: 1
   :name: configuring

   config-file
   config-cli
   config-add-env
   config-env-debug
   config-sys-env
   config-kernel-override
   config-dynamic
   config-culling
   config-availability
   config-security

================================================
FILE: docs/source/operators/installing-eg.md
================================================

# Installing Enterprise Gateway (common)

For new users, we **highly recommend** [installing Anaconda](https://www.anaconda.com/download). Anaconda conveniently installs Python, the [Jupyter Notebook](https://jupyter.readthedocs.io/en/latest/install.html), the [IPython kernel](http://ipython.readthedocs.io/en/stable/install/kernel_install.html) and other commonly used packages for scientific computing and data science.

Use the following installation steps:

- Download [Anaconda](https://www.anaconda.com/download). We recommend downloading Anaconda's latest Python version (currently Python 3.11+).
- Install the version of Anaconda which you downloaded, following the instructions on the download page.
- Install the latest version of Jupyter Enterprise Gateway from [PyPI](https://pypi.python.org/pypi/jupyter_enterprise_gateway/) or [conda forge](https://conda-forge.org/) along with its dependencies.

```{warning}
Enterprise Gateway is currently incompatible with `jupyter_client >= 7.0`.
As a result, you should **not** install Enterprise Gateway into the same Python environment in which you intend to run Jupyter Notebook or JupyterLab, since they will likely be using `jupyter_client >= 7.0`. Since Enterprise Gateway is typically installed on servers remote from the notebook users, this is usually not an issue.
```

```{note}
**Known Dependency Constraints:** Enterprise Gateway pins several key dependencies:

- `jupyter_client < 7` -- Enterprise Gateway's process proxy mechanism is incompatible with the kernel provisioner framework introduced in jupyter_client 7.x. This cap will be removed when EG adopts kernel provisioners (targeted for 4.0).
- `jupyter_server < 2.0` -- For the same kernel provisioner compatibility reason.
- `pyzmq < 25.0` -- pyzmq 25 removed deprecated APIs that jupyter_client 6.x still relies on.

These constraints mean EG should be installed in a dedicated Python environment separate from notebook/lab installations that use newer versions of these packages.
```

```bash
# install using pip from pypi
pip install --upgrade jupyter_enterprise_gateway
```

```bash
# install using conda from conda forge
conda install -c conda-forge jupyter_enterprise_gateway
```

At this point, the Jupyter Enterprise Gateway deployment provides local kernel support which is fully compatible with Jupyter Kernel Gateway.

To uninstall Jupyter Enterprise Gateway...

```bash
# uninstall using pip
pip uninstall jupyter_enterprise_gateway
```

```bash
# uninstall using conda
conda uninstall jupyter_enterprise_gateway
```

================================================
FILE: docs/source/operators/installing-kernels.md
================================================

# Installing supported kernels (common)

Enterprise Gateway includes kernel specifications that support the following kernels:

- IPython kernel (Python)
- Apache Toree (Scala)
- IRkernel (R)

Refer to the following for instructions on installing the respective kernels. For cluster-based environments, these steps should be performed on each applicable node of the cluster, unless noted otherwise.

## Python Kernel (IPython kernel)

The IPython kernel comes pre-installed with Anaconda and we have tested with its default version of [IPython kernel](https://ipython.readthedocs.io/en/stable/).

```{admonition} Important!
:class: warning
For proper operation across the cluster, the Python kernel package (not the kernel specification) must be installed on every node of the cluster available to Enterprise Gateway. For example, run `pip install ipykernel` on each applicable node. This step is also required for the IRkernel (see below). However, it is **not** required for the Scala (Apache Toree) kernel, as that can be expressed as a dependency in the `spark-submit` invocation.
```

## Scala Kernel (Apache Toree)

We have tested the latest version of [Apache Toree](https://toree.apache.org/) with Scala 2.11 support. Please note that the Apache Toree kernel is now bundled in the kernelspecs tar file for each of the Scala kernelspecs provided by Enterprise Gateway. The sample kernel specifications included in Enterprise Gateway include the necessary Apache Toree libraries, so its installation is not necessary. In addition, because Apache Toree targets Spark installations, its distribution can be achieved via `spark-submit` and its installation is not necessary on worker nodes - except for [distributed deployments](deploy-distributed.md).
## R Kernel (IRkernel)

Perform the following steps on the Jupyter Enterprise Gateway hosting system as well as on all worker nodes. Please refer to the [IRkernel documentation](https://irkernel.github.io/) for further details.

```bash
conda install --yes --quiet -c r r-essentials r-irkernel r-argparse

# Create an R-script to run and install packages and update IRkernel
cat <<'EOF' > install_packages.R
install.packages(c('repr', 'IRdisplay', 'evaluate', 'git2r', 'crayon', 'pbdZMQ',
                   'devtools', 'uuid', 'digest', 'RCurl', 'curl', 'argparse'),
                 repos='http://cran.rstudio.com/')
devtools::install_github('IRkernel/IRkernel@0.8.14')
IRkernel::installspec(user = FALSE)
EOF

# run the package install script
$ANACONDA_HOME/bin/Rscript install_packages.R

# OPTIONAL: check the installed R packages
ls $ANACONDA_HOME/lib/R/library
```

================================================
FILE: docs/source/operators/launching-eg.md
================================================

# Launching Enterprise Gateway (common)

Very few arguments are necessary to minimally start Enterprise Gateway. The following could be considered a minimal command:

```bash
jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0
```

where `--ip=0.0.0.0` exposes Enterprise Gateway on the public network and `--port_retries=0` ensures that a single instance will be started.

```{note}
The ability to target resource-managed clusters (and use remote kernels) will require additional configuration settings depending on the resource manager. For additional information, see the appropriate server-based deployment topic of our Operators Guide.
```

We recommend starting Enterprise Gateway as a background task. As a result, you might find it best to create a start script to maintain options, file redirection, etc.

The following script starts Enterprise Gateway with `DEBUG` tracing enabled (default is `INFO`) and idle kernel culling for any kernels idle for 12 hours, with idle checks occurring every 60 seconds. The Enterprise Gateway log can then be monitored via `tail -F enterprise_gateway.log` and it can be stopped via `kill $(cat enterprise_gateway.pid)`.

```bash
#!/bin/bash
LOG=/var/log/enterprise_gateway.log
PIDFILE=/var/run/enterprise_gateway.pid

jupyter enterprisegateway --ip=0.0.0.0 --port_retries=0 --log-level=DEBUG \
  --RemoteKernelManager.cull_idle_timeout=43200 \
  --MappingKernelManager.cull_interval=60 > $LOG 2>&1 &

if [ "$?" -eq 0 ]; then
  echo $! > $PIDFILE
else
  exit 1
fi
```

```{tip}
Remember that any options set via the command line will not be available for [dynamic configuration functionality](config-dynamic.md#dynamic-configurables).
```

================================================
FILE: docs/source/other/index.rst
================================================

Other helpful information
=========================

This section includes some additional information you might find helpful and that spans the various *guides*, like troubleshooting and related resources.

.. toctree::
   :maxdepth: 1
   :name: other

   troubleshooting
   related-resources

================================================
FILE: docs/source/other/related-resources.md
================================================

# Related Resources

Here are some resources related to the Jupyter Enterprise Gateway project.
- [Jupyter.org](https://jupyter.org)
- [Jupyter Server Team Compass](https://github.com/jupyter-server/team-compass#jupyter-server-team-compass)
- [Jupyter Calendar - Community Meetings](https://docs.jupyter.org/en/latest/community/content-community.html#jupyter-community-meetings)
- [Jupyter Community Discourse Forum](https://discourse.jupyter.org/)
- [Jupyter Kernel Gateway Github Repo](https://github.com/jupyter-server/kernel_gateway) - the source code for Kernel Gateway, which supports local kernels and notebook-hosted endpoints.
- [Jupyter Server Github Repo](https://github.com/jupyter-server/jupyter_server) - the source code for the Jupyter Server. Many of Enterprise Gateway's handlers and kernel management classes either _are_ or are derived from the Jupyter Server classes.
- [Jupyter Notebook Github Repo](https://github.com/jupyter/notebook) - the source code for the classic Notebook, from which the gateways and Jupyter Server were derived.
- [Jupyter Client Github Repo](https://github.com/jupyter/jupyter_client) - the source code for the base kernel lifecycle management and message classes. Enterprise Gateway extends the `KernelManager` classes of `jupyter_client`.

================================================
FILE: docs/source/other/troubleshooting.md
================================================

# Troubleshooting Guide

This page identifies scenarios we've encountered when running Enterprise Gateway. We also provide instructions for setting up a debug environment on our [Debugging Jupyter Enterprise Gateway](../contributors/debug.md) page.

## Fresh Install

Scenario: **I just installed Enterprise Gateway but nothing happens, how do I proceed?**

Because Enterprise Gateway is one element of a networked application, there are various _touch points_ that should be validated independently. The following items can be used as a checklist to confirm general operability.

1. Confirm that Enterprise Gateway is servicing general requests. This can be accomplished using the following `curl` command, which should produce the JSON corresponding to the configured kernelspecs:

   ```bash
   curl http://<gateway-host>:<port>/api/kernelspecs
   ```

1. Independently validate any resource manager you're running against. Various resource managers usually provide examples for how to go about validating their configuration. (See the sketch following this list.)
1. Confirm that the Enterprise Gateway arguments for contacting the configured resource manager are in place. These should be covered in the deployment section of our Operators Guide.
1. If using a Notebook server as your front-end, ensure that the Gateway configuration options or NB2KG extension settings are properly configured. Once the notebook has started, a refresh on the tree view should issue the same `kernelspecs` request in step 1, and the drop-down menu items for available kernels should reflect an entry for each kernelspec returned.
1. **Always** consult your Enterprise Gateway log file. If you have not redirected `stdout` and `stderr` to a file, you are highly encouraged to do so. In addition, you should enable `DEBUG` logging at least until your configuration is stable. Please note, however, that you may be asked to produce an Enterprise Gateway log with `DEBUG` enabled when reporting issues. An example of output redirection and `DEBUG` logging is also provided in our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common).
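For step 2, a quick sanity check depends on the resource manager in play. As a sketch (assuming the respective CLIs are installed and configured), the following commands confirm the cluster can see healthy nodes:

```bash
# Hadoop YARN: list the node managers known to the resource manager
yarn node -list

# Kubernetes: confirm the nodes are Ready
kubectl get nodes
```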
## Hadoop YARN Cluster Mode

Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Cluster Mode, but it failed with a "Kernel error" and State: 'FAILED'.**

1. Check the output from Enterprise Gateway for an error message. If an applicationId was generated, make a note of it. For example, you can locate the applicationId `application_15065522733.2.3011` from the following snippet of message:

   ```
   [D 2017-09-28 17:13:22.675 EnterpriseGatewayApp] 13: State: 'ACCEPTED', Host: 'burna2.yourcompany.com', KernelID: '28a5e827-4676-4415-bbfc-ac30a0dcc4c3', ApplicationID: 'application_15065522733.2.3011'
   17/09/28 17:13:22 INFO YarnClientImpl: Submitted application application_15065522733.2.3011
   17/09/28 17:13:22 INFO Client: Application report for application_15065522733.2.3011 (state: ACCEPTED)
   17/09/28 17:13:22 INFO Client:
        client token: N/A
        diagnostics: AM container is launched, waiting for AM container to Register with RM
        ApplicationMaster host: N/A
        ApplicationMaster RPC port: -1
        queue: default
        start time: 1506644002471
        final status: UNDEFINED
        tracking URL: http://burna1.yourcompany.com:8088/proxy/application_15065522733.2.3011/
   ```

1. Look up the YARN log for that applicationId in the YARN ResourceManager UI:

   ![YARN ResourceManager UI](../images/yarnui.jpg)

1. Drill down from the applicationId to find logs for the failed attempts and take appropriate actions. For example, for the error below,

   ```
   Traceback (most recent call last):
     File "launch_ipykernel.py", line 7, in <module>
       from ipython_genutils.py3compat import str_to_bytes
   ImportError: No module named ipython_genutils.py3compat
   ```

   simply running `pip install ipython_genutils` should fix the problem. If Anaconda is installed, make sure the environment variable for Python, i.e. `PYSPARK_PYTHON`, is properly configured in the kernelspec and matches the actual Anaconda installation directory.

## SSH Permissions

Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode, but it failed with a "Kernel error" and an `AuthenticationException`.**

```
[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'AuthenticationException' occurred when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', message='Authentication failed.'.
```

This error indicates that password-less ssh may not be properly configured. Password-less ssh needs to be configured on the node that Enterprise Gateway is running on to all other worker nodes (a configuration sketch follows this section).

You might also see an `SSHException` indicating a similar issue.

```
[E 2017-09-29 11:13:23.277 EnterpriseGatewayApp] Exception 'SSHException' occurred when creating a SSHClient connecting to 'xxx.xxx.xxx.xxx' with user 'elyra', message='No authentication methods available.'.
```

In general, you can look for more information in the kernel log for YARN Client kernels. The default location is `/tmp` with a filename of `kernel-<kernel_id>.log`. The location can be configured using the environment variable `EG_KERNEL_LOG_DIR` during Enterprise Gateway start up.

```{seealso}
[Launching Enterprise Gateway](../operators/launching-eg.md#launching-enterprise-gateway-common) for an example of starting the Enterprise Gateway from a script and the [Operators Guide](../operators/config-add-env.md#additional-environment-variables) for a list of configurable environment variables.
```
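Returning to the password-less ssh requirement above, a typical setup sketch looks like the following (host names and the service user are placeholders; your security policies may require a different approach):

```bash
# On the Enterprise Gateway node, as the service user (e.g., 'elyra'):
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa   # generate a key pair without a passphrase
# Copy the public key to each worker node listed in EG_REMOTE_HOSTS:
for host in node2 node3 node4; do
  ssh-copy-id "$host"
done
# Verify that login no longer prompts for a password:
ssh node2 hostname
```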
## SSH Tunneling

Scenario: **I'm trying to launch a (Python/Scala/R) kernel in YARN Client Mode with SSH tunneling enabled, but it failed with a "Kernel error" and an SSHException.**

```
[E 2017-10-26 11:48:20.922 EnterpriseGatewayApp] The following exception occurred waiting for connection file response for KernelId 'da3d0dde-9de1-44b1-b1b4-e6f3cf52dfb9' on host 'remote-host-name': The authenticity of the host can't be established.
```

This error indicates that the fingerprint of the ECDSA key of the remote host has not been added to the list of known hosts on the host from which the SSH tunnel is being established.

For example, if Enterprise Gateway is running on `node1` under service-user `jdoe` and the environment variable `EG_REMOTE_HOSTS` is set to `node2,node3,node4`, then kernels can be launched on any of those hosts and an SSH tunnel will be established between `node1` and any of those hosts.

To address this issue, you need to perform a one-time step that requires you to login to `node1` as `jdoe` and manually SSH into each of the remote hosts and accept the fingerprint of the ECDSA key of the remote host to be added to the list of known hosts, as shown below:

```
[jdoe@node1 ~]$ ssh node2
The authenticity of host 'node2 (172.16.207.191)' can't be established.
ECDSA key fingerprint is SHA256:Mqi3txf4YiRC9nXg8a/4gQq5vC4SjWmcN1V5Z0+nhZg.
ECDSA key fingerprint is MD5:bc:4b:b2:39:07:98:c1:0b:b4:c3:24:38:92:7a:2d:ef.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,172.16.207.191' (ECDSA) to the list of known hosts.
[jdoe@node2 ~] exit
```

Repeat the aforementioned step as `jdoe` on `node1` for each of the hosts listed in `EG_REMOTE_HOSTS` and restart Enterprise Gateway.

## Kernel Encounters `TypeError`

Scenario: **I'm trying to launch a (Python/Scala/R) kernel, but it failed with `TypeError: Incorrect padding`.**

```
Traceback (most recent call last):
  File "/opt/conda/lib/python3.8/site-packages/tornado/web.py", line 1512, in _execute
    result = yield result
  File "/opt/conda/lib/python3.8/site-packages/tornado/gen.py", line 1055, in run
    value = future.result()
  ...
  ...
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/kernels/remotemanager.py", line 125, in _launch_kernel
    return self.process_proxy.launch_process(kernel_cmd, **kw)
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/yarn.py", line 63, in launch_process
    self.confirm_remote_startup(kernel_cmd, **kw)
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/yarn.py", line 174, in confirm_remote_startup
    ready_to_connect = self.receive_connection_info()
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 565, in receive_connection_info
    raise e
TypeError: Incorrect padding
```

To address this issue, first ensure that the launchers used for each kernel are derived from the same release as the Enterprise Gateway server. Next, ensure that `pycryptodomex 3.9.7` or later is installed on all hosts, using either `pip install` or `conda install` as shown below:

```
[jdoe@node1 ~]$ pip uninstall pycryptodomex
[jdoe@node1 ~]$ pip install pycryptodomex
```

or

```
[jdoe@node1 ~]$ conda install pycryptodomex
```

This should be done on the host running Enterprise Gateway as well as on all the remote hosts on which the kernel is launched.
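To confirm the installed version on each host, a quick check such as the following can help (a sketch; note that `pycryptodomex` installs under the `Cryptodome` package namespace):

```bash
# Prints the installed pycryptodomex version - should be 3.9.7 or later
python -c "import Cryptodome; print(Cryptodome.__version__)"
# Alternatively:
pip show pycryptodomex
```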
## Port Range

Scenario: **I'm trying to launch a (Python/Scala/R) kernel with a port range, but it failed with `RuntimeError: Invalid port range`.**

```
Traceback (most recent call last):
  File "/opt/conda/lib/python3.8/site-packages/tornado/web.py", line 1511, in _execute
    result = yield result
  File "/opt/conda/lib/python3.8/site-packages/tornado/gen.py", line 1055, in run
    value = future.result()
  ....
  ....
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 478, in __init__
    super(RemoteProcessProxy, self).__init__(kernel_manager, proxy_config)
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 87, in __init__
    self._validate_port_range(proxy_config)
  File "/opt/conda/lib/python3.8/site-packages/enterprise_gateway/services/processproxies/processproxy.py", line 407, in _validate_port_range
    "port numbers is (1024, 65535).".format(self.lower_port))
RuntimeError: Invalid port range '1000..2000' specified. Range for valid port numbers is (1024, 65535).
```

To address this issue, make sure that the specified port range does not overlap with TCP's well-known port range of (0, 1024\].

## Hadoop YARN Timeout

Scenario: **I'm trying to launch a (Python/Scala/R) kernel, but it times out and the YARN application status remains `ACCEPTED`.**

The Enterprise Gateway log from the server will look like the one below and will complain that there are no resources: `launch timeout due to: YARN resources unavailable`

```bash
State: 'ACCEPTED', Host: '', KernelID: '3181db50-8bb5-4f91-8556-988895f63efa', ApplicationID: 'application_1537119233094_0001'
State: 'ACCEPTED', Host: '', KernelID: '3181db50-8bb5-4f91-8556-988895f63efa', ApplicationID: 'application_1537119233094_0001'
...
...
SIGKILL signal sent to pid: 19690
YarnClusterProcessProxy.kill, application ID: application_1537119233094_0001, kernel ID: 3181db50-8bb5-4f91-8556-988895f63efa, state: ACCEPTED
KernelID: '3181db50-8bb5-4f91-8556-988895f63efa' launch timeout due to: YARN resources unavailable after 61.0 seconds for app application_1537119233094_0001, launch timeout: 60.0! Check YARN configuration.
```

The most common cause for this is that the YARN Resource Managers are failing to start and the cluster sees no resources available. Make sure the YARN Resource Managers are running OK. We have also noticed that, in Kerberized environments, sometimes there are issues with directory access rights that cause the YARN Resource Managers to fail to start; this can be corrected by validating the existence of `/hadoop/yarn` and that it's owned by `yarn:hadoop`.

## Kernel Resources

Scenario: **My kernel keeps dying when processing jobs that require a large amount of resources (e.g. large files).**

This is usually seen when you are trying to use more resources than are available for your kernel. To address this issue, increase the amount of memory available for your Hadoop YARN application, or for whichever resource manager is managing the kernel. For example, on Kubernetes, this may be a time when the kernel specification's [kernel-pod.yaml.j2](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2) file should be extended with resource quotas.
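As a sketch for Spark-based kernels, additional memory can also be requested per kernel start via `KERNEL_EXTRA_SPARK_OPTS` (values illustrative; see the Kernel Environment Variables page for details):

```bash
# Illustrative values - forwarded with the kernel start request by gateway clients
export KERNEL_EXTRA_SPARK_OPTS="--conf spark.driver.memory=2g --conf spark.executor.memory=2g"
```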
## Kerberos

Scenario: **I'm trying to use a notebook with user impersonation on a Kerberos-enabled cluster, but it fails to authenticate.**

When using user impersonation in a YARN cluster with Kerberos authentication, if Kerberos is not set up properly you will usually see the following warning in your Enterprise Gateway log that will keep a notebook from connecting:

```bash
WARN Client: Exception encountered while connecting to the server : javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]
```

The most common cause for this WARN is when the user that started Enterprise Gateway is not authenticated with Kerberos. This can happen when the user has either not run `kinit` or their previous ticket has expired.

## OpenShift Kubernetes

Scenario: **Running Jupyter Enterprise Gateway on an OpenShift Kubernetes environment fails trying to create /home/jovyan/.local.**

As described [in the OpenShift Admin Guide](https://docs.openshift.com/container-platform/4.10/openshift_images/create-images.html), there is a need to issue the following command to enable running with `USER` in the Dockerfile:

```bash
oc adm policy add-scc-to-group anyuid system:authenticated
```

## Opening an issue

Scenario: **None of the scenarios on this page match or resolve my issue, what do I do next?**

If you are unable to resolve your issue, take a look at our [open issues list](https://github.com/jupyter-server/enterprise_gateway/issues) to see if there is an applicable scenario already reported. If found, please add a comment to the issue so that we can get a sense of urgency (although all issues are important to us). If not found, please provide the following information if possible in a **new issue**.

1. Describe the issue in as much detail as possible. This should include configuration information about your environment.
1. Gather and _attach_ the following files to the issue. If possible, please archive the files first.
   1. The **complete** Enterprise Gateway log file. If possible, please enable `DEBUG` logging that encompasses the issue. You can refer to this section of our [Operators Guide](../operators/launching-eg.md#launching-enterprise-gateway-common) for redirection and `DEBUG` enablement.
   1. The log file(s) produced from the corresponding kernel. This is primarily a function of the underlying resource manager.
      - For containerized installations like Kubernetes or Docker Swarm, kernel log output can be captured by running the appropriate `logs` command against the pod or container, respectively. The names of the corresponding pod/container can be found in the Enterprise Gateway log.
      - For `Hadoop YARN` environments, you'll need to navigate to the appropriate log directory relative to the application ID associated with the kernel. The application ID can be located in the Enterprise Gateway log. If you have access to an administrative console, you can usually navigate to the application logs more easily.
   1. Although unlikely, the notebook log may also be helpful. If we find that the issue is more client-side related, we may ask for `DEBUG` logging there as well.
   1. If you have altered or created new kernel specifications, the files corresponding to the failing kernels would be helpful. These files could also be added to the attached archive or attached separately.

Please know that we understand that some information cannot be provided due to its sensitivity.
In such cases, just let us know and we'll be happy to approach the resolution of your issue from a different angle.

================================================
FILE: docs/source/users/client-config.md
================================================

# Gateway Client Configuration

The set of Gateway Client configuration options includes the following. To get the current set of supported options, run the following:

```bash
jupyter server --help-all
```

or

```bash
jupyter server --generate-config
```

The following is produced from the `--help-all` option. To determine the corresponding configuration file option, replace `--` with `c.`.

```
--GatewayClient.auth_scheme=
    The auth scheme, added as a prefix to the authorization token used in the HTTP headers.
    (JUPYTER_GATEWAY_AUTH_SCHEME env var)
    Default: None
--GatewayClient.auth_token=
    The authorization token used in the HTTP headers. The header will be formatted as::
        { 'Authorization': '{auth_scheme} {auth_token}' }
    (JUPYTER_GATEWAY_AUTH_TOKEN env var)
    Default: None
--GatewayClient.ca_certs=
    The filename of CA certificates or None to use defaults.
    (JUPYTER_GATEWAY_CA_CERTS env var)
    Default: None
--GatewayClient.client_cert=
    The filename for client SSL certificate, if any.
    (JUPYTER_GATEWAY_CLIENT_CERT env var)
    Default: None
--GatewayClient.client_key=
    The filename for client SSL key, if any.
    (JUPYTER_GATEWAY_CLIENT_KEY env var)
    Default: None
--GatewayClient.connect_timeout=
    The time allowed for HTTP connection establishment with the Gateway server.
    (JUPYTER_GATEWAY_CONNECT_TIMEOUT env var)
    Default: 40.0
--GatewayClient.env_whitelist=
    A comma-separated list of environment variable names that will be included, along with
    their values, in the kernel startup request. The corresponding `env_whitelist`
    configuration value must also be set on the Gateway server - since that configuration
    value indicates which environmental values to make available to the kernel.
    (JUPYTER_GATEWAY_ENV_WHITELIST env var)
    Default: ''
--GatewayClient.gateway_retry_interval=
    The time allowed for HTTP reconnection with the Gateway server for the first time. Next
    will be JUPYTER_GATEWAY_RETRY_INTERVAL multiplied by two in factor of numbers of retries
    but less than JUPYTER_GATEWAY_RETRY_INTERVAL_MAX.
    (JUPYTER_GATEWAY_RETRY_INTERVAL env var)
    Default: 1.0
--GatewayClient.gateway_retry_interval_max=
    The maximum time allowed for HTTP reconnection retry with the Gateway server.
    (JUPYTER_GATEWAY_RETRY_INTERVAL_MAX env var)
    Default: 30.0
--GatewayClient.gateway_retry_max=
    The maximum retries allowed for HTTP reconnection with the Gateway server.
    (JUPYTER_GATEWAY_RETRY_MAX env var)
    Default: 5
--GatewayClient.headers=
    Additional HTTP headers to pass on the request. This value will be converted to a dict.
    (JUPYTER_GATEWAY_HEADERS env var)
    Default: '{}'
--GatewayClient.http_pwd=
    The password for HTTP authentication.
    (JUPYTER_GATEWAY_HTTP_PWD env var)
    Default: None
--GatewayClient.http_user=
    The username for HTTP authentication.
    (JUPYTER_GATEWAY_HTTP_USER env var)
    Default: None
--GatewayClient.kernels_endpoint=
    The gateway API endpoint for accessing kernel resources
    (JUPYTER_GATEWAY_KERNELS_ENDPOINT env var)
    Default: '/api/kernels'
--GatewayClient.kernelspecs_endpoint=
    The gateway API endpoint for accessing kernelspecs
    (JUPYTER_GATEWAY_KERNELSPECS_ENDPOINT env var)
    Default: '/api/kernelspecs'
--GatewayClient.kernelspecs_resource_endpoint=
    The gateway endpoint for accessing kernelspecs resources
    (JUPYTER_GATEWAY_KERNELSPECS_RESOURCE_ENDPOINT env var)
    Default: '/kernelspecs'
--GatewayClient.request_timeout=
    The time allowed for HTTP request completion.
    (JUPYTER_GATEWAY_REQUEST_TIMEOUT env var)
    Default: 40.0
--GatewayClient.url=
    The url of the Kernel or Enterprise Gateway server where kernel specifications are
    defined and kernel management takes place. If defined, this Notebook server acts as a
    proxy for all kernel management and kernel specification retrieval.
    (JUPYTER_GATEWAY_URL env var)
    Default: None
--GatewayClient.validate_cert=
    For HTTPS requests, determines if server's certificate should be validated or not.
    (JUPYTER_GATEWAY_VALIDATE_CERT env var)
    Default: True
--GatewayClient.ws_url=
    The websocket url of the Kernel or Enterprise Gateway server. If not provided, this
    value will correspond to the value of the Gateway url with 'ws' in place of 'http'.
    (JUPYTER_GATEWAY_WS_URL env var)
    Default: None
```

================================================
FILE: docs/source/users/connecting-to-eg.md
================================================

# Connecting the server to Enterprise Gateway

To leverage the benefits of Enterprise Gateway, it's helpful to redirect a Jupyter server's kernel management to the Gateway server. This allows better separation of the user's notebooks from the managed compute cluster (Kubernetes, Hadoop YARN, Docker Swarm, etc.) on which Enterprise Gateway resides. A Jupyter server can be configured to relay kernel requests to an Enterprise Gateway server in several ways.

## Command line

To instruct the server to connect to an Enterprise Gateway instance running on host `<gateway-host>` on port `<gateway-port>`, the following command line options can be used:

```bash
jupyter lab --gateway-url=http://<gateway-host>:<gateway-port> --GatewayClient.http_user=guest --GatewayClient.http_pwd=guest-password
```

## Configuration file

If command line options are not appropriate for your environment, the Jupyter server configuration can be used to express Enterprise Gateway options. Note, however, that command line options always override configuration file options. In your `jupyter_server_config.py` file, add the following for the equivalent options:

```python
c.GatewayClient.url = "http://<gateway-host>:<gateway-port>"
c.GatewayClient.http_user = "guest"
c.GatewayClient.http_pwd = "guest-password"
```

## Docker image

All GatewayClient options have corresponding environment variable support, so if you have JupyterLab or Notebook already in a docker image, a corresponding docker invocation would look something like this:

```bash
docker run -t --rm \
  -e JUPYTER_GATEWAY_URL='http://<gateway-host>:<gateway-port>' \
  -e JUPYTER_GATEWAY_HTTP_USER=guest \
  -e JUPYTER_GATEWAY_HTTP_PWD=guest-password \
  -e LOG_LEVEL=DEBUG \
  -p 8888:8888 \
  -v ${HOME}/notebooks/:/tmp/notebooks \
  -w /tmp/notebooks \
  my-image
```

Notebook files residing in `${HOME}/notebooks` can then be accessed via `http://localhost:8888`.

## Connection Timeouts

Sometimes, depending on the kind of cluster Enterprise Gateway is servicing, connection establishment and kernel startup can take a while (sometimes upwards of minutes). This is particularly true for managed clusters that perform scheduling, like Hadoop YARN or Kubernetes. In these configurations it is important to configure both the connection and request timeout values. These options are handled by the `GatewayClient.connect_timeout` (env: `JUPYTER_GATEWAY_CONNECT_TIMEOUT`) and `GatewayClient.request_timeout` (env: `JUPYTER_GATEWAY_REQUEST_TIMEOUT`) options and default to 40 seconds. The `KERNEL_LAUNCH_TIMEOUT` environment variable will be set from these values or vice versa (whichever is greater). This value is used by EG to determine when it should give up waiting for the kernel's startup to complete, while the other timeouts are used by Lab or Notebook when establishing the connection to EG.
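As a sketch, increasing both timeouts for a slow-scheduling cluster might look like this (120 seconds is illustrative; the host placeholders are yours to fill in):

```bash
# Environment-variable form, picked up by the gateway-enabled Jupyter server
export JUPYTER_GATEWAY_CONNECT_TIMEOUT=120.0
export JUPYTER_GATEWAY_REQUEST_TIMEOUT=120.0
jupyter lab --gateway-url=http://<gateway-host>:<gateway-port>
```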
================================================
FILE: docs/source/users/index.rst
================================================

Users Guide
===========

Because Enterprise Gateway is a headless web server, it is typically accessed from other applications like JupyterLab and Jupyter Notebook.

.. admonition:: Use cases

   - *As a data scientist, I want to run my notebook using the Enterprise Gateway such that I can free up resources on my own laptop and leverage my company's large Hadoop YARN cluster to run my compute-intensive operations.*
   - *As a student, my Data Science 101 course is leveraging GPUs in our experiments. Since GPUs are expensive, we must share resources within the university's compute cluster and configure our Notebooks to leverage the department's Enterprise Gateway server, which can then spawn container-based kernels that have access to a GPU on Kubernetes.*

The following assumes an Enterprise Gateway server has been configured and deployed. Please consult the `operators <../operators/index.html>`_ documentation to deploy and configure the Enterprise Gateway server.

.. note::

   There are two primary client applications that can use Enterprise Gateway: JupyterLab running on Jupyter Server, and Jupyter Notebook. When a reference to a *Jupyter server* (lowercase 'server') or *the server* is made, the reference applies to both Jupyter Server and Jupyter Notebook. Generally speaking, the client-side behaviors are identical between the two, although references to Jupyter Server are preferred since it's more current. If anything is different, that difference will be noted; otherwise, please assume discussion of the two is interchangeable.

.. toctree::
   :maxdepth: 1
   :name: users

   installation
   connecting-to-eg
   client-config
   kernel-envs

.. other clients (nbclient, papermill)

================================================
FILE: docs/source/users/installation.md
================================================

# Installing the client

In terms of Enterprise Gateway, the client application is typically Jupyter Server (hosting JupyterLab) or Jupyter Notebook. These applications are then configured to connect to Enterprise Gateway.

To install Jupyter Server via `pip`:

```bash
pip install jupyter_server
```

or via `conda`:

```bash
conda install -c conda-forge jupyter_server
```

Likewise, for Jupyter Notebook via `pip`:

```bash
pip install notebook
```

or via `conda`:

```bash
conda install -c conda-forge notebook
```

For additional information regarding the installation of [Jupyter Server](https://jupyter-server.readthedocs.io/en/latest/index.html) or [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/latest/), please refer to their respective documentation (see embedded links).
================================================
FILE: docs/source/users/kernel-envs.md
================================================

# Kernel Environment Variables

The Enterprise Gateway client software will also include _any_ environment variables prefixed with `KERNEL_` in the start kernel request sent to the Enterprise Gateway server. This enables the ability to _statically parameterize_ aspects of kernel start requests relative to other clients using the same Enterprise Gateway instance. There are several supported `KERNEL_` variables that the Enterprise Gateway server looks for and uses, but others can be sent to customize behaviors.

The following kernel-specific environment variables are used by Enterprise Gateway. As mentioned above, all `KERNEL_` variables submitted in the kernel startup request's JSON body will be available to the kernel for its launch.

```text
KERNEL_GID=<from-client> or 100
      Containers only. This value represents the group id in which the container will run.
      The default value is 100, representing the users group - which is how all kernel
      images produced by Enterprise Gateway are built. See also KERNEL_UID.

      Kubernetes: Warning - If KERNEL_GID is set, it is strongly recommended that the
      feature-gate RunAsGroup be enabled; otherwise, this value will be ignored and the pod
      will run as the root group id. As a result, the setting of this value into the
      Security Context of the kernel pod is commented out in the kernel-pod.yaml file and
      must be enabled by the administrator.

      Docker: Warning - This value is only added to the supplemental group ids. As a result,
      if used with KERNEL_UID, the resulting container will run as the root group with this
      value listed in its supplemental groups.

KERNEL_EXECUTOR_IMAGE=<from-client> or KERNEL_IMAGE
      Kubernetes Spark only. This indicates the image that Spark on Kubernetes will use for
      its executors. Although this value could come from the user, it's strongly recommended
      that the process-proxy stanza of the corresponding kernel's kernelspec (kernel.json)
      file be updated to include the image name. If no image name is provided, the value of
      KERNEL_IMAGE will be used.

KERNEL_EXTRA_SPARK_OPTS=<from-client>
      Spark only. This variable allows users to add additional spark options to the current
      set of options specified in the corresponding kernel.json file. This variable is
      purely optional with no default value. In addition, it is the responsibility of the
      user setting this value to ensure the options passed are appropriate relative to the
      target environment. Because this variable contains space-separated values, it requires
      appropriate quotation. For example, to use with the notebook docker image
      jupyterhub/k8s-singleuser-sample, the environment variable would look something like
      this:

      docker run ... -e KERNEL_EXTRA_SPARK_OPTS=\"--conf spark.driver.memory=2g --conf spark.executor.memory=2g\" ... jupyterhub/k8s-singleuser-sample

KERNEL_ID=<from-client> or <system-generated>
      This value represents the identifier used by the Jupyter framework to identify the
      kernel. Although this value could be provided by the user, it is recommended that it
      be generated by the system.

KERNEL_IMAGE=<from-client> or <from-kernelspec>
      Containers only. This indicates the image to use for the kernel in containerized
      environments - Kubernetes or Docker. Although it can be provided by the user, it is
      strongly recommended that the process-proxy stanza of the corresponding kernel's
      kernelspec (kernel.json) file be updated to include the image name.

KERNEL_LAUNCH_TIMEOUT=<from-client> or EG_KERNEL_LAUNCH_TIMEOUT
      Indicates the time (in seconds) to allow for a kernel's launch.
  This value should be submitted in the kernel startup request if that particular kernel's startup time is expected to exceed that of the EG_KERNEL_LAUNCH_TIMEOUT set when Enterprise Gateway starts.

KERNEL_NAMESPACE= or KERNEL_POD_NAME or EG_NAMESPACE
  Kubernetes only. This indicates the name of the namespace to use or create on Kubernetes in which the kernel pod will be located. For users wishing to use a pre-created namespace, this value should be submitted in the kernel startup request. In such cases, the user must also provide KERNEL_SERVICE_ACCOUNT_NAME. If not provided, Enterprise Gateway will create a new namespace for the kernel whose value is derived from KERNEL_POD_NAME. In rare cases where EG_SHARED_NAMESPACE is True, this value will be set to the value of EG_NAMESPACE. Note that if the namespace is created by Enterprise Gateway, it will be removed upon the kernel's termination. Otherwise, Enterprise Gateway will not remove the namespace.

KERNEL_POD_NAME= or KERNEL_USERNAME-KERNEL_ID
  Kubernetes only. By default, Enterprise Gateway will use a kernel pod name whose value is derived from KERNEL_USERNAME and KERNEL_ID separated by a hyphen ('-'). This variable is typically NOT provided by the user but, when it is, Enterprise Gateway will honor that value. However, it is then the user's responsibility to ensure that KERNEL_POD_NAME is unique relative to any pods in the target namespace. In addition, the pod must NOT exist - unlike the case if KERNEL_NAMESPACE is provided.
  The KERNEL_POD_NAME can also be provided as a template string using simple variable substitution (e.g. "{{ kernel_username }}-{{ kernel_id }}"). Only simple {{ variable_name }} references are supported -- Jinja2 filters and expressions are NOT supported and will be rejected for security reasons. Available variables include all KERNEL_* environment variables (lowercased, e.g. kernel_username, kernel_namespace) plus kernel_id. Variable names must start with a letter and contain only letters, digits, and underscores. In case of invalid template syntax or missing variables, Enterprise Gateway will fall back to the default pod name using KERNEL_USERNAME-KERNEL_ID.

KERNEL_REMOTE_HOST=
  DistributedProcessProxy only. When specified, this value will override the configured load-balancing algorithm.

KERNEL_SERVICE_ACCOUNT_NAME= or EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME
  Kubernetes only. This value represents the name of the service account that Enterprise Gateway should associate with the kernel pod. If Enterprise Gateway creates the kernel's namespace, it will be associated with the cluster role identified by EG_KERNEL_CLUSTER_ROLE. If not provided, it will be derived from EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME.

KERNEL_SPARKAPP_CONFIG_MAP= or None
  Spark k8s-operator only. The name of a Kubernetes ConfigMap which will be used to configure the SparkApplication. See the SparkApplicationSpec (https://googlecloudplatform.github.io/spark-on-k8s-operator/docs/api-docs.html#sparkoperator.k8s.io/v1beta2.SparkApplicationSpec) sparkConfigMap field for more information.

KERNEL_UID= or 1000
  Containers only. This value represents the user id in which the container will run. The default value is 1000, representing the jovyan user - which is how all kernel images produced by Enterprise Gateway are built. See also KERNEL_GID.
  Kubernetes: Warning - If KERNEL_UID is set, it is strongly recommended that the RunAsGroup feature-gate be enabled and KERNEL_GID also be set; otherwise, the pod will run as the root group id.
  As a result, the setting of this value into the Security Context of the kernel pod is commented out in the kernel-pod.yaml file and must be enabled by the administrator.

KERNEL_USERNAME= or
  This value represents the logical name of the user submitting the request to start the kernel. Of all the KERNEL_ variables, KERNEL_USERNAME is the one that should be submitted in the request. In environments in which impersonation is used, it represents the target of the impersonation.

KERNEL_VOLUMES= or None
  Kubernetes and Spark Operator only. A JSON-formatted string defining Kubernetes volume specifications to mount into the kernel pod. The value is parsed via yaml.safe_load and passed to the kernel pod or SparkApplication template as the kernel_volumes variable. Example: KERNEL_VOLUMES='[{"name": "my-vol", "persistentVolumeClaim": {"claimName": "my-pvc"}}]' See the kernel-pod.yaml.j2 and sparkoperator templates for how volumes are rendered.

KERNEL_VOLUME_MOUNTS= or None
  Kubernetes and Spark Operator only. A JSON-formatted string defining Kubernetes volumeMount specifications for the kernel container. The value is parsed via yaml.safe_load and passed to the kernel pod or SparkApplication template as the kernel_volume_mounts variable. Example: KERNEL_VOLUME_MOUNTS='[{"name": "my-vol", "mountPath": "/data"}]' Must correspond to volumes defined via KERNEL_VOLUMES.

KERNEL_WORKING_DIR= or None
  Containers only. This value should model the directory in which the active notebook file is running. It is intended to be used in conjunction with appropriate volume mounts in the kernel container such that the user's notebook filesystem exists in the container and enables the sharing of resources used within the notebook. As a result, the primary use case for this is for JupyterHub users running in Kubernetes. When a value is provided and EG_MIRROR_WORKING_DIRS=True, Enterprise Gateway will set the container's working directory to the value specified in KERNEL_WORKING_DIR. If EG_MIRROR_WORKING_DIRS is False, KERNEL_WORKING_DIR will not be available for use during the kernel's launch. See also EG_MIRROR_WORKING_DIRS.
```

================================================
FILE: enterprise_gateway/__init__.py
================================================
"""Lazy-loading entrypoint for the enterprise gateway package."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from ._version import __version__  # noqa


def launch_instance(*args, **kwargs):
    from enterprise_gateway.enterprisegatewayapp import launch_instance

    launch_instance(*args, **kwargs)


================================================
FILE: enterprise_gateway/__main__.py
================================================
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""CLI entrypoint for the enterprise gateway package."""

if __name__ == "__main__":
    import enterprise_gateway.enterprisegatewayapp as app

    app.launch_instance()

================================================
FILE: enterprise_gateway/_version.py
================================================
"""enterprise_gateway version info"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
__version__ = "3.3.0.dev0" ================================================ FILE: enterprise_gateway/base/__init__.py ================================================ ================================================ FILE: enterprise_gateway/base/handlers.py ================================================ """Tornado handlers for the base of the API.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json from typing import List import jupyter_server._version from jupyter_server.base.handlers import APIHandler from tornado import web from .._version import __version__ from ..mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin class APIVersionHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): """ " Extends the jupyter_server base API handler with token auth, CORS, and JSON errors to produce version information for jupyter_server and gateway. """ def get(self): """Get the API version.""" # not authenticated, so give as few info as possible # to be backwards compatibile, use only 'version' for the jupyter_server version # and be more specific for gateway_version self.finish( json.dumps({"version": jupyter_server.__version__, "gateway_version": __version__}) ) class NotFoundHandler(JSONErrorsMixin, web.RequestHandler): """ Catches all requests and responds with 404 JSON messages. Installed as the fallback error for all unhandled requests. Raises ------ tornado.web.HTTPError Always 404 Not Found """ def prepare(self): """Prepare the response.""" raise web.HTTPError(404) default_handlers: List[tuple] = [(r"/api", APIVersionHandler), (r"/(.*)", NotFoundHandler)] ================================================ FILE: enterprise_gateway/client/__init__.py ================================================ ================================================ FILE: enterprise_gateway/client/gateway_client.py ================================================ """An Enterprise Gateway client.""" import logging import os import queue import time from threading import Thread from uuid import uuid4 import requests import websocket from tornado.escape import json_decode, json_encode, utf8 REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", 120)) log_level = os.getenv("LOG_LEVEL", "INFO") logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s") class GatewayClient: """ *** E X P E R I M E N T A L *** *** E X P E R I M E N T A L *** An experimental Gateway Client that is used for Enterprise Gateway integration tests and can be leveraged for micro service type of connections. 
""" DEFAULT_USERNAME = os.getenv("KERNEL_USERNAME", "bob") DEFAULT_GATEWAY_HOST = os.getenv("GATEWAY_HOST", "localhost:8888") KERNEL_LAUNCH_TIMEOUT = os.getenv("KERNEL_LAUNCH_TIMEOUT", "40") def __init__(self, host=DEFAULT_GATEWAY_HOST, use_secure_connection=False): """Initialize the client.""" self.http_api_endpoint = ( f"https://{host}/api/kernels" if use_secure_connection else f"http://{host}/api/kernels" ) self.ws_api_endpoint = ( f"wss://{host}/api/kernels" if use_secure_connection else f"ws://{host}/api/kernels" ) self.log = logging.getLogger("GatewayClient") self.log.setLevel(log_level) def start_kernel( self, kernelspec_name, username=DEFAULT_USERNAME, timeout=REQUEST_TIMEOUT, extra_env=None ): """Start a kernel.""" self.log.info(f"Starting a {kernelspec_name} kernel ....") if extra_env is None: extra_env = {} env = { "KERNEL_USERNAME": username, "KERNEL_LAUNCH_TIMEOUT": GatewayClient.KERNEL_LAUNCH_TIMEOUT, } env.update(extra_env) json_data = { "name": kernelspec_name, "env": env, } response = requests.post(self.http_api_endpoint, data=json_encode(json_data), timeout=60) if response.status_code == 201: json_data = response.json() kernel_id = json_data.get("id") self.log.info(f"Started kernel with id {kernel_id}") else: msg = "Error starting kernel : {} response code \n {}".format( response.status_code, response.content ) raise RuntimeError(msg) return KernelClient( self.http_api_endpoint, self.ws_api_endpoint, kernel_id, timeout=timeout, logger=self.log, ) def shutdown_kernel(self, kernel): """Shut down a kernel.""" self.log.info(f"Shutting down kernel : {kernel.kernel_id} ....") if not kernel: return False kernel.shutdown() class KernelClient: """A kernel client class.""" DEAD_MSG_ID = "deadbeefdeadbeefdeadbeefdeadbeef" POST_IDLE_TIMEOUT = 0.5 DEFAULT_INTERRUPT_WAIT = 1 def __init__( self, http_api_endpoint, ws_api_endpoint, kernel_id, timeout=REQUEST_TIMEOUT, logger=None ): """Initialize the client.""" self.shutting_down = False self.restarting = False self.http_api_endpoint = http_api_endpoint self.kernel_http_api_endpoint = f"{http_api_endpoint}/{kernel_id}" self.ws_api_endpoint = ws_api_endpoint self.kernel_ws_api_endpoint = f"{ws_api_endpoint}/{kernel_id}/channels" self.kernel_id = kernel_id self.log = logger self.kernel_socket = None self.response_reader = Thread(target=self._read_responses) self.response_queues = {} self.interrupt_thread = None self.log.debug(f"Initializing kernel client ({kernel_id}) to {self.kernel_ws_api_endpoint}") try: self.kernel_socket = websocket.create_connection( f"{ws_api_endpoint}/{kernel_id}/channels", timeout=timeout, enable_multithread=True ) except Exception as e: self.log.error(e) self.shutdown() raise e # startup reader thread self.response_reader.start() def shutdown(self): """Shut down the client.""" # Terminate thread, close socket and clear queues. 
self.shutting_down = True if self.kernel_socket: self.kernel_socket.close() self.kernel_socket = None if self.response_queues: self.response_queues.clear() self.response_queues = None if self.response_reader: self.response_reader.join(timeout=2.0) if self.response_reader.is_alive(): self.log.warning("Response reader thread is not terminated, continuing...") self.response_reader = None url = f"{self.http_api_endpoint}/{self.kernel_id}" response = requests.delete(url, timeout=60) if response.status_code == 204: self.log.info(f"Kernel {self.kernel_id} shutdown") return True else: msg = f"Error shutting down kernel {self.kernel_id}: {response.content}" raise RuntimeError(msg) def execute(self, code, timeout=REQUEST_TIMEOUT): """ Executes the code provided and returns the result of that execution. """ response = [] has_error = False try: msg_id = self._send_request(code) post_idle = False while True: response_message = self._get_response(msg_id, timeout, post_idle) if response_message: response_message_type = response_message["msg_type"] if response_message_type == "error" or ( response_message_type == "execute_reply" and response_message["content"]["status"] == "error" ): has_error = True response.append( "{}:{}:{}".format( response_message["content"]["ename"], response_message["content"]["evalue"], response_message["content"]["traceback"], ) ) elif response_message_type == "stream": response.append( KernelClient._convert_raw_response(response_message["content"]["text"]) ) elif ( response_message_type == "execute_result" or response_message_type == "display_data" ): if "text/plain" in response_message["content"]["data"]: response.append( KernelClient._convert_raw_response( response_message["content"]["data"]["text/plain"] ) ) elif "text/html" in response_message["content"]["data"]: response.append( KernelClient._convert_raw_response( response_message["content"]["data"]["text/html"] ) ) elif response_message_type == "status": if response_message["content"]["execution_state"] == "idle": post_idle = True # indicate we're at the logical end and timeout poll for next message continue else: self.log.debug( "Unhandled response for msg_id: {} of msg_type: {}".format( msg_id, response_message_type ) ) if ( response_message is None ): # We timed out. If post idle, its ok, else make mention of it if not post_idle: self.log.warning( f"Unexpected timeout occurred for msg_id: {msg_id} - no 'idle' status received!" 
) break except Exception as e: self.log.debug(e) return "".join(response), has_error def interrupt(self): """Interrupt the kernel.""" url = "{}/{}".format(self.kernel_http_api_endpoint, "interrupt") response = requests.post(url, timeout=60) if response.status_code == 204: self.log.debug(f"Kernel {self.kernel_id} interrupted") return True else: msg = f"Unexpected response interrupting kernel {self.kernel_id}: {response.content}" raise RuntimeError(msg) def restart(self, timeout=REQUEST_TIMEOUT): """Restart the kernel.""" self.restarting = True self.kernel_socket.close() self.kernel_socket = None url = "{}/{}".format(self.kernel_http_api_endpoint, "restart") response = requests.post(url, timeout=60) if response.status_code == 200: self.log.debug(f"Kernel {self.kernel_id} restarted") self.kernel_socket = websocket.create_connection( self.kernel_ws_api_endpoint, timeout=timeout, enable_multithread=True ) self.restarting = False return True else: self.restarting = False msg = f"Unexpected response restarting kernel {self.kernel_id}: {response.content}" self.log.debug(msg) raise RuntimeError(msg) def get_state(self): """Get the state of the client.""" url = f"{self.kernel_http_api_endpoint}" response = requests.get(url, timeout=60) if response.status_code == 200: json = response.json() self.log.debug(f"Kernel {self.kernel_id} state: {json}") return json["execution_state"] else: msg = "Unexpected response retrieving state for kernel {}: {}".format( self.kernel_id, response.content ) raise RuntimeError(msg) def start_interrupt_thread(self, wait_time=DEFAULT_INTERRUPT_WAIT): """Start the interrupt thread.""" self.interrupt_thread = Thread(target=self.perform_interrupt, args=(wait_time,)) self.interrupt_thread.start() def perform_interrupt(self, wait_time): """Perform an interrupt on the client.""" time.sleep(wait_time) # Allow parent to start executing cell to interrupt self.interrupt() def terminate_interrupt_thread(self): """Terminate the interrupt thread.""" if self.interrupt_thread: self.interrupt_thread.join() self.interrupt_thread = None def _send_request(self, code): """ Builds the request and submits it to the kernel. Prior to sending the request it creates an empty response queue and adds it to the dictionary using msg_id as the key. The msg_id is returned in order to read responses. """ msg_id = uuid4().hex message = KernelClient.__create_execute_request(msg_id, code) # create response-queue and add to map for this msg_id self.response_queues[msg_id] = queue.Queue() self.kernel_socket.send(message) return msg_id def _get_response(self, msg_id, timeout, post_idle): """ Pulls the next response message from the queue corresponding to msg_id. If post_idle is true, the timeout parameter is set to a very short value since a majority of time, there won't be a message in the queue. However, in cases where a race condition occurs between the idle status and the execute_result payload - where the two are out of order, then this will pickup the result. """ if post_idle and timeout > KernelClient.POST_IDLE_TIMEOUT: timeout = ( KernelClient.POST_IDLE_TIMEOUT ) # overwrite timeout to small value following idle messages. 
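        # Note: POST_IDLE_TIMEOUT is 0.5 seconds (see the class constant above), so
        # post-idle polls return quickly when no straggling message is present.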
msg_queue = self.response_queues.get(msg_id) try: self.log.debug(f"Getting response for msg_id: {msg_id} with timeout: {timeout}") response = msg_queue.get(timeout=timeout) self.log.debug( "Got response for msg_id: {}, msg_type: {}".format( msg_id, response["msg_type"] if response else "null" ) ) except queue.Empty: response = None return response def _read_responses(self): """ Reads responses from the websocket. For each response read, it is added to the response queue based on the messages parent_header.msg_id. It does this for the duration of the class's lifetime until its shutdown method is called, at which time the socket is closed (unblocking the reader) and the thread terminates. If shutdown happens to occur while processing a response (unlikely), termination takes place via the loop control boolean. """ try: while not self.shutting_down: try: raw_message = self.kernel_socket.recv() response_message = json_decode(utf8(raw_message)) msg_id = KernelClient._get_msg_id(response_message, self.log) if msg_id not in self.response_queues: # this will happen when the msg_id is generated by the server self.response_queues[msg_id] = queue.Queue() # insert into queue self.log.debug( "Inserting response for msg_id: {}, msg_type: {}".format( msg_id, response_message["msg_type"] ) ) self.response_queues.get(msg_id).put_nowait(response_message) except BaseException as be1: if ( self.restarting ): # If restarting, wait until restart has completed - which includes new socket i = 1 while self.restarting: if i >= 10 and i % 2 == 0: self.log.debug(f"Still restarting after {i} secs...") time.sleep(1) i += 1 continue raise be1 except websocket.WebSocketConnectionClosedException: pass # websocket closure most likely due to shutdown except BaseException as be2: if not self.shutting_down: self.log.warning(f"Unexpected exception encountered ({be2})") self.log.debug("Response reader thread exiting...") @staticmethod def _get_msg_id(message, logger): msg_id = KernelClient.DEAD_MSG_ID if message: if "msg_id" in message["parent_header"] and message["parent_header"]["msg_id"]: msg_id = message["parent_header"]["msg_id"] elif "msg_id" in message: # msg_id may not be in the parent_header, see if present in response # IPython kernel appears to do this after restarts with a 'starting' status msg_id = message["msg_id"] else: # Dump the "dead" message... logger.debug(f"+++++ Dumping dead message: {message}") return msg_id @staticmethod def _convert_raw_response(raw_response_message): result = raw_response_message if isinstance(raw_response_message, str) and "u'" in raw_response_message: result = raw_response_message.replace("u'", "")[:-1] return result @staticmethod def __create_execute_request(msg_id, code): return json_encode( { "header": { "username": "", "version": "5.0", "session": "", "msg_id": msg_id, "msg_type": "execute_request", }, "parent_header": {}, "channel": "shell", "content": { "code": "".join(code), "silent": False, "store_history": False, "user_expressions": {}, "allow_stdin": False, }, "metadata": {}, "buffers": {}, } ) ================================================ FILE: enterprise_gateway/enterprisegatewayapp.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
"""Enterprise Gateway Jupyter application.""" import asyncio import errno import getpass import logging import os import signal import ssl import sys import time import weakref from typing import ClassVar, List, Optional from jupyter_client.kernelspec import KernelSpecManager from jupyter_core.application import JupyterApp, base_aliases from jupyter_server.serverapp import random_ports from jupyter_server.utils import url_path_join from tornado import httpserver, web from tornado.log import enable_pretty_logging from traitlets.config import Configurable from zmq.eventloop import ioloop from ._version import __version__ from .base.handlers import default_handlers as default_base_handlers from .mixins import EnterpriseGatewayConfigMixin from .services.api.handlers import default_handlers as default_api_handlers from .services.kernels.handlers import default_handlers as default_kernel_handlers from .services.kernels.remotemanager import RemoteMappingKernelManager from .services.kernelspecs import KernelSpecCache from .services.kernelspecs.handlers import default_handlers as default_kernelspec_handlers from .services.sessions.handlers import default_handlers as default_session_handlers from .services.sessions.kernelsessionmanager import ( FileKernelSessionManager, WebhookKernelSessionManager, ) from .services.sessions.sessionmanager import SessionManager # Add additional command line aliases aliases = dict(base_aliases) aliases.update( { "ip": "EnterpriseGatewayApp.ip", "port": "EnterpriseGatewayApp.port", "port_retries": "EnterpriseGatewayApp.port_retries", "keyfile": "EnterpriseGatewayApp.keyfile", "certfile": "EnterpriseGatewayApp.certfile", "client-ca": "EnterpriseGatewayApp.client_ca", "ssl_version": "EnterpriseGatewayApp.ssl_version", } ) class EnterpriseGatewayApp(EnterpriseGatewayConfigMixin, JupyterApp): """ Application that provisions Jupyter kernels and proxies HTTP/Websocket traffic to the kernels. - reads command line and environment variable settings - initializes managers and routes - creates a Tornado HTTP server - starts the Tornado event loop """ name = "jupyter-enterprise-gateway" version = __version__ description = """ Jupyter Enterprise Gateway Provisions remote Jupyter kernels and proxies HTTP/Websocket traffic to them. """ # Also include when generating help options classes: ClassVar = [ KernelSpecCache, FileKernelSessionManager, WebhookKernelSessionManager, RemoteMappingKernelManager, ] # Enable some command line shortcuts aliases = aliases def initialize(self, argv: Optional[List[str]] = None) -> None: """Initializes the base class, configurable manager instances, the Tornado web app, and the tornado HTTP server. Parameters ---------- argv Command line arguments """ super().initialize(argv) self.init_configurables() self.init_webapp() self.init_http_server() def init_configurables(self) -> None: """Initializes all configurable objects including a kernel manager, kernel spec manager, session manager, and personality. """ self.kernel_spec_manager = KernelSpecManager(parent=self) self.kernel_spec_manager = self.kernel_spec_manager_class( parent=self, ) self.kernel_spec_cache = self.kernel_spec_cache_class( parent=self, kernel_spec_manager=self.kernel_spec_manager ) # Only pass a default kernel name when one is provided. Otherwise, # adopt whatever default the kernel manager wants to use. 
kwargs = {} if self.default_kernel_name: kwargs["default_kernel_name"] = self.default_kernel_name self.kernel_manager = self.kernel_manager_class( parent=self, log=self.log, connection_dir=self.runtime_dir, kernel_spec_manager=self.kernel_spec_manager, **kwargs, ) self.session_manager = SessionManager(log=self.log, kernel_manager=self.kernel_manager) self.kernel_session_manager = self.kernel_session_manager_class( parent=self, log=self.log, kernel_manager=self.kernel_manager, config=self.config, # required to get command-line options visible ) # For B/C purposes, check if session persistence is enabled. If so, and availability # mode is not enabled, go ahead and default availability mode to 'multi-instance'. if self.kernel_session_manager.enable_persistence: if self.availability_mode is None: self.availability_mode = EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION self.log.info( f"Kernel session persistence is enabled but availability mode is not. " f"Setting EnterpriseGatewayApp.availability_mode to '{self.availability_mode}'." ) else: # Persistence is not enabled, check if availability_mode is configured and, if so, # auto-enable persistence if self.availability_mode is not None: self.kernel_session_manager.enable_persistence = True self.log.info( f"Availability mode is set to '{self.availability_mode}' yet kernel session " "persistence is not enabled. Enabling kernel session persistence." ) # If we're using single-instance availability, attempt to start persisted sessions if self.availability_mode == EnterpriseGatewayConfigMixin.AVAILABILITY_STANDALONE: self.kernel_session_manager.start_sessions() self.contents_manager = None # Gateways don't use contents manager self.init_dynamic_configs() def _create_request_handlers(self) -> List[tuple]: """Create default Jupyter handlers and redefine them off of the base_url path. Assumes init_configurables() has already been called. """ handlers = [] # append tuples for the standard kernel gateway endpoints for handler in ( default_api_handlers + default_kernel_handlers + default_kernelspec_handlers + default_session_handlers + default_base_handlers ): # Create a new handler pattern rooted at the base_url pattern = url_path_join("/", self.base_url, handler[0]) # Some handlers take args, so retain those in addition to the # handler class ref new_handler = (pattern, *list(handler[1:])) if self.authorized_origin: self.__add_authorized_hostname_match(new_handler) handlers.append(new_handler) return handlers def __add_authorized_hostname_match(self, handler: tuple) -> None: base_prepare = handler[1].prepare authorized_hostname = self.authorized_origin def wrapped_prepare(self): ssl_cert = self.request.get_ssl_certificate() try: ssl.match_hostname(ssl_cert, authorized_hostname) except ssl.SSLCertVerificationError: raise web.HTTPError(403, "Forbidden") from None base_prepare(self) handler[1].prepare = wrapped_prepare def init_webapp(self) -> None: """Initializes Tornado web application with uri handlers. Adds the various managers and web-front configuration values to the Tornado settings for reference by the handlers. 
""" # Enable the same pretty logging the server uses enable_pretty_logging() # Configure the tornado logging level too logging.getLogger().setLevel(self.log_level) handlers = self._create_request_handlers() # Instantiate the configured authorizer class self.log.info(f"Using authorizer: {self.authorizer_class}") authorizer = self.authorizer_class(parent=self, log=self.log) self.web_app = web.Application( handlers=handlers, kernel_manager=self.kernel_manager, session_manager=self.session_manager, contents_manager=self.contents_manager, kernel_spec_manager=self.kernel_spec_manager, kernel_spec_cache=self.kernel_spec_cache, eg_auth_token=self.auth_token, eg_allow_credentials=self.allow_credentials, eg_allow_headers=self.allow_headers, eg_allow_methods=self.allow_methods, eg_allow_origin=self.allow_origin, eg_expose_headers=self.expose_headers, eg_max_age=self.max_age, eg_max_kernels=self.max_kernels, eg_inherited_envs=self.inherited_envs, eg_client_envs=self.client_envs, eg_kernel_headers=self.kernel_headers, eg_list_kernels=self.list_kernels, eg_authorized_users=self.authorized_users, eg_unauthorized_users=self.unauthorized_users, # Also set the allow_origin setting used by jupyter_server so that the # check_origin method used everywhere respects the value allow_origin=self.allow_origin, # Set base_url for use in request handlers base_url=self.base_url, # Always allow remote access (has been limited to localhost >= notebook 5.6) allow_remote_access=True, # setting ws_ping_interval value that can allow it to be modified for the purpose of toggling ping mechanism # for zmq web-sockets or increasing/decreasing web socket ping interval/timeouts. ws_ping_interval=self.ws_ping_interval * 1000, # Use configurable authorizer authorizer=authorizer, ) def _build_ssl_options(self) -> Optional[ssl.SSLContext]: """Build an SSLContext for the tornado HTTP server.""" if not any((self.certfile, self.keyfile, self.client_ca)): # None indicates no SSL config return None ssl_context = ssl.SSLContext(protocol=self.ssl_version or self.ssl_version_default_value) if self.certfile: ssl_context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile) if self.client_ca: ssl_context.load_verify_locations(cafile=self.client_ca) ssl_context.verify_mode = ssl.CERT_REQUIRED return ssl_context def init_http_server(self) -> None: """Initializes an HTTP server for the Tornado web application on the configured interface and port. Tries to find an open port if the one configured is not available using the same logic as the Jupyter Notebook server. """ ssl_options = self._build_ssl_options() self.http_server = httpserver.HTTPServer( self.web_app, xheaders=self.trust_xheaders, ssl_options=ssl_options ) for port in random_ports(self.port, self.port_retries + 1): try: self.http_server.listen(port, self.ip) except OSError as e: if e.errno == errno.EADDRINUSE: self.log.info("The port %i is already in use, trying another port." % port) continue elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): self.log.warning("Permission to listen on port %i denied" % port) continue else: raise else: self.port = port break else: self.log.critical( "ERROR: the gateway server could not be started because " "no available port could be found." 
) self.exit(1) def start(self) -> None: """Starts an IO loop for the application.""" super().start() self.log.info( "Jupyter Enterprise Gateway {} is available at http{}://{}:{}".format( EnterpriseGatewayApp.version, "s" if self.keyfile else "", self.ip, self.port ) ) # If impersonation is enabled, issue a warning message if the gateway user is not in unauthorized_users. if self.impersonation_enabled: gateway_user = getpass.getuser() if gateway_user.lower() not in self.unauthorized_users: self.log.warning( "Impersonation is enabled and gateway user '{}' is NOT specified in the set of " "unauthorized users! Kernels may execute as that user with elevated privileges.".format( gateway_user ) ) self.io_loop = ioloop.IOLoop.current() if sys.platform != "win32": signal.signal(signal.SIGHUP, signal.SIG_IGN) signal.signal(signal.SIGTERM, self._signal_stop) try: self.io_loop.start() except KeyboardInterrupt: self.log.info("Interrupted...") # Ignore further interrupts (ctrl-c) signal.signal(signal.SIGINT, signal.SIG_IGN) finally: self.shutdown() def shutdown(self) -> None: """Shuts down all running kernels.""" self.log.info("Jupyter Enterprise Gateway is shutting down all running kernels") kids = self.kernel_manager.list_kernel_ids() for kid in kids: try: asyncio.get_event_loop().run_until_complete( self.kernel_manager.shutdown_kernel(kid, now=True) ) except Exception as ex: self.log.warning(f"Failed to shut down kernel {kid}: {ex}") self.log.info("Shut down complete") def stop(self) -> None: """ Stops the HTTP server and IO loop associated with the application. """ def _stop(): self.http_server.stop() self.io_loop.stop() self.io_loop.add_callback(_stop) def _signal_stop(self, sig, frame) -> None: self.log.info("Received signal to terminate Enterprise Gateway.") self.io_loop.add_callback_from_signal(self.io_loop.stop) _last_config_update = int(time.time()) _dynamic_configurables: ClassVar = {} def update_dynamic_configurables(self) -> bool: """ Called periodically, this checks the set of loaded configuration files for updates. If updates have been detected, reload the configuration files and update the list of configurables participating in dynamic updates. :return: True if updates were taken """ updated = False configs = [] for file in self.loaded_config_files: mod_time = int(os.path.getmtime(file)) if mod_time > self._last_config_update: self.log.debug(f"Config file was updated: {file}!") self._last_config_update = mod_time updated = True if updated: # If config changes are present, reload the config files. This will also update # the Application's configuration, then update the config of each configurable # from the newly loaded values. self.load_config_file(self) for config_name, configurable in self._dynamic_configurables.items(): # Since Application.load_config_file calls update_config on the Application, skip # the configurable registered with self (i.e., the application). if configurable is not self: configurable.update_config(self.config) configs.append(config_name) self.log.info( "Configuration file changes detected. Instances for the following " f"configurables have been updated: {configs}" ) return updated def add_dynamic_configurable(self, config_name: str, configurable: Configurable) -> None: """ Adds the configurable instance associated with the given name to the list of Configurables that can have their configurations updated when configuration file updates are detected. 
:param config_name: the name of the config within this application :param configurable: the configurable instance corresponding to that config """ if not isinstance(configurable, Configurable): msg = f"'{configurable}' is not a subclass of Configurable!" raise RuntimeError(msg) self._dynamic_configurables[config_name] = weakref.proxy(configurable) def init_dynamic_configs(self) -> None: """ Initialize the set of configurables that should participate in dynamic updates. We should also log that we're performing dynamic configuration updates, along with the list of CLI options - that are not privy to dynamic updates. :return: """ if self.dynamic_config_interval > 0: self.add_dynamic_configurable("EnterpriseGatewayApp", self) self.add_dynamic_configurable("MappingKernelManager", self.kernel_manager) self.add_dynamic_configurable("KernelSpecManager", self.kernel_spec_manager) self.add_dynamic_configurable("KernelSessionManager", self.kernel_session_manager) self.log.info( "Dynamic updates have been configured. Checking every {} seconds.".format( self.dynamic_config_interval ) ) self.log.info( "The following configuration options will not be subject to dynamic updates " "(configured via CLI):" ) for config, options in self.cli_config.items(): for option, value in options.items(): self.log.info(f" '{config}.{option}': '{value}'") if self.dynamic_config_poller is None: self.dynamic_config_poller = ioloop.PeriodicCallback( self.update_dynamic_configurables, self.dynamic_config_interval * 1000 ) self.dynamic_config_poller.start() launch_instance = EnterpriseGatewayApp.launch_instance ================================================ FILE: enterprise_gateway/itests/__init__.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from tornado import ioloop def teardown(): """The test fixture appears to leak something on certain platforms that endlessly tries an async socket connect and fails after the tests end. As a stopgap, force a cleanup here. 
""" ioloop.IOLoop.current().stop() ioloop.IOLoop.current().close(True) ================================================ FILE: enterprise_gateway/itests/kernels/authorization_test/kernel.json ================================================ { "display_name": "Authorization Testing", "language": "python", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy", "config": { "authorized_users": "bob,alice,bad_guy", "unauthorized_users": "bad_guy" } } }, "env": {}, "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"] } ================================================ FILE: enterprise_gateway/itests/test_authorization.py ================================================ import os import unittest from enterprise_gateway.client.gateway_client import GatewayClient class TestAuthorization(unittest.TestCase): KERNELSPEC = os.getenv("AUTHORIZATION_KERNEL_NAME", "authorization_test") @classmethod def setUpClass(cls): super().setUpClass() # initialize environment cls.gateway_client = GatewayClient() def setUp(self): pass def tearDown(self): pass def test_authorized_users(self): kernel = None try: kernel = self.gateway_client.start_kernel(TestAuthorization.KERNELSPEC, username="bob") result, has_error = kernel.execute("print('The cow jumped over the moon.')") self.assertEqual(result, "The cow jumped over the moon.\n") self.assertEqual(has_error, False) finally: if kernel: self.gateway_client.shutdown_kernel(kernel) def test_unauthorized_users(self): kernel = None try: kernel = self.gateway_client.start_kernel( TestAuthorization.KERNELSPEC, username="bad_guy" ) self.assertTrue(False, msg="Unauthorization exception expected!") except Exception as be: self.assertRegex(be.args[0], "403") finally: if kernel: self.gateway_client.shutdown_kernel(kernel) if __name__ == "__main__": unittest.main() ================================================ FILE: enterprise_gateway/itests/test_base.py ================================================ import os expected_hostname = os.getenv("ITEST_HOSTNAME_PREFIX", "") + "*" # use ${KERNEL_USERNAME} on k8s expected_application_id = os.getenv( "EXPECTED_APPLICATION_ID", "application_*" ) # use 'spark-application-*' on k8s expected_spark_version = os.getenv("EXPECTED_SPARK_VERSION", "3.2.*") # use '2.4.*' on k8s expected_spark_master = os.getenv("EXPECTED_SPARK_MASTER", "yarn") # use 'k8s:*' on k8s expected_deploy_mode = os.getenv("EXPECTED_DEPLOY_MODE", "(cluster|client)") # use 'client' on k8s class TestBase: def get_expected_application_id(self): return expected_application_id def get_expected_spark_version(self): return expected_spark_version def get_expected_spark_master(self): return expected_spark_master def get_expected_deploy_mode(self): return expected_deploy_mode def get_expected_hostname(self): return expected_hostname ================================================ FILE: enterprise_gateway/itests/test_python_kernel.py ================================================ import os import unittest from enterprise_gateway.client.gateway_client import GatewayClient from .test_base import TestBase class PythonKernelBaseTestCase(TestBase): """ Python related test cases common to vanilla IPython kernels """ def test_get_hostname(self): result, has_error = self.kernel.execute( "import subprocess; subprocess.check_output(['hostname'])" ) self.assertEqual(has_error, False) self.assertRegex(result, self.get_expected_hostname()) def test_hello_world(self): result, has_error = 
self.kernel.execute("print('Hello World')") self.assertEqual(has_error, False) self.assertRegex(result, "Hello World") def test_restart(self): # 1. Set a variable to a known value. # 2. Restart the kernel # 3. Attempt to increment the variable, verify an error was received (due to undefined variable) self.kernel.execute("x = 123") original_value, has_error = self.kernel.execute("print(x)") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) self.assertTrue(self.kernel.restart()) error_result, has_error = self.kernel.execute("y = x + 1") self.assertRegex(error_result, "NameError") self.assertEqual(has_error, True) def test_interrupt(self): # 1. Set a variable to a known value. # 2. Spawn a thread that will perform an interrupt after some number of seconds, # 3. Issue a long-running command - that spans during of interrupt thread wait time, # 4. Interrupt the kernel, # 5. Attempt to increment the variable, verify expected result. self.kernel.execute("x = 123") original_value, has_error = self.kernel.execute("print(x)") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) # Start a thread that performs the interrupt. This thread must wait long enough to issue # the next cell execution. self.kernel.start_interrupt_thread() # Build the code list to interrupt, in this case, its a sleep call. interrupted_code = [] interrupted_code.append("import time\n") interrupted_code.append("print('begin')\n") interrupted_code.append("time.sleep(60)\n") interrupted_code.append("print('end')\n") interrupted_result, has_error = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred self.assertRegex(interrupted_result, "KeyboardInterrupt") self.assertEqual(has_error, True) # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() # Increment the pre-interrupt variable and ensure its value is correct self.kernel.execute("y = x + 1") interrupted_value, has_error = self.kernel.execute("print(y)") self.assertEqual(int(interrupted_value), 124) self.assertEqual(has_error, False) def test_scope(self): # Ensure global variable is accessible in function. # See https://github.com/jupyter-server/enterprise_gateway/issues/687 # Build the example code... 
scope_code = [] scope_code.append("a = 42\n") scope_code.append("def scope():\n") scope_code.append(" return a\n") scope_code.append("\n") scope_code.append("scope()\n") result, has_error = self.kernel.execute(scope_code) self.assertEqual(result, str(42)) self.assertEqual(has_error, False) class PythonKernelBaseSparkTestCase(PythonKernelBaseTestCase): """ Python related test cases common to Spark on Yarn """ def test_get_application_id(self): result, has_error = self.kernel.execute("sc.getConf().get('spark.app.id')") self.assertRegex(result, self.get_expected_application_id()) self.assertEqual(has_error, False) def test_get_deploy_mode(self): result, has_error = self.kernel.execute("sc.getConf().get('spark.submit.deployMode')") self.assertRegex(result, self.get_expected_deploy_mode()) self.assertEqual(has_error, False) def test_get_resource_manager(self): result, has_error = self.kernel.execute("sc.getConf().get('spark.master')") self.assertRegex(result, self.get_expected_spark_master()) self.assertEqual(has_error, False) def test_get_spark_version(self): result, has_error = self.kernel.execute("sc.version") self.assertRegex(result, self.get_expected_spark_version()) self.assertEqual(has_error, False) @unittest.skip("Temporarily disabled") def test_run_pi_example(self): # Build the example code... pi_code = [] pi_code.append("from random import random\n") pi_code.append("from operator import add\n") pi_code.append("partitions = 20\n") pi_code.append("n = 100000 * partitions\n") pi_code.append("def f(_):\n") pi_code.append(" x = random() * 2 - 1\n") pi_code.append(" y = random() * 2 - 1\n") pi_code.append(" return 1 if x ** 2 + y ** 2 <= 1 else 0\n") pi_code.append("count = sc.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\n") pi_code.append('print("Pi is roughly %f" % (4.0 * count / n))\n') result, has_error = self.kernel.execute(pi_code) self.assertRegex(result, "Pi is roughly 3.14*") self.assertEqual(has_error, False) class TestPythonKernelLocal(unittest.TestCase, PythonKernelBaseTestCase): KERNELSPEC = os.getenv("PYTHON_KERNEL_LOCAL_NAME", "python3") @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestPythonKernelDistributed(unittest.TestCase, PythonKernelBaseTestCase): KERNELSPEC = os.getenv( "PYTHON_KERNEL_DISTRIBUTED_NAME", "python_distributed" ) # python_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestPythonKernelClient(unittest.TestCase, PythonKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "PYTHON_KERNEL_CLIENT_NAME", "spark_python_yarn_client" ) # spark_python_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient
= GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestPythonKernelCluster(unittest.TestCase, PythonKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "PYTHON_KERNEL_CLUSTER_NAME", "spark_python_yarn_cluster" ) # spark_python_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Python kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Python kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) if __name__ == "__main__": unittest.main()

================================================
FILE: enterprise_gateway/itests/test_r_kernel.py
================================================
import os import unittest from enterprise_gateway.client.gateway_client import GatewayClient from .test_base import TestBase class RKernelBaseTestCase(TestBase): """ R related test cases common to vanilla IRKernel kernels """ def test_get_hostname(self): result, has_error = self.kernel.execute('system("hostname", intern=TRUE)') self.assertRegex(result, self.get_expected_hostname()) self.assertEqual(has_error, False) def test_hello_world(self): result, has_error = self.kernel.execute('print("Hello World", quote = FALSE)') self.assertRegex(result, "Hello World") self.assertEqual(has_error, False) def test_restart(self): # 1. Set a variable to a known value. # 2. Restart the kernel # 3. Attempt to increment the variable, verify an error was received (due to undefined variable) self.kernel.execute("x = 123") original_value, has_error = self.kernel.execute("write(x,stdout())") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) self.assertTrue(self.kernel.restart()) error_result, has_error = self.kernel.execute("y = x + 1") self.assertRegex(error_result, "Error in eval") self.assertEqual(has_error, True) def test_interrupt(self): # 1. Set a variable to a known value. # 2. Spawn a thread that will perform an interrupt after some number of seconds, # 3. Issue a long-running command - one that spans the duration of the interrupt thread's wait time, # 4. Interrupt the kernel, # 5. Attempt to increment the variable, verify expected result. self.kernel.execute("x = 123") original_value, has_error = self.kernel.execute("write(x,stdout())") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) # Start a thread that performs the interrupt. This thread must wait long enough to issue # the next cell execution. self.kernel.start_interrupt_thread() # Build the code list to interrupt; in this case, it's a sleep call.
interrupted_code = [] interrupted_code.append('write("begin",stdout())\n') interrupted_code.append("Sys.sleep(30)\n") interrupted_code.append('write("end",stdout())\n') interrupted_result, has_error = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred self.assertEqual(interrupted_result.strip(), "begin") self.assertEqual(has_error, False) # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() # Increment the pre-interrupt variable and ensure its value is correct self.kernel.execute("y = x + 1") interrupted_value, has_error = self.kernel.execute("write(y,stdout())") self.assertEqual(int(interrupted_value), 124) self.assertEqual(has_error, False) class RKernelBaseSparkTestCase(RKernelBaseTestCase): """ R related test cases common to Spark on Yarn """ def test_get_application_id(self): result, has_error = self.kernel.execute( 'SparkR:::callJMethod(SparkR:::callJMethod(sc, "sc"), "applicationId")' ) self.assertRegex(result, self.get_expected_application_id()) self.assertEqual(has_error, False) def test_get_spark_version(self): result, has_error = self.kernel.execute("sparkR.version()") self.assertRegex(result, self.get_expected_spark_version()) self.assertEqual(has_error, False) def test_get_resource_manager(self): result, has_error = self.kernel.execute('unlist(sparkR.conf("spark.master"))') self.assertRegex(result, self.get_expected_spark_master()) self.assertEqual(has_error, False) def test_get_deploy_mode(self): result, has_error = self.kernel.execute('unlist(sparkR.conf("spark.submit.deployMode"))') self.assertRegex(result, self.get_expected_deploy_mode()) self.assertEqual(has_error, False) class TestRKernelLocal(unittest.TestCase, RKernelBaseTestCase): KERNELSPEC = os.getenv("R_KERNEL_LOCAL_NAME", "ir") # R_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down R kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestRKernelClient(unittest.TestCase, RKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "R_KERNEL_CLIENT_NAME", "spark_R_yarn_client" ) # spark_R_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down R kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestRKernelCluster(unittest.TestCase, RKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "R_KERNEL_CLUSTER_NAME", "spark_R_yarn_cluster" ) # spark_R_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting R kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down R kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) if __name__ == "__main__": unittest.main()
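# To run this module directly against a running gateway (illustrative invocation;
# the host value is an assumption - set GATEWAY_HOST for your deployment):
#   GATEWAY_HOST=localhost:8888 python -m unittest enterprise_gateway.itests.test_r_kernel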
================================================
FILE: enterprise_gateway/itests/test_scala_kernel.py
================================================
import os import unittest from enterprise_gateway.client.gateway_client import GatewayClient from .test_base import TestBase class ScalaKernelBaseTestCase(TestBase): """ Scala related test cases common to vanilla Scala kernels """ def test_get_hostname(self): result, has_error = self.kernel.execute( "import java.net._; \ val localhost: InetAddress = InetAddress.getLocalHost; \ val localIpAddress: String = localhost.getHostName" ) self.assertRegex(result, self.get_expected_hostname()) self.assertEqual(has_error, False) def test_hello_world(self): result, has_error = self.kernel.execute('println("Hello World")') self.assertRegex(result, "Hello World") self.assertEqual(has_error, False) def test_restart(self): # 1. Set a variable to a known value. # 2. Restart the kernel # 3. Attempt to increment the variable, verify an error was received (due to undefined variable) self.kernel.execute("var x = 123") original_value, has_error = self.kernel.execute("x") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) self.assertTrue(self.kernel.restart()) error_result, has_error = self.kernel.execute("var y = x + 1") self.assertRegex(error_result, "not found: value x") self.assertEqual(has_error, True) def test_interrupt(self): # 1. Set a variable to a known value. # 2. Spawn a thread that will perform an interrupt after some number of seconds, # 3. Issue a long-running command - one that spans the duration of the interrupt thread's wait time, # 4. Interrupt the kernel, # 5. Attempt to increment the variable, verify expected result. self.kernel.execute("var x = 123") original_value, has_error = self.kernel.execute("x") self.assertEqual(int(original_value), 123) self.assertEqual(has_error, False) # Start a thread that performs the interrupt. This thread must wait long enough to issue # the next cell execution. self.kernel.start_interrupt_thread() # Build the code list to interrupt; in this case, it's a sleep call.
interrupted_code = [] interrupted_code.append('println("begin")\n') interrupted_code.append("Thread.sleep(60000)\n") interrupted_code.append('println("end")\n') interrupted_result, has_error = self.kernel.execute(interrupted_code) # Ensure the result indicates an interrupt occurred self.assertRegex(interrupted_result, "java.lang.InterruptedException") self.assertEqual(has_error, True) # Wait for thread to terminate - should be terminated already self.kernel.terminate_interrupt_thread() # Increment the pre-interrupt variable and ensure its value is correct self.kernel.execute("var y = x + 1") interrupted_value, has_error = self.kernel.execute("y") self.assertEqual(int(interrupted_value), 124) self.assertEqual(has_error, False) class ScalaKernelBaseSparkTestCase(ScalaKernelBaseTestCase): """ Scala related test cases common to Spark (with Yarn the default RM) """ def test_get_application_id(self): result, has_error = self.kernel.execute("sc.applicationId") self.assertRegex(result, self.get_expected_application_id()) self.assertEqual(has_error, False) def test_get_spark_version(self): result, has_error = self.kernel.execute("sc.version") self.assertRegex(result, self.get_expected_spark_version()) self.assertEqual(has_error, False) def test_get_resource_manager(self): result, has_error = self.kernel.execute('sc.getConf.get("spark.master")') self.assertRegex(result, self.get_expected_spark_master()) self.assertEqual(has_error, False) def test_get_deploy_mode(self): result, has_error = self.kernel.execute('sc.getConf.get("spark.submit.deployMode")') self.assertRegex(result, self.get_expected_deploy_mode()) self.assertEqual(has_error, False) class TestScalaKernelLocal(unittest.TestCase, ScalaKernelBaseTestCase): SPARK_VERSION = os.getenv("SPARK_VERSION") DEFAULT_KERNELSPEC = f"spark_{SPARK_VERSION}_scala" KERNELSPEC = os.getenv( "SCALA_KERNEL_LOCAL_NAME", DEFAULT_KERNELSPEC ) # scala_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestScalaKernelClient(unittest.TestCase, ScalaKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "SCALA_KERNEL_CLIENT_NAME", "spark_scala_yarn_client" ) # spark_scala_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Scala kernel using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) class TestScalaKernelCluster(unittest.TestCase, ScalaKernelBaseSparkTestCase): KERNELSPEC = os.getenv( "SCALA_KERNEL_CLUSTER_NAME", "spark_scala_yarn_cluster" ) # spark_scala_kubernetes for k8s @classmethod def setUpClass(cls): super().setUpClass() print(f"\nStarting Scala kernel using {cls.KERNELSPEC} kernelspec") # initialize environment cls.gatewayClient = GatewayClient() cls.kernel = cls.gatewayClient.start_kernel(cls.KERNELSPEC) @classmethod def tearDownClass(cls): super().tearDownClass() print(f"\nShutting down Scala kernel
using {cls.KERNELSPEC} kernelspec") # shutdown environment cls.gatewayClient.shutdown_kernel(cls.kernel) if __name__ == "__main__": unittest.main() ================================================ FILE: enterprise_gateway/mixins.py ================================================ """Mixins for Tornado handlers.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json import os import ssl import traceback from distutils.util import strtobool from http.client import responses from typing import Any, Awaitable, ClassVar, Dict, List, Optional, Set from tornado import web from tornado.log import LogFormatter from traitlets import ( Bool, CaselessStrEnum, CBool, Instance, Integer, TraitError, Type, Unicode, default, observe, validate, ) from traitlets import List as ListTrait from traitlets import Set as SetTrait from traitlets.config import Configurable class CORSMixin: """ Mixes CORS headers into tornado.web.RequestHandlers. """ SETTINGS_TO_HEADERS: ClassVar = { "eg_allow_credentials": "Access-Control-Allow-Credentials", "eg_allow_headers": "Access-Control-Allow-Headers", "eg_allow_methods": "Access-Control-Allow-Methods", "eg_allow_origin": "Access-Control-Allow-Origin", "eg_expose_headers": "Access-Control-Expose-Headers", "eg_max_age": "Access-Control-Max-Age", } def set_default_headers(self) -> None: """ Sets the CORS headers as the default for all responses. Disables CSP configured by the notebook package. It's not necessary for a programmatic API. """ super().set_default_headers() # Add CORS headers after default if they have a non-blank value for settings_name, header_name in self.SETTINGS_TO_HEADERS.items(): header_value = self.settings.get(settings_name) if header_value: self.set_header(header_name, header_value) # Don't set CSP: we're not serving frontend media types, only JSON self.clear_header("Content-Security-Policy") def options(self) -> None: """ Override the notebook implementation to return the headers configured in `set_default_headers` instead of the hardcoded set supported by the handler base class in the notebook project. """ self.finish() class TokenAuthorizationMixin: """Mixes token auth into tornado.web.RequestHandlers and tornado.websocket.WebsocketHandlers. """ header_prefix = "token " header_prefix_len = len(header_prefix) def prepare(self) -> Optional[Awaitable[None]]: """Ensures the correct auth token is present, either as a parameter `token=` or as a header `Authorization: token `. Does nothing unless an auth token is configured in eg_auth_token. If eg_auth_token is set and the token is not present, responds with 401 Unauthorized. Notes ----- Implemented in prepare rather than in `get_user` to avoid interaction with the `@web.authenticated` decorated methods in the notebook package. """ server_token = self.settings.get("eg_auth_token") if server_token and self.request.method != "OPTIONS": client_token = self.get_argument("token", None) if client_token is None: client_token = self.request.headers.get("Authorization") if client_token and client_token.startswith(self.header_prefix): client_token = client_token[self.header_prefix_len :] else: client_token = None if client_token != server_token: return self.send_error(401) return super().prepare() class JSONErrorsMixin: """Mixes `write_error` into tornado.web.RequestHandlers to respond with JSON format errors. """ def write_error(self, status_code: int, **kwargs) -> None: """Responds with an application/json error object.
Overrides the APIHandler.write_error in the notebook server until it properly sets the 'reason' field. Parameters ---------- status_code HTTP status code to set **kwargs Arbitrary keyword args. Only uses `exc_info[1]`, if it exists, to get a `log_message`, `args`, and `reason` from a raised exception that triggered this method Examples -------- A 401 response body takes the form: {"reason": "Unauthorized", "message": "Invalid auth token"} """ exc_info = kwargs.get("exc_info") message = "" reason = responses.get(status_code, "Unknown HTTP Error") reply = { "reason": reason, "message": message, } if exc_info: exception = exc_info[1] # Get the custom message, if defined if isinstance(exception, web.HTTPError): reply["message"] = exception.log_message or message else: reply["message"] = "Unknown server error" reply["traceback"] = "".join(traceback.format_exception(*exc_info)) # Construct the custom reason, if defined custom_reason = getattr(exception, "reason", "") if custom_reason: reply["reason"] = custom_reason self.set_header("Content-Type", "application/json") self.set_status(status_code, reason=reply["reason"]) self.finish(json.dumps(reply)) class EnterpriseGatewayConfigMixin(Configurable): """A mixin for enterprise gateway config.""" # Server IP / PORT binding port_env = "EG_PORT" port_default_value = 8888 port = Integer( port_default_value, config=True, help="Port on which to listen (EG_PORT env var)" ) @default("port") def _port_default(self) -> int: return int(os.getenv(self.port_env, os.getenv("KG_PORT", self.port_default_value))) port_retries_env = "EG_PORT_RETRIES" port_retries_default_value = 50 port_retries = Integer( port_retries_default_value, config=True, help="""Number of ports to try if the specified port is not available (EG_PORT_RETRIES env var)""", ) @default("port_retries") def _port_retries_default(self) -> int: return int( os.getenv( self.port_retries_env, os.getenv("KG_PORT_RETRIES", self.port_retries_default_value) ) ) ip_env = "EG_IP" ip_default_value = "127.0.0.1" ip = Unicode( ip_default_value, config=True, help="IP address on which to listen (EG_IP env var)" ) @default("ip") def _ip_default(self) -> str: return os.getenv(self.ip_env, os.getenv("KG_IP", self.ip_default_value)) # Base URL base_url_env = "EG_BASE_URL" base_url_default_value = "/" base_url = Unicode( base_url_default_value, config=True, help="The base path for mounting all API resources (EG_BASE_URL env var)", ) @default("base_url") def _base_url_default(self) -> str: return os.getenv(self.base_url_env, os.getenv("KG_BASE_URL", self.base_url_default_value)) # Token authorization auth_token_env = "EG_AUTH_TOKEN" # noqa auth_token = Unicode( config=True, help="Authorization token required for all requests (EG_AUTH_TOKEN env var)" ) @default("auth_token") def _auth_token_default(self) -> str: return os.getenv(self.auth_token_env, os.getenv("KG_AUTH_TOKEN", "")) # Begin CORS headers allow_credentials_env = "EG_ALLOW_CREDENTIALS" allow_credentials = Unicode( config=True, help="Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var)", ) @default("allow_credentials") def _allow_credentials_default(self) -> str: return os.getenv(self.allow_credentials_env, os.getenv("KG_ALLOW_CREDENTIALS", "")) allow_headers_env = "EG_ALLOW_HEADERS" allow_headers = Unicode( config=True, help="Sets the Access-Control-Allow-Headers header.
(EG_ALLOW_HEADERS env var)" ) @default("allow_headers") def _allow_headers_default(self) -> str: return os.getenv(self.allow_headers_env, os.getenv("KG_ALLOW_HEADERS", "")) allow_methods_env = "EG_ALLOW_METHODS" allow_methods = Unicode( config=True, help="Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)" ) @default("allow_methods") def _allow_methods_default(self) -> str: return os.getenv(self.allow_methods_env, os.getenv("KG_ALLOW_METHODS", "")) allow_origin_env = "EG_ALLOW_ORIGIN" allow_origin = Unicode( config=True, help="Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)" ) @default("allow_origin") def _allow_origin_default(self) -> str: return os.getenv(self.allow_origin_env, os.getenv("KG_ALLOW_ORIGIN", "")) expose_headers_env = "EG_EXPOSE_HEADERS" expose_headers = Unicode( config=True, help="Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)", ) @default("expose_headers") def _expose_headers_default(self) -> str: return os.getenv(self.expose_headers_env, os.getenv("KG_EXPOSE_HEADERS", "")) trust_xheaders_env = "EG_TRUST_XHEADERS" trust_xheaders = CBool( False, config=True, help="""Use x-* header values for overriding the remote-ip, useful when the application is behind a proxy. (EG_TRUST_XHEADERS env var)""", ) @default("trust_xheaders") def _trust_xheaders_default(self) -> bool: return strtobool( os.getenv(self.trust_xheaders_env, os.getenv("KG_TRUST_XHEADERS", "False")) ) certfile_env = "EG_CERTFILE" certfile = Unicode( None, config=True, allow_none=True, help="The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)", ) @default("certfile") def _certfile_default(self) -> Optional[str]: return os.getenv(self.certfile_env, os.getenv("KG_CERTFILE")) keyfile_env = "EG_KEYFILE" keyfile = Unicode( None, config=True, allow_none=True, help="The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var)", ) @default("keyfile") def _keyfile_default(self) -> Optional[str]: return os.getenv(self.keyfile_env, os.getenv("KG_KEYFILE")) client_ca_env = "EG_CLIENT_CA" client_ca = Unicode( None, config=True, allow_none=True, help="""The full path to a certificate authority certificate for SSL/TLS client authentication. (EG_CLIENT_CA env var)""", ) @default("client_ca") def _client_ca_default(self) -> Optional[str]: return os.getenv(self.client_ca_env, os.getenv("KG_CLIENT_CA")) ssl_version_env = "EG_SSL_VERSION" ssl_version_default_value = ssl.PROTOCOL_TLSv1_2 ssl_version = Integer( None, config=True, allow_none=True, help="""Sets the SSL version to use for the web socket connection. (EG_SSL_VERSION env var)""", ) @default("ssl_version") def _ssl_version_default(self) -> Optional[int]: ssl_from_env = os.getenv(self.ssl_version_env, os.getenv("KG_SSL_VERSION")) return ssl_from_env if ssl_from_env is None else int(ssl_from_env) max_age_env = "EG_MAX_AGE" max_age = Unicode( config=True, help="Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)" ) @default("max_age") def _max_age_default(self) -> str: return os.getenv(self.max_age_env, os.getenv("KG_MAX_AGE", "")) # End CORS headers max_kernels_env = "EG_MAX_KERNELS" max_kernels = Integer( None, config=True, allow_none=True, help="""Limits the number of kernel instances allowed to run by this gateway. Unbounded by default.
(EG_MAX_KERNELS env var)""", ) @default("max_kernels") def _max_kernels_default(self) -> Optional[int]: val = os.getenv(self.max_kernels_env, os.getenv("KG_MAX_KERNELS")) return val if val is None else int(val) default_kernel_name_env = "EG_DEFAULT_KERNEL_NAME" default_kernel_name = Unicode( config=True, help="Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)", ) @default("default_kernel_name") def _default_kernel_name_default(self) -> str: # defaults to Jupyter's default kernel name on empty string return os.getenv(self.default_kernel_name_env, os.getenv("KG_DEFAULT_KERNEL_NAME", "")) list_kernels_env = "EG_LIST_KERNELS" list_kernels = Bool( config=True, help="""Permits listing of the running kernels using API endpoints /api/kernels and /api/sessions. (EG_LIST_KERNELS env var) Note: Jupyter Notebook allows this by default but Jupyter Enterprise Gateway does not.""", ) @default("list_kernels") def _list_kernels_default(self) -> bool: return ( os.getenv(self.list_kernels_env, os.getenv("KG_LIST_KERNELS", "False")).lower() == "true" ) env_whitelist = ListTrait( config=True, help="""DEPRECATED, use client_envs.""", ) @observe("env_whitelist") def _update_env_whitelist(self, change): self.log.warning("env_whitelist is deprecated, use client_envs") self.client_envs = change["new"] client_envs_env = "EG_CLIENT_ENVS" client_envs = ListTrait( config=True, help="""Environment variables allowed to be set when a client requests a new kernel. (EG_CLIENT_ENVS env var)""", ) @default("client_envs") def _client_envs_default(self): return os.getenv(self.client_envs_env, os.getenv("EG_ENV_WHITELIST", "")).split(",") env_process_whitelist = ListTrait( config=True, help="""DEPRECATED, use inherited_envs""", ) @observe("env_process_whitelist") def _update_env_process_whitelist(self, change): self.log.warning("env_process_whitelist is deprecated, use inherited_envs") self.inherited_envs = change["new"] inherited_envs_env = "EG_INHERITED_ENVS" inherited_envs = ListTrait( config=True, help="""Environment variables allowed to be inherited from the spawning process by the kernel. (EG_INHERITED_ENVS env var)""", ) @default("inherited_envs") def _inherited_envs_default(self) -> List[str]: return os.getenv(self.inherited_envs_env, os.getenv("EG_ENV_PROCESS_WHITELIST", "")).split( "," ) kernel_headers_env = "EG_KERNEL_HEADERS" kernel_headers = ListTrait( config=True, help="""Request headers to make available to kernel launch framework. (EG_KERNEL_HEADERS env var)""", ) @default("kernel_headers") def _kernel_headers_default(self) -> List[str]: default_headers = os.getenv(self.kernel_headers_env) return default_headers.split(",") if default_headers else [] # Remote hosts remote_hosts_env = "EG_REMOTE_HOSTS" remote_hosts_default_value = "localhost" remote_hosts = ListTrait( default_value=[remote_hosts_default_value], config=True, help="""Bracketed comma-separated list of hosts on which DistributedProcessProxy kernels will be launched e.g., ['host1','host2']. (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated)""", ) @default("remote_hosts") def _remote_hosts_default(self) -> List[str]: return os.getenv(self.remote_hosts_env, self.remote_hosts_default_value).split(",") # load_balancing_algorithm load_balancing_algorithm_env = "EG_LOAD_BALANCING_ALGORITHM" load_balancing_algorithm_default_value = "round-robin" load_balancing_algorithm = Unicode( load_balancing_algorithm_default_value, config=True, help="""Specifies which load balancing algorithm DistributedProcessProxy should use. 
Must be one of "round-robin" or "least-connection". (EG_LOAD_BALANCING_ALGORITHM env var) """, ) @default("load_balancing_algorithm") def _load_balancing_algorithm_default(self) -> str: return os.getenv( self.load_balancing_algorithm_env, self.load_balancing_algorithm_default_value ) @validate("load_balancing_algorithm") def _validate_load_balancing_algorithm(self, proposal: Dict[str, str]) -> str: value = proposal["value"] if value not in ["round-robin", "least-connection"]: msg = f"Invalid load_balancing_algorithm value {value}, not in [round-robin,least-connection]" raise TraitError(msg) return value # Yarn endpoint yarn_endpoint_env = "EG_YARN_ENDPOINT" yarn_endpoint = Unicode( None, config=True, allow_none=True, help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the active resource manager. (EG_YARN_ENDPOINT env var)""", ) @default("yarn_endpoint") def _yarn_endpoint_default(self) -> Optional[str]: return os.getenv(self.yarn_endpoint_env) # Alt Yarn endpoint alt_yarn_endpoint_env = "EG_ALT_YARN_ENDPOINT" alt_yarn_endpoint = Unicode( None, config=True, allow_none=True, help="""The http url specifying the alternate YARN Resource Manager. This value should be set when YARN Resource Managers are configured for high availability. Note: If both YARN endpoints are NOT set, the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the active resource manager. (EG_ALT_YARN_ENDPOINT env var)""", ) @default("alt_yarn_endpoint") def _alt_yarn_endpoint_default(self) -> Optional[str]: return os.getenv(self.alt_yarn_endpoint_env) yarn_endpoint_security_enabled_env = "EG_YARN_ENDPOINT_SECURITY_ENABLED" yarn_endpoint_security_enabled_default_value = False yarn_endpoint_security_enabled = Bool( yarn_endpoint_security_enabled_default_value, config=True, help="""Is YARN Kerberos/SPNEGO Security enabled (True/False). (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)""", ) @default("yarn_endpoint_security_enabled") def _yarn_endpoint_security_enabled_default(self) -> bool: # Compare against "true" so string values like "False" don't evaluate truthy return ( os.getenv( self.yarn_endpoint_security_enabled_env, str(self.yarn_endpoint_security_enabled_default_value), ).lower() == "true" ) # Conductor endpoint conductor_endpoint_env = "EG_CONDUCTOR_ENDPOINT" conductor_endpoint_default_value = None conductor_endpoint = Unicode( conductor_endpoint_default_value, allow_none=True, config=True, help="""The http url for accessing the Conductor REST API. (EG_CONDUCTOR_ENDPOINT env var)""", ) @default("conductor_endpoint") def _conductor_endpoint_default(self) -> Optional[str]: return os.getenv(self.conductor_endpoint_env, self.conductor_endpoint_default_value) _log_formatter_cls = LogFormatter # traitlet default is LevelFormatter @default("log_format") def _default_log_format(self) -> str: """override default log format to include milliseconds""" return ( "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" ) # Impersonation enabled impersonation_enabled_env = "EG_IMPERSONATION_ENABLED" impersonation_enabled = Bool( False, config=True, help="""Indicates whether impersonation will be performed during kernel launch.
(EG_IMPERSONATION_ENABLED env var)""", ) @default("impersonation_enabled") def _impersonation_enabled_default(self) -> bool: return bool(os.getenv(self.impersonation_enabled_env, "false").lower() == "true") # Unauthorized users unauthorized_users_env = "EG_UNAUTHORIZED_USERS" unauthorized_users_default_value = "root" unauthorized_users = SetTrait( default_value={unauthorized_users_default_value}, config=True, help="""Comma-separated list of user names (e.g., ['root','admin']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will prevent the kernel's launch and result in an HTTP 403 (Forbidden) error. (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)""", ) @default("unauthorized_users") def _unauthorized_users_default(self) -> Set[str]: return set( os.getenv(self.unauthorized_users_env, self.unauthorized_users_default_value).split(",") ) # Authorized users authorized_users_env = "EG_AUTHORIZED_USERS" authorized_users = SetTrait( config=True, help="""Comma-separated list of user names (e.g., ['bob','alice']) against which KERNEL_USERNAME will be compared. Any match (case-sensitive) will allow the kernel's launch, otherwise an HTTP 403 (Forbidden) error will be raised. The set of unauthorized users takes precedence. This option should be used carefully as it can dramatically limit who can launch kernels. (EG_AUTHORIZED_USERS env var - non-bracketed, just comma-separated)""", ) @default("authorized_users") def _authorized_users_default(self) -> Set[str]: au_env = os.getenv(self.authorized_users_env) return set(au_env.split(",")) if au_env is not None else set() # Authorized origin authorized_origin_env = "EG_AUTHORIZED_ORIGIN" authorized_origin = Unicode( config=True, help="""Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match against the request's SSL certificate. An HTTP 403 (Forbidden) error will be raised on a failed match. This option requires TLS to be enabled. It does not support IP addresses. (EG_AUTHORIZED_ORIGIN env var)""", ) # Port range port_range_env = "EG_PORT_RANGE" port_range_default_value = "0..0" port_range = Unicode( port_range_default_value, config=True, help="""Specifies the lower and upper port numbers from which ports are created. The bounded values are separated by '..' (e.g., 33245..34245 specifies a range of 1000 ports to be randomly selected). A range of zero (e.g., 33245..33245 or 0..0) disables port-range enforcement. (EG_PORT_RANGE env var)""", ) @default("port_range") def _port_range_default(self) -> str: return os.getenv(self.port_range_env, self.port_range_default_value) # Max Kernels per User max_kernels_per_user_env = "EG_MAX_KERNELS_PER_USER" max_kernels_per_user_default_value = -1 max_kernels_per_user = Integer( max_kernels_per_user_default_value, config=True, help="""Specifies the maximum number of kernels a user can have active simultaneously. A value of -1 disables enforcement. (EG_MAX_KERNELS_PER_USER env var)""", ) @default("max_kernels_per_user") def _max_kernels_per_user_default(self) -> int: return int( os.getenv(self.max_kernels_per_user_env, self.max_kernels_per_user_default_value) ) ws_ping_interval_env = "EG_WS_PING_INTERVAL_SECS" ws_ping_interval_default_value = 30 ws_ping_interval = Integer( ws_ping_interval_default_value, config=True, help="""Specifies the ping interval (in seconds) to be used by the websocket connections associated with spawned kernels. Set this value to 0 to disable the ping mechanism.
(EG_WS_PING_INTERVAL_SECS env var)""", ) @default("ws_ping_interval") def _ws_ping_interval_default(self) -> int: return int(os.getenv(self.ws_ping_interval_env, self.ws_ping_interval_default_value)) # Dynamic Update Interval dynamic_config_interval_env = "EG_DYNAMIC_CONFIG_INTERVAL" dynamic_config_interval_default_value = 0 dynamic_config_interval = Integer( dynamic_config_interval_default_value, min=0, config=True, help="""Specifies the interval (in seconds) at which configuration files are polled for changes. A value of 0 or less disables dynamic config updates. (EG_DYNAMIC_CONFIG_INTERVAL env var)""", ) @default("dynamic_config_interval") def _dynamic_config_interval_default(self) -> int: return int( os.getenv(self.dynamic_config_interval_env, self.dynamic_config_interval_default_value) ) @observe("dynamic_config_interval") def _dynamic_config_interval_changed(self, event: Dict[str, Any]) -> None: prev_val = event["old"] self.dynamic_config_interval = event["new"] if self.dynamic_config_interval != prev_val: # Values are different. Stop the current poller. If new value is > 0, start a poller. if self.dynamic_config_poller: self.dynamic_config_poller.stop() self.dynamic_config_poller = None if self.dynamic_config_interval <= 0: self.log.warning( "Dynamic configuration updates have been disabled and cannot be re-enabled " "without restarting Enterprise Gateway!" ) # The interval has been changed, but still positive elif prev_val > 0 and hasattr(self, "init_dynamic_configs"): self.init_dynamic_configs() # Restart the poller dynamic_config_poller = None # Availability Mode AVAILABILITY_STANDALONE = "standalone" AVAILABILITY_REPLICATION = "replication" availability_mode_env = "EG_AVAILABILITY_MODE" availability_mode_default_value = None availability_mode = CaselessStrEnum( allow_none=True, values=[AVAILABILITY_REPLICATION, AVAILABILITY_STANDALONE], config=True, help="""Specifies the type of availability. Values must be one of "standalone" or "replication". (EG_AVAILABILITY_MODE env var)""", ) @default("availability_mode") def _availability_mode_env_default(self): return os.getenv(self.availability_mode_env, self.availability_mode_default_value) kernel_spec_manager = Instance("jupyter_client.kernelspec.KernelSpecManager", allow_none=True) kernel_spec_manager_class = Type( default_value="jupyter_client.kernelspec.KernelSpecManager", config=True, help=""" The kernel spec manager class to use. Must be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. """, ) kernel_spec_cache_class = Type( default_value="enterprise_gateway.services.kernelspecs.KernelSpecCache", config=True, help=""" The kernel spec cache class to use. Must be a subclass of `enterprise_gateway.services.kernelspecs.KernelSpecCache`. """, ) kernel_manager_class = Type( klass="enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager", default_value="enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager", config=True, help=""" The kernel manager class to use. Must be a subclass of `enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager`. """, ) kernel_session_manager_class = Type( klass="enterprise_gateway.services.sessions.kernelsessionmanager.KernelSessionManager", default_value="enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager", config=True, help=""" The kernel session manager class to use. Must be a subclass of `enterprise_gateway.services.sessions.kernelsessionmanager.KernelSessionManager`.
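Example usage (illustrative; `my_module.MySessionManager` stands in for any concrete subclass): c.EnterpriseGatewayApp.kernel_session_manager_class = 'my_module.MySessionManager'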
""", ) authorizer_class = Type( klass="jupyter_server.auth.authorizer.Authorizer", default_value="jupyter_server.auth.authorizer.AllowAllAuthorizer", config=True, help=""" The authorizer class to use for authenticating and authorizing requests. By default, Enterprise Gateway uses AllowAllAuthorizer which allows all authenticated requests. You can configure a custom authorizer to implement authentication and authorization logic. Example usage: c.EnterpriseGatewayApp.authorizer_class = 'my_module.MyAuthorizer' Environment variable: EG_AUTHORIZER_CLASS """, ) authorizer_class_env = "EG_AUTHORIZER_CLASS" @default("authorizer_class") def _authorizer_class_default(self): return os.getenv( self.authorizer_class_env, "jupyter_server.auth.authorizer.AllowAllAuthorizer" ) ================================================ FILE: enterprise_gateway/services/__init__.py ================================================ ================================================ FILE: enterprise_gateway/services/api/__init__.py ================================================ ================================================ FILE: enterprise_gateway/services/api/handlers.py ================================================ """Tornado handlers for kernel specs.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import os from typing import List from jupyter_server.utils import ensure_async from tornado import web from ...mixins import CORSMixin class BaseSpecHandler(CORSMixin, web.StaticFileHandler): """Exposes the ability to return specifications from static files""" @staticmethod def get_resource_metadata() -> tuple: """Returns the (resource, mime-type) for the handlers spec.""" pass def initialize(self) -> None: """Initializes the instance of this class to serve files. The handler is initialized to serve files from the directory where this module is defined. `path` parameter will be overridden. 
""" web.StaticFileHandler.initialize(self, path=os.path.dirname(__file__)) async def get(self) -> None: """Handler for a get on a specific handler""" resource_name, content_type = self.get_resource_metadata() self.set_header("Content-Type", content_type) res = web.StaticFileHandler.get(self, resource_name) await ensure_async(res) def options(self, **kwargs) -> None: """Method for properly handling CORS pre-flight""" self.finish() class SpecJsonHandler(BaseSpecHandler): """Exposes a JSON swagger specification""" @staticmethod def get_resource_metadata() -> tuple: """Get the resource metadata.""" return "swagger.json", "application/json" class APIYamlHandler(BaseSpecHandler): """Exposes a YAML swagger specification""" @staticmethod def get_resource_metadata() -> tuple: """Get the resource metadata.""" return "swagger.yaml", "text/x-yaml" default_handlers: List[tuple] = [ (f"/api/{SpecJsonHandler.get_resource_metadata()[0]}", SpecJsonHandler), (f"/api/{APIYamlHandler.get_resource_metadata()[0]}", APIYamlHandler), ] ================================================ FILE: enterprise_gateway/services/api/swagger.json ================================================ { "swagger": "2.0", "info": { "title": "Jupyter Enterprise Gateway API", "description": "The API for the Jupyter Enterprise Gateway", "version": "6", "contact": { "name": "Jupyter Project", "url": "https://jupyter.org" } }, "produces": ["application/json"], "consumes": ["application/json"], "parameters": { "kernel": { "name": "kernel_id", "required": true, "in": "path", "description": "kernel uuid", "type": "string", "format": "uuid" }, "session": { "name": "session", "required": true, "in": "path", "description": "session uuid", "type": "string", "format": "uuid" } }, "securityDefinitions": { "tokenHeader": { "type": "apiKey", "name": "Authorization", "in": "header", "description": "The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token {value}` where `{value}` is the value of the token. Alternatively, the token can be passed as a query parameter." }, "tokenParam": { "type": "apiKey", "name": "token", "in": "query", "description": "The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token={value}` where `{value}` is the value of the token. Alternatively, the token can be passed as a header." 
} }, "security": [ { "tokenHeader": [] }, { "tokenParam": [] } ], "paths": { "/api": { "get": { "summary": "Get API info", "tags": ["api"], "responses": { "200": { "description": "Returns information about the API", "schema": { "$ref": "#/definitions/ApiInfo" } } } } }, "/api/swagger.yaml": { "get": { "produces": ["text/x-yaml"], "summary": "Get API info", "tags": ["api"], "responses": { "200": { "description": "Returns a swagger specification in yaml" } } } }, "/api/swagger.json": { "get": { "summary": "Get API info", "tags": ["api"], "responses": { "200": { "description": "Returns a swagger specification in json" } } } }, "/api/kernelspecs": { "get": { "summary": "Get kernel specs", "tags": ["kernelspecs"], "parameters": { "name": "user", "required": false, "in": "query", "description": "When present, kernelspec results will be filtered based on the configured authorization of specified value.", "type": "string" }, "responses": { "200": { "description": "If no query parameter is specified, all kernel specs will be returned; otherwise the result set is filtered based on the query parameter.", "schema": { "type": "object", "properties": { "default": { "type": "string", "description": "The name of the default kernel." }, "kernelspecs": { "type": "object", "additionalProperties": { "$ref": "#/definitions/KernelSpec" } } } } } } } }, "/api/kernels": { "get": { "summary": "List the JSON data for all currently running kernels", "tags": ["kernels"], "responses": { "200": { "description": "List of running kernels", "schema": { "type": "array", "items": { "$ref": "#/definitions/Kernel" } } }, "403": { "description": "This method is not accessible when `EnterpriseGatewayApp.list_kernels` is `False`.", "schema": { "$ref": "#/definitions/Error" } } } }, "post": { "summary": "Start a kernel and return the uuid", "tags": ["kernels"], "parameters": [ { "name": "start_kernel_body", "in": "body", "schema": { "type": "object", "properties": { "name": { "type": "string", "description": "Kernel spec name (defaults to default kernel spec for server)" }, "env": { "type": "object", "description": "A dictionary of environment variables and values to include in the kernel process - subject to filtering.", "additionalProperties": { "type": "string" } } } } } ], "responses": { "201": { "description": "The metadata about the newly created kernel.", "schema": { "$ref": "#/definitions/Kernel" }, "headers": { "Location": { "description": "Model for started kernel", "type": "string", "format": "url" } } }, "403": { "description": "The maximum number of kernels have been created.", "schema": { "$ref": "#/definitions/Error" } } } } }, "/api/kernels/{kernel_id}": { "parameters": [ { "$ref": "#/parameters/kernel" } ], "get": { "summary": "Get kernel information", "tags": ["kernels"], "responses": { "200": { "description": "Information about the kernel", "schema": { "$ref": "#/definitions/Kernel" } } } }, "delete": { "summary": "Kill a kernel and delete the kernel id", "tags": ["kernels"], "responses": { "204": { "description": "Kernel deleted" } } } }, "/api/kernels/{kernel_id}/channels": { "parameters": [ { "$ref": "#/parameters/kernel" } ], "get": { "summary": "Upgrades the connection to a websocket connection.", "tags": ["channels"], "responses": { "200": { "description": "The connection will be upgraded to a websocket." 
} } } }, "/kernels/{kernel_id}/interrupt": { "parameters": [ { "$ref": "#/parameters/kernel" } ], "post": { "summary": "Interrupt a kernel", "tags": ["kernels"], "responses": { "204": { "description": "Kernel interrupted" } } } }, "/kernels/{kernel_id}/restart": { "parameters": [ { "$ref": "#/parameters/kernel" } ], "post": { "summary": "Restart a kernel", "tags": ["kernels"], "responses": { "200": { "description": "Kernel interrupted", "headers": { "Location": { "description": "URL for kernel commands", "type": "string", "format": "url" } }, "schema": { "$ref": "#/definitions/Kernel" } } } } }, "/api/sessions": { "get": { "summary": "List available sessions", "tags": ["sessions"], "responses": { "200": { "description": "List of current sessions", "schema": { "type": "array", "items": { "$ref": "#/definitions/Session" } } }, "403": { "description": "This method is not accessible when the kernel gateway when the `list_kernels` option is `False`.", "schema": { "$ref": "#/definitions/Error" } } } }, "post": { "summary": "Create a new session, or return an existing session if a session of the same name already exists.", "tags": ["sessions"], "parameters": [ { "name": "session", "in": "body", "schema": { "$ref": "#/definitions/Session" } } ], "responses": { "201": { "description": "Session created or returned", "schema": { "$ref": "#/definitions/Session" }, "headers": { "Location": { "description": "URL for session commands", "type": "string", "format": "url" } } }, "501": { "description": "Session not available", "schema": { "$ref": "#/definitions/Error" } } } } }, "/api/sessions/{session}": { "parameters": [ { "$ref": "#/parameters/session" } ], "get": { "summary": "Get session", "tags": ["sessions"], "responses": { "200": { "description": "Session", "schema": { "$ref": "#/definitions/Session" } } } }, "patch": { "summary": "This can be used to rename the session.", "tags": ["sessions"], "parameters": [ { "name": "model", "in": "body", "required": true, "schema": { "$ref": "#/definitions/Session" } } ], "responses": { "200": { "description": "Session", "schema": { "$ref": "#/definitions/Session" } }, "400": { "description": "No data provided", "schema": { "$ref": "#/definitions/Error" } } } }, "delete": { "summary": "Delete a session", "tags": ["sessions"], "responses": { "204": { "description": "Session (and kernel) were deleted" }, "410": { "description": "Kernel was deleted before the session, and the session was *not* deleted" } } } } }, "definitions": { "Error": { "description": "An error response from the server", "type": "object", "properties": { "reason": { "type": "string", "description": "The reason for the failure" }, "message": { "type": "string", "description": "The message logged when the error occurred" } } }, "KernelSpec": { "description": "Kernel spec (contents of kernel.json)", "properties": { "name": { "type": "string", "description": "Unique name for kernel" }, "KernelSpecFile": { "$ref": "#/definitions/KernelSpecFile", "description": "Kernel spec json file" }, "resources": { "type": "object", "properties": { "kernel.js": { "type": "string", "format": "filename", "description": "path for kernel.js file" }, "kernel.css": { "type": "string", "format": "filename", "description": "path for kernel.css file" }, "logo-*": { "type": "string", "format": "filename", "description": "path for logo file. 
Logo filenames are of the form `logo-widthxheight`" } } } } }, "KernelSpecFile": { "description": "Kernel spec json file", "required": ["argv", "display_name", "language"], "properties": { "language": { "type": "string", "description": "The programming language which this kernel runs. This will be stored in notebook metadata." }, "argv": { "type": "array", "description": "A list of command line arguments used to start the kernel. The text `{connection_file}` in any argument will be replaced with the path to the connection file.", "items": { "type": "string" } }, "display_name": { "type": "string", "description": "The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API, this can contain arbitrary unicode characters." }, "codemirror_mode": { "type": "string", "description": "Codemirror mode. Can be a string *or* a valid Codemirror mode object. This defaults to the string from the `language` property." }, "env": { "type": "object", "description": "A dictionary of environment variables to set for the kernel. These will be added to the current environment variables.", "additionalProperties": { "type": "string" } }, "metadata": { "type": "object", "description": "A free-form dictionary consisting of additional information about the kernel and its environment.", "additionalProperties": true }, "help_links": { "type": "array", "description": "Help items to be displayed in the help menu in the notebook UI.", "items": { "type": "object", "required": ["text", "url"], "properties": { "text": { "type": "string", "description": "menu item link text" }, "url": { "type": "string", "format": "URL", "description": "menu item link url" } } } } } }, "Kernel": { "description": "Kernel information", "required": ["id", "name"], "properties": { "id": { "type": "string", "format": "uuid", "description": "uuid of kernel" }, "name": { "type": "string", "description": "kernel spec name" }, "last_activity": { "type": "string", "description": "ISO 8601 timestamp for the last-seen activity on this kernel.\nUse this in combination with execution_state == 'idle' to identify\nwhich kernels have been idle since a given time.\nTimestamps will be UTC, indicated by the 'Z' suffix.\nAdded in notebook server 5.0.\n" }, "connections": { "type": "number", "description": "The number of active connections to this kernel.\n" }, "execution_state": { "type": "string", "description": "Current execution state of the kernel (typically 'idle' or 'busy', but may be other values, such as 'starting').\nAdded in notebook server 5.0.\n" } } }, "Session": { "description": "A session", "type": "object", "properties": { "id": { "type": "string", "format": "uuid" }, "path": { "type": "string", "description": "path to the session" }, "name": { "type": "string", "description": "name of the session" }, "type": { "type": "string", "description": "session type" }, "kernel": { "$ref": "#/definitions/Kernel" } } }, "ApiInfo": { "description": "Information about the api", "type": "object", "properties": { "version": { "type": "string" }, "gateway_version": { "type": "string" } } } } } ================================================ FILE: enterprise_gateway/services/api/swagger.yaml ================================================ swagger: "2.0" info: title: Jupyter Enterprise Gateway API description: The API for the Jupyter Enterprise Gateway version: "6" contact: name: Jupyter Project url: https://jupyter.org produces: - application/json consumes: - application/json parameters: kernel: name: kernel_id required: true in:
path description: kernel uuid type: string format: uuid session: name: session required: true in: path description: session uuid type: string format: uuid securityDefinitions: tokenHeader: type: apiKey name: Authorization in: header description: | The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token {value}` where `{value}` is the value of the token. Alternatively, the token can be passed as a query parameter. tokenParam: type: apiKey name: token in: query description: | The authorization token to verify authorization. This is only needed when `EnterpriseGatewayApp.auth_token` is set. This should take the form of `token={value}` where `{value}` is the value of the token. Alternatively, the token can be passed as a header. security: - tokenHeader: [] - tokenParam: [] paths: /api: get: summary: Get API info tags: - api responses: 200: description: Returns information about the API schema: $ref: "#/definitions/ApiInfo" /api/swagger.yaml: get: produces: - text/x-yaml summary: Get API info tags: - api responses: 200: description: Returns a swagger specification in yaml /api/swagger.json: get: summary: Get API info tags: - api responses: 200: description: Returns a swagger specification in json /api/kernelspecs: get: summary: Get kernel specs tags: - kernelspecs parameters: - name: user in: query description: When present, kernelspec results will be filtered based on the configured authorization of specified value. required: false type: string responses: 200: description: | If no query parameter is specified, all kernel specs will be returned; otherwise the result set is filtered based on the query parameter. schema: type: object properties: default: type: string description: The name of the default kernel. kernelspecs: type: object additionalProperties: $ref: "#/definitions/KernelSpec" /api/kernels: get: summary: List the JSON data for all currently running kernels tags: - kernels responses: 200: description: List of running kernels schema: type: array items: $ref: "#/definitions/Kernel" 403: description: | This method is not accessible when `EnterpriseGatewayApp.list_kernels` is `False`. schema: $ref: "#/definitions/Error" post: summary: Start a kernel and return the uuid tags: - kernels parameters: - name: start_kernel_body in: body schema: type: object properties: name: type: string description: Kernel spec name (defaults to default kernel spec for server) env: type: object description: | A dictionary of environment variables and values to include in the kernel process - subject to filtering. additionalProperties: type: string responses: 201: description: The metadata about the newly created kernel. schema: $ref: "#/definitions/Kernel" headers: Location: description: Model for started kernel type: string format: url 403: description: The maximum number of kernels has been reached. schema: $ref: "#/definitions/Error" /api/kernels/{kernel_id}: parameters: - $ref: "#/parameters/kernel" get: summary: Get kernel information tags: - kernels responses: 200: description: Information about the kernel schema: $ref: "#/definitions/Kernel" delete: summary: Kill a kernel and delete the kernel id tags: - kernels responses: 204: description: Kernel deleted /api/kernels/{kernel_id}/channels: parameters: - $ref: "#/parameters/kernel" get: summary: Upgrades the connection to a websocket connection. tags: - channels responses: 200: description: The connection will be upgraded to a websocket.
/api/kernels/{kernel_id}/interrupt: parameters: - $ref: "#/parameters/kernel" post: summary: Interrupt a kernel tags: - kernels responses: 204: description: Kernel interrupted /api/kernels/{kernel_id}/restart: parameters: - $ref: "#/parameters/kernel" post: summary: Restart a kernel tags: - kernels responses: 200: description: Kernel restarted headers: Location: description: URL for kernel commands type: string format: url schema: $ref: "#/definitions/Kernel" /api/sessions: get: summary: List available sessions tags: - sessions responses: 200: description: List of current sessions schema: type: array items: $ref: "#/definitions/Session" 403: description: | This method is not accessible when the `list_kernels` option is `False`. schema: $ref: "#/definitions/Error" post: summary: | Create a new session, or return an existing session if a session of the same name already exists. tags: - sessions parameters: - name: session in: body schema: $ref: "#/definitions/Session" responses: 201: description: Session created or returned schema: $ref: "#/definitions/Session" headers: Location: description: URL for session commands type: string format: url 501: description: Session not available schema: $ref: "#/definitions/Error" /api/sessions/{session}: parameters: - $ref: "#/parameters/session" get: summary: Get session tags: - sessions responses: 200: description: Session schema: $ref: "#/definitions/Session" patch: summary: This can be used to rename the session. tags: - sessions parameters: - name: model in: body required: true schema: $ref: "#/definitions/Session" responses: 200: description: Session schema: $ref: "#/definitions/Session" 400: description: No data provided schema: $ref: "#/definitions/Error" delete: summary: Delete a session tags: - sessions responses: 204: description: Session (and kernel) were deleted 410: description: | Kernel was deleted before the session, and the session was *not* deleted definitions: Error: description: An error response from the server type: object properties: reason: type: string description: The reason for the failure message: type: string description: The message logged when the error occurred KernelSpec: description: Kernel spec (contents of kernel.json) properties: name: type: string description: Unique name for kernel KernelSpecFile: $ref: "#/definitions/KernelSpecFile" description: Kernel spec json file resources: type: object properties: kernel.js: type: string format: filename description: path for kernel.js file kernel.css: type: string format: filename description: path for kernel.css file logo-*: type: string format: filename description: | path for logo file. Logo filenames are of the form `logo-widthxheight` KernelSpecFile: description: Kernel spec json file required: - argv - display_name - language properties: language: type: string description: The programming language which this kernel runs. This will be stored in notebook metadata. argv: type: array description: | A list of command line arguments used to start the kernel. The text `{connection_file}` in any argument will be replaced with the path to the connection file. items: type: string display_name: type: string description: | The kernel's name as it should be displayed in the UI. Unlike the kernel name used in the API, this can contain arbitrary unicode characters. codemirror_mode: type: string description: | Codemirror mode. Can be a string *or* a valid Codemirror mode object. This defaults to the string from the `language` property.
env: type: object description: | A dictionary of environment variables to set for the kernel. These will be added to the current environment variables. additionalProperties: type: string metadata: type: object description: | A free-form dictionary consisting of additional information about the kernel and its environment. additionalProperties: true help_links: type: array description: Help items to be displayed in the help menu in the notebook UI. items: type: object required: - text - url properties: text: type: string description: menu item link text url: type: string format: URL description: menu item link url Kernel: description: Kernel information required: - id - name properties: id: type: string format: uuid description: uuid of kernel name: type: string description: kernel spec name last_activity: type: string description: | ISO 8601 timestamp for the last-seen activity on this kernel. Use this in combination with execution_state == 'idle' to identify which kernels have been idle since a given time. Timestamps will be UTC, indicated by the 'Z' suffix. Added in notebook server 5.0. connections: type: number description: | The number of active connections to this kernel. execution_state: type: string description: | Current execution state of the kernel (typically 'idle' or 'busy', but may be other values, such as 'starting'). Added in notebook server 5.0. Session: description: A session type: object properties: id: type: string format: uuid path: type: string description: path to the session name: type: string description: name of the session type: type: string description: session type kernel: $ref: "#/definitions/Kernel" ApiInfo: description: Information about the api type: object properties: version: type: string gateway_version: type: string ================================================ FILE: enterprise_gateway/services/kernels/__init__.py ================================================ ================================================ FILE: enterprise_gateway/services/kernels/handlers.py ================================================ """Tornado handlers for kernel CRUD and communication.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import json import os from functools import partial from typing import Any import jupyter_server.services.kernels.handlers as jupyter_server_handlers import tornado from jupyter_client.jsonutil import date_default from tornado import web from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin MAX_ENV_VALUE_LENGTH = 4096 class MainKernelHandler( TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.MainKernelHandler ): """Extends the jupyter_server main kernel handler with token auth, CORS, and JSON errors.
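An illustrative start request (the values are hypothetical) would be: POST /api/kernels with body {"name": "spark_python_yarn_cluster", "env": {"KERNEL_USERNAME": "alice", "FOO": "bar"}}. Variables prefixed with KERNEL_ are always candidates for inclusion, while others (FOO here) reach the kernel only if named in the configured client_envs (or when client_envs is ['*']); values must be strings and are length-limited (see _build_kernel_env below).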
""" @property def client_envs(self): return self.settings["eg_client_envs"] @property def inherited_envs(self): return self.settings["eg_inherited_envs"] def _build_kernel_env(self, model_env: dict[str, Any]) -> dict[str, str]: """Build the kernel environment from the request model and server settings.""" env = {key: value for key, value in os.environ.items() if key in self.inherited_envs} allowed_envs: list[str] allowed_envs = list(model_env.keys()) if self.client_envs == ["*"] else self.client_envs for key, value in model_env.items(): if key.startswith("KERNEL_") or key in allowed_envs: if not isinstance(value, str): raise tornado.web.HTTPError( 400, f"Environment variable '{key}' value must be a string" ) if len(value) > MAX_ENV_VALUE_LENGTH: raise tornado.web.HTTPError( 400, f"Environment variable '{key}' exceeds maximum length" ) env[key] = value return env def _build_kernel_headers(self) -> dict[str, str]: """Build kernel headers from the request based on server settings.""" kernel_headers = {} missing_headers = [] kernel_header_names = self.settings["eg_kernel_headers"] for name in kernel_header_names: if name: value = self.request.headers.get(name) if value: kernel_headers[name] = value else: missing_headers.append(name) if missing_headers: self.log.warning( "The following headers specified in 'kernel-headers' were not found: {}".format( missing_headers ) ) return kernel_headers async def post(self): """Overrides the super class method to manage env in the request body. Max kernel limits are now enforced in RemoteMappingKernelManager.start_kernel(). Raises ------ tornado.web.HTTPError 403 Forbidden if either max kernel limit is reached (total or per user, if configured) """ max_kernels = self.settings["eg_max_kernels"] if max_kernels is not None: km = self.settings["kernel_manager"] kernels = km.list_kernels() if len(kernels) >= max_kernels: raise tornado.web.HTTPError(403, "Resource Limit") model = self.get_json_body() if model is not None and "env" in model: if not isinstance(model["env"], dict): raise tornado.web.HTTPError(400) env = self._build_kernel_env(model["env"]) kernel_headers = self._build_kernel_headers() # No way to override the call to start_kernel on the kernel manager # so do a temporary partial (ugh) orig_start = self.kernel_manager.start_kernel self.kernel_manager.start_kernel = partial( self.kernel_manager.start_kernel, env=env, kernel_headers=kernel_headers ) try: await super().post() finally: self.kernel_manager.start_kernel = orig_start else: await super().post() async def get(self): """Overrides the super class method to honor the kernel listing configuration setting. Allows the request to reach the super class if listing is enabled. Raises ------ tornado.web.HTTPError 403 Forbidden if kernel listing is disabled """ if not self.settings.get("eg_list_kernels"): raise tornado.web.HTTPError(403, "Forbidden") else: await super().get() def options(self, **kwargs: dict[str, Any] | None): """Method for properly handling CORS pre-flight""" self.finish() class KernelHandler( TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.KernelHandler ): """Extends the jupyter_server kernel handler with token auth, CORS, and JSON errors. 
""" def options(self, **kwargs: dict[str, Any] | None): """Method for properly handling CORS pre-flight""" self.finish() @web.authenticated def get(self, kernel_id: str): """Get the model for a kernel.""" km = self.kernel_manager km.check_kernel_id(kernel_id) model = km.kernel_model(kernel_id) self.finish(json.dumps(model, default=date_default)) @web.authenticated async def delete(self, kernel_id): """Remove a kernel.""" self.kernel_manager.check_kernel_id(kernel_id=kernel_id) await super().delete(kernel_id=kernel_id) class ZMQChannelsHandler( TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.ZMQChannelsHandler ): """Extends the kernel websocket handler.""" async def get(self, kernel_id): """Handle a get request for a kernel.""" # Synchronize Kernel and check if it exists. self.kernel_manager.check_kernel_id(kernel_id=kernel_id) await super().get(kernel_id=kernel_id) default_handlers: list[tuple] = [] for path, cls in jupyter_server_handlers.default_handlers: if cls.__name__ in globals(): # Use the same named class from here if it exists default_handlers.append((path, globals()[cls.__name__])) else: # Gen a new type with CORS and token auth bases = (TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, cls) default_handlers.append((path, type(cls.__name__, bases, {}))) ================================================ FILE: enterprise_gateway/services/kernels/remotemanager.py ================================================ """Kernel managers that operate against a remote process.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import asyncio import os import re import signal import time import uuid from typing import Any, ClassVar from jupyter_client.ioloop.manager import AsyncIOLoopKernelManager from jupyter_client.kernelspec import KernelSpec from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager from tornado import web from traitlets import directional_link from traitlets import log as traitlets_log from zmq import IO_THREADS, MAX_SOCKETS, Context from enterprise_gateway.mixins import EnterpriseGatewayConfigMixin from ..processproxies.processproxy import BaseProcessProxyABC, LocalProcessProxy, RemoteProcessProxy from ..sessions.kernelsessionmanager import KernelSessionManager default_kernel_launch_timeout = float(os.getenv("EG_KERNEL_LAUNCH_TIMEOUT", "30")) kernel_restart_status_poll_interval = float(os.getenv("EG_RESTART_STATUS_POLL_INTERVAL", 1.0)) def import_item(name: str): """Import and return ``bar`` given the string ``foo.bar``. Calling ``bar = import_item("foo.bar")`` is the functional equivalent of executing the code ``from foo import bar``. Parameters ---------- name : string The fully qualified name of the module/package being imported. Returns ------- mod : module object The module that was imported. """ parts = name.rsplit(".", 1) if len(parts) == 2: # called with 'foo.bar....' package, obj = parts module = __import__(package, fromlist=[obj]) try: pak = getattr(module, obj) except AttributeError: raise ImportError("No module named %s" % obj) from None return pak else: # called with un-dotted string return __import__(parts[0]) def get_process_proxy_config(kernelspec: KernelSpec) -> dict[str, Any]: """ Return the process-proxy stanza from the kernelspec. Checks the kernelspec's metadata dictionary for a process proxy entry. If found, it is returned, else one is created relative to the LocalProcessProxy and returns. 
Parameters ---------- kernelspec : obj The kernel specification object from which the process-proxy dictionary is derived. Returns ------- process_proxy : dict The process proxy portion of the kernelspec. If one does not exist, it will contain the default information. If no `config` sub-dictionary exists, an empty `config` dictionary will be present. """ if "process_proxy" in kernelspec.metadata: process_proxy = kernelspec.metadata.get("process_proxy") if "class_name" in process_proxy: # class_name is required; if it's missing, fall through to the default if "config" not in process_proxy: # if class_name, but no config stanza, add one process_proxy.update({"config": {}}) return process_proxy # Return what we found (plus config stanza if necessary) return { "class_name": "enterprise_gateway.services.processproxies.processproxy.LocalProcessProxy", "config": {}, } def new_kernel_id(**kwargs: dict[str, Any] | None) -> str: """ This method provides a mechanism by which clients can specify a kernel's id. In this case that mechanism is via the per-kernel environment variable: KERNEL_ID. If specified, its value will be validated and returned, otherwise the result from the provided method is returned. NOTE: This method exists in jupyter_client.multikernelmanager.py for releases > 5.2.3. If you find that this method is not getting invoked, then you likely need to update the version of jupyter_client. The Enterprise Gateway dependency will be updated once new releases of jupyter_client are more prevalent. Returns ------- kernel_id : str The uuid string to associate with the new kernel """ log = kwargs.pop("log", None) or traitlets_log.get_logger() kernel_id_fn = kwargs.pop("kernel_id_fn", None) or (lambda: str(uuid.uuid4())) env = kwargs.get("env") if env and env.get("KERNEL_ID"): # If there's a KERNEL_ID in the env, check it out # convert string back to UUID - validating string in the process. str_kernel_id = env.get("KERNEL_ID") try: str_v4_kernel_id = str(uuid.UUID(str_kernel_id, version=4)) if str_kernel_id != str_v4_kernel_id: # Given string is not uuid v4 compliant msg = "value is not uuid v4 compliant" raise ValueError(msg) except ValueError as ve: log.error( "Invalid v4 UUID value detected in ['env']['KERNEL_ID']: '{}'! Error: {}".format( str_kernel_id, ve ) ) raise ve # user-provided id is valid, use it kernel_id = str(str_kernel_id) log.debug(f"Using user-provided kernel_id: {kernel_id}") else: kernel_id = kernel_id_fn(**kwargs) return kernel_id class TrackPendingRequests: """ Simple class to track (increment/decrement) pending kernel start requests, both total and per user. This tracking is necessary due to an inherent race condition that occurs now that kernel startup is asynchronous. As a result, multiple/simultaneous requests must be considered, in addition to all existing kernel sessions.
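A minimal usage sketch: pending = TrackPendingRequests(); after pending.increment("alice"), pending.get_counts("alice") returns (1, 1) as (total, per-user); pending.decrement("alice") reverses it.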
""" _pending_requests_all = 0 _pending_requests_user: ClassVar = {} def increment(self, username: str) -> None: """Increment the requests for a username.""" self._pending_requests_all += 1 cur_val = int(self._pending_requests_user.get(username, 0)) self._pending_requests_user[username] = cur_val + 1 def decrement(self, username: str) -> None: """Decrement the requests for a username.""" self._pending_requests_all -= 1 cur_val = int(self._pending_requests_user.get(username)) self._pending_requests_user[username] = cur_val - 1 def get_counts(self, username: str) -> tuple[int, int]: """Get the counts for a username.""" return self._pending_requests_all, int(self._pending_requests_user.get(username, 0)) class RemoteMappingKernelManager(AsyncMappingKernelManager): """ Extends the AsyncMappingKernelManager with support for managing remote kernels via the process-proxy. """ def _context_default(self) -> Context: """ We override the _context_default method in """ zmq_context = super()._context_default() if self.shared_context: # this should be True by default # pyzmq currently does not expose defaults for these values, so we replicate them here # libzmq/zmq.h: ZMQ_MAX_SOCKETS_DLFT = 1023; zmq.Context.MAX_SOCKETS # libzmq/zmq.h: ZMQ_IO_THREADS_DFLT = 1; zmq.Context.IO_THREADS zmq_max_sock_desired = int(os.getenv("EG_ZMQ_MAX_SOCKETS", zmq_context.MAX_SOCKETS)) if zmq_max_sock_desired != zmq_context.MAX_SOCKETS: zmq_context.set(MAX_SOCKETS, zmq_max_sock_desired) self.log.info(f"Set ZMQ_MAX_SOCKETS to {zmq_context.MAX_SOCKETS}") zmq_io_threads_desired = int(os.getenv("EG_ZMQ_IO_THREADS", zmq_context.IO_THREADS)) if zmq_io_threads_desired != zmq_context.IO_THREADS: zmq_context.set(IO_THREADS, zmq_io_threads_desired) self.log.info(f"Set ZMQ_IO_THREADS to {zmq_context.IO_THREADS}") return zmq_context pending_requests: TrackPendingRequests = ( TrackPendingRequests() ) # Used to enforce max-kernel limits def _kernel_manager_class_default(self) -> str: return "enterprise_gateway.services.kernels.remotemanager.RemoteKernelManager" def check_kernel_id(self, kernel_id: str) -> None: """Check that a kernel_id exists and raise 404 if not.""" if kernel_id not in self and not self._refresh_kernel(kernel_id): self.parent.kernel_session_manager.delete_session(kernel_id) raise web.HTTPError(404, "Kernel does not exist: %s" % kernel_id) def _refresh_kernel(self, kernel_id: str) -> bool: if self.parent.availability_mode == EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION: try: self.parent.kernel_session_manager.load_session(kernel_id) except Exception as e: self.log.error(f"Failed to load session, kernel_id:{kernel_id}", e) return False return self.parent.kernel_session_manager.start_session(kernel_id) # else we should throw 404 when not using an availability mode of 'replication' return False async def start_kernel(self, *args: list[Any] | None, **kwargs: dict[str, Any] | None) -> str: """ Starts a kernel for a session and return its kernel_id. Returns ------- kernel_id : str The uuid associated with the new kernel. This string will equal the value of the input parameter `kernel_id` if one was provided. 
""" username = KernelSessionManager.get_kernel_username(**kwargs) self.log.debug( "RemoteMappingKernelManager.start_kernel: {kernel_name}, kernel_username: {username}".format( kernel_name=kwargs["kernel_name"], username=username ) ) # Check max kernel limits self._enforce_kernel_limits(username) RemoteMappingKernelManager.pending_requests.increment(username) try: kernel_id = await super().start_kernel(*args, **kwargs) finally: RemoteMappingKernelManager.pending_requests.decrement(username) self.parent.kernel_session_manager.create_session(kernel_id, **kwargs) return kernel_id async def restart_kernel(self, kernel_id: str, now: bool = False) -> None: """Restart a kernel.""" kernel = self.get_kernel(kernel_id) if kernel.restarting: # assuming duplicate request. await self.wait_for_restart_finish(kernel_id, "restart") self.log.info("Skipping kernel restart as this was duplicate request.") return try: kernel.restarting = True # Moved in out of RemoteKernelManager await super().restart_kernel(kernel_id) finally: kernel.restarting = False async def shutdown_kernel( self, kernel_id: str, now: bool = False, restart: bool = False ) -> None: """Shut down a kernel.""" kernel = self.get_kernel(kernel_id) if kernel.restarting: await self.wait_for_restart_finish(kernel_id, "shutdown") try: await super().shutdown_kernel(kernel_id, now, restart) except KeyError as ke: # this is hint for multiple shutdown request. self.log.exception(f"Exception while shutting down kernel: '{kernel_id}': {ke}") raise web.HTTPError(404, "Kernel does not exist: %s" % kernel_id) from None async def wait_for_restart_finish(self, kernel_id: str, action: str = "shutdown") -> None: """Wait for a kernel restart to finish.""" kernel = self.get_kernel(kernel_id) start_time = float(time.time()) # epoc time timeout = kernel.kernel_launch_timeout poll_time = kernel_restart_status_poll_interval self.log.info( f"Kernel '{kernel_id}' was restarting when {action} request received. Polling every {poll_time} " f"seconds for next {timeout} seconds for kernel to complete its restart." ) while kernel.restarting: now = float(time.time()) if (now - start_time) > timeout: self.log.info( f"Timeout: Exiting restart wait loop in order to {action} kernel '{kernel_id}'." ) break await asyncio.sleep(poll_time) return def _enforce_kernel_limits(self, username: str) -> None: """ If MaxKernels or MaxKernelsPerUser are configured, enforce the respective values. """ if self.parent.max_kernels is not None or self.parent.max_kernels_per_user >= 0: ( pending_all, pending_user, ) = RemoteMappingKernelManager.pending_requests.get_counts(username) # Enforce overall limit... if self.parent.max_kernels is not None: active_and_pending = len(self.list_kernels()) + pending_all if active_and_pending >= self.parent.max_kernels: error_message = ( "A max kernels limit has been set to {} and there are " "currently {} active and pending {}.".format( self.parent.max_kernels, active_and_pending, "kernel" if active_and_pending == 1 else "kernels", ) ) self.log.error(error_message) raise web.HTTPError(403, error_message) # Enforce per-user limit... 
            if self.parent.max_kernels_per_user >= 0 and self.parent.kernel_session_manager:
                active_and_pending = (
                    self.parent.kernel_session_manager.active_sessions(username) + pending_user
                )
                if active_and_pending >= self.parent.max_kernels_per_user:
                    error_message = (
                        "A max kernels per user limit has been set to {} and user '{}' "
                        "currently has {} active and pending {}.".format(
                            self.parent.max_kernels_per_user,
                            username,
                            active_and_pending,
                            "kernel" if active_and_pending == 1 else "kernels",
                        )
                    )
                    self.log.error(error_message)
                    raise web.HTTPError(403, error_message)
        return

    def remove_kernel(self, kernel_id: str) -> None:
        """
        Removes the kernel associated with `kernel_id` from the internal map and deletes the kernel session.
        """
        try:
            super().remove_kernel(kernel_id)
        except KeyError:  # this is a hint of multiple shutdown requests
            self.log.debug(f"Exception while removing kernel {kernel_id}: kernel not found.")
        self.parent.kernel_session_manager.delete_session(kernel_id)

    def start_kernel_from_session(
        self,
        kernel_id: str,
        kernel_name: str,
        connection_info: dict[str, Any],
        process_info: dict[str, Any],
        launch_args: dict[str, Any],
    ) -> bool:
        """
        Starts a kernel from a persisted kernel session.

        This method is used in HA situations when a previously running Enterprise Gateway
        instance has terminated and a new instance - with access to the persisted kernel
        sessions - is starting up.  It attempts to "revive" the persisted kernel session by
        instantiating the necessary class instances to re-establish communication with the
        currently active kernel.

        Note that this method is typically only successful when kernel instances are remote
        from the previously running Enterprise Gateway server - since re-establishing
        communication won't work if the kernels were also local to the (probably) terminated
        server.

        Parameters
        ----------
        kernel_id : str
            The uuid string corresponding to the kernel to start
        kernel_name : str
            The name of kernel to start
        connection_info : dict
            The connection information for the kernel loaded from persistent storage
        process_info : dict
            The process information corresponding to the process-proxy used by the kernel
            and loaded from persistent storage
        launch_args : dict
            The arguments used for the initial launch of the kernel

        Returns
        -------
        True if kernel could be located and started, False otherwise.
        """
        # Create a KernelManager instance and load connection and process info, then confirm
        # the kernel is still alive.
        constructor_kwargs = {}
        if self.kernel_spec_manager:
            constructor_kwargs["kernel_spec_manager"] = self.kernel_spec_manager

        # Construct a kernel manager...
        km = self.kernel_manager_factory(
            connection_file=os.path.join(self.connection_dir, "kernel-%s.json" % kernel_id),
            parent=self,
            log=self.log,
            kernel_name=kernel_name,
            **constructor_kwargs,
        )

        # Load connection info into member vars - no need to write out connection file
        km.load_connection_info(connection_info)
        km._launch_args = launch_args

        # Construct a process-proxy
        process_proxy = get_process_proxy_config(km.kernel_spec)
        process_proxy_class = import_item(process_proxy.get("class_name"))

        km.process_proxy = process_proxy_class(km, proxy_config=process_proxy.get("config"))
        km.process_proxy.load_process_info(process_info)
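        # For reference, a minimal sketch of the persisted process_info consumed above
        # (illustrative only - the actual keys depend on the process proxy in use; the remote
        # proxies in this package persist entries such as "ip", "pid", and "pgid", and the
        # Conductor proxy adds "application_id" and "rest_credential"):
        #
        #   process_info = {"ip": "10.0.0.5", "pid": 12345, "pgid": 12345}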
        # Confirm we can even poll the process.  If not, remove the persisted session.
        if km.process_proxy.poll() is False:
            return False

        km.kernel = km.process_proxy
        km.start_restarter()
        km._connect_control_socket()

        self._kernels[kernel_id] = km
        self._kernel_connections[kernel_id] = 0
        self.start_watching_activity(kernel_id)
        self.add_restart_callback(
            kernel_id,
            lambda: self._handle_kernel_died(kernel_id),
            "dead",
        )
        # Only initialize culling if available.  Warning message will be issued in gatewayapp at startup.
        func = getattr(self, "initialize_culler", None)
        if func:
            func()
        return True

    def new_kernel_id(self, **kwargs: dict[str, Any] | None) -> str:
        """
        Determines the kernel_id to use for a new kernel.
        """
        return new_kernel_id(kernel_id_fn=super().new_kernel_id, log=self.log, **kwargs)


class RemoteKernelManager(EnterpriseGatewayConfigMixin, AsyncIOLoopKernelManager):
    """
    Extends the AsyncIOLoopKernelManager used by the RemoteMappingKernelManager.

    This class is responsible for detecting that a remote kernel is desired, then launching
    the appropriate class (previously pulled from the kernel spec).  The process 'proxy' is
    returned - upon which methods of poll(), wait(), send_signal(), and kill() can be called.
    """

    def __init__(self, **kwargs: dict[str, Any] | None):
        """Initialize the remote kernel manager."""
        super().__init__(**kwargs)
        self.process_proxy = None
        self.response_address = None
        self.public_key = None
        self.sigint_value = None
        self.kernel_id = None
        self.user_overrides = {}
        self.kernel_launch_timeout = default_kernel_launch_timeout
        self.restarting = False  # need to track whether we're in a restart situation or not
        self._activity_stream = None

        # If this instance supports port caching, then disable cache_ports since we don't need this
        # for remote kernels and it breaks the ability to support port ranges for local kernels (which
        # is viewed as more important for EG).
        # Note: This check MUST remain in this method since cache_ports is used immediately
        # following construction.
        if hasattr(self, "cache_ports"):
            self.cache_ports = False
        if not self.connection_file:
            self.kernel_id = new_kernel_id(log=self.log)
        self._link_dependent_props()
        if self.kernel_spec_manager is None:
            self.kernel_spec_manager = self.kernel_spec_manager_class(
                parent=self,
            )

    def _link_dependent_props(self) -> None:
        """
        Ensure that RemoteKernelManager, when used as part of an EnterpriseGatewayApp, keeps
        certain necessary configuration in sync with the app's configuration.  When
        RemoteKernelManager is used independently, this function is a no-op, and default values
        or configuration set on this class are used.
        """
        try:
            eg_instance = self.parent.parent
        except AttributeError:
            return
        dependent_props = [
            "authorized_users",
            "unauthorized_users",
            "port_range",
            "impersonation_enabled",
            "max_kernels_per_user",
            "client_envs",
            "inherited_envs",
            "yarn_endpoint",
            "alt_yarn_endpoint",
            "yarn_endpoint_security_enabled",
            "conductor_endpoint",
            "remote_hosts",
            "load_balancing_algorithm",
        ]
        self._links = [
            directional_link((eg_instance, prop), (self, prop)) for prop in dependent_props
        ]

    async def start_kernel(self, **kwargs: dict[str, Any] | None):
        """
        Starts a kernel in a separate process.

        Where the started kernel resides depends on the configured process proxy.

        Parameters
        ----------
        `**kwargs` : optional
             keyword arguments that are passed down to build the kernel_cmd
             and launching the kernel (e.g. Popen kwargs).
""" self._get_process_proxy() self._capture_user_overrides(**kwargs) await super().start_kernel(**kwargs) def _capture_user_overrides(self, **kwargs: dict[str, Any] | None) -> None: """ Make a copy of any allowed or KERNEL_ env values provided by user. These will be injected back into the env after the kernelspec env has been applied. This enables defaulting behavior of the kernelspec env stanza that would have otherwise overridden the user-provided values. """ env = kwargs.get("env", {}) # If KERNEL_LAUNCH_TIMEOUT is passed in the payload, override it. self.kernel_launch_timeout = float( env.get("KERNEL_LAUNCH_TIMEOUT", default_kernel_launch_timeout) ) self.user_overrides.update( { key: value for key, value in env.items() if key.startswith("KERNEL_") or key in self.inherited_envs or key in self.client_envs } ) def format_kernel_cmd(self, extra_arguments: list[str] | None = None) -> list[str]: """ Replace templated args (e.g. {response_address}, {port_range}, or {kernel_id}). """ cmd = super().format_kernel_cmd(extra_arguments) if self.response_address or self.port_range or self.kernel_id or self.public_key: ns = self._launch_args.copy() if self.response_address: ns["response_address"] = self.response_address if self.public_key: ns["public_key"] = self.public_key if self.port_range: ns["port_range"] = self.port_range if self.kernel_id: ns["kernel_id"] = self.kernel_id pat = re.compile(r"\{([A-Za-z0-9_]+)\}") def from_ns(match): """Get the key out of ns if it's there, otherwise no change.""" return ns.get(match.group(1), match.group()) return [pat.sub(from_ns, arg) for arg in cmd] return cmd async def _launch_kernel( self, kernel_cmd: list[str], **kwargs: dict[str, Any] | None ) -> BaseProcessProxyABC: # Note: despite the under-bar prefix to this method, the jupyter_client comment says that # this method should be "[overridden] in a subclass to launch kernel subprocesses differently". # So that's what we've done. env = kwargs["env"] # Apply user_overrides to enable defaulting behavior from kernelspec.env stanza. Note that we do this # BEFORE setting KERNEL_GATEWAY and removing {EG,KG}_AUTH_TOKEN so those operations cannot be overridden. env.update(self.user_overrides) # No longer using Kernel Gateway, but retain references of B/C purposes env["KERNEL_GATEWAY"] = "1" if "EG_AUTH_TOKEN" in env: del env["EG_AUTH_TOKEN"] if "KG_AUTH_TOKEN" in env: del env["KG_AUTH_TOKEN"] self.log.debug( f"Launching kernel: '{self.kernel_spec.display_name}' with command: {kernel_cmd}" ) proxy = await self.process_proxy.launch_process(kernel_cmd, **kwargs) return proxy def request_shutdown(self, restart: bool = False) -> None: """ Send a shutdown request via control channel and process proxy (if remote). """ super().request_shutdown(restart) # If we're using a remote proxy, we need to send the launcher indication that we're # shutting down so it can exit its listener thread, if its using one. if isinstance(self.process_proxy, RemoteProcessProxy): self.process_proxy.shutdown_listener() async def restart_kernel(self, now: bool = False, **kwargs: dict[str, Any] | None) -> None: """ Restarts a kernel with the arguments that were used to launch it. This is an automatic restart request (now=True) AND this is associated with a remote kernel, check the active connection count. If there are zero connections, do not restart the kernel. Parameters ---------- now : bool, optional If True, the kernel is forcefully restarted *immediately*, without having a chance to do any cleanup action. 
Otherwise the kernel is given 1s to clean up before a forceful restart is issued. In all cases the kernel is restarted, the only difference is whether it is given a chance to perform a clean shutdown or not. `**kwargs` : optional Any options specified here will overwrite those used to launch the kernel. """ kernel_id = self.kernel_id or os.path.basename(self.connection_file).replace( "kernel-", "" ).replace(".json", "") # Check if this is a remote process proxy and if now = True. If so, check its connection count. If no # connections, shutdown else perform the restart. Note: auto-restart sets now=True, but handlers use # the default value (False). if ( # noqa isinstance(self.process_proxy, RemoteProcessProxy) and now and self.mapping_kernel_manager ): if self.mapping_kernel_manager._kernel_connections.get(kernel_id, 0) == 0: self.log.warning( "Remote kernel ({}) will not be automatically restarted since there are no " "clients connected at this time.".format(kernel_id) ) # Use the parent mapping kernel manager so activity monitoring and culling is also shutdown await self.mapping_kernel_manager.shutdown_kernel(kernel_id, now=now) return if now: # if auto-restarting (when now is True), indicate we're restarting. self.restarting = True await super().restart_kernel(now, **kwargs) if isinstance(self.process_proxy, RemoteProcessProxy): # for remote kernels... # Re-establish activity watching... if self._activity_stream: self._activity_stream.close() self._activity_stream = None if self.mapping_kernel_manager: self.mapping_kernel_manager.start_watching_activity(kernel_id) # Refresh persisted state. if self.kernel_session_manager: self.kernel_session_manager.refresh_session(kernel_id) if now: self.restarting = False async def signal_kernel(self, signum: int) -> None: """ Sends signal `signum` to the kernel process. """ if self.has_kernel: if signum == signal.SIGINT: if self.sigint_value is None: # If we're interrupting the kernel, check if kernelspec's env defines # an alternate interrupt signal. We'll do this once per interrupted kernel. # This is required for kernels whose language may prevent signals across # process/user boundaries (Scala, for example). self.sigint_value = signum # use default alt_sigint = self.kernel_spec.env.get("EG_ALTERNATE_SIGINT") if alt_sigint: try: sig_value = getattr(signal, alt_sigint) if isinstance(sig_value, int): # Python 2 self.sigint_value = sig_value else: # Python 3 self.sigint_value = sig_value.value self.log.debug( "Converted EG_ALTERNATE_SIGINT '{}' to value '{}' to use as interrupt signal.".format( alt_sigint, self.sigint_value ) ) except AttributeError: self.log.warning( "Error received when attempting to convert EG_ALTERNATE_SIGINT of " "'{}' to a value. Check kernelspec entry for kernel '{}' - using " "default 'SIGINT'".format(alt_sigint, self.kernel_spec.display_name) ) self.kernel.send_signal(self.sigint_value) else: self.kernel.send_signal(signum) else: msg = "Cannot signal kernel. No kernel is running!" raise RuntimeError(msg) def cleanup(self, connection_file: bool = True) -> None: """ Clean up resources when the kernel is shut down """ # Note This method has been deprecated in jupyter_client 6.1.5 and # remains here for pre-6.2.0 jupyter_client installations. # Note we must use `process_proxy` here rather than `kernel`, although they're the same value. 
        # The reason is because if the kernel shutdown sequence has triggered its "forced kill" logic
        # then that method (jupyter_client/manager.py/_kill_kernel()) will set `self.kernel` to None,
        # which then prevents process proxy cleanup.
        if self.process_proxy:
            self.process_proxy.cleanup()
            self.process_proxy = None
        return super().cleanup(connection_file)

    def cleanup_resources(self, restart: bool = False) -> None:
        """
        Clean up resources when the kernel is shut down
        """
        # Note: This method was introduced in jupyter_client 6.1.5 and
        # will not be called until jupyter_client 6.2.0 has been released.
        # Note we must use `process_proxy` here rather than `kernel`, although they're the same value.
        # The reason is because if the kernel shutdown sequence has triggered its "forced kill" logic
        # then that method (jupyter_client/manager.py/_kill_kernel()) will set `self.kernel` to None,
        # which then prevents process proxy cleanup.
        if self.process_proxy:
            self.process_proxy.cleanup()
            self.process_proxy = None
        return super().cleanup_resources(restart)

    def write_connection_file(self) -> None:
        """
        Write connection info to JSON dict in self.connection_file if the kernel is local.

        If this is a remote kernel that's using a response address or we're restarting, we
        should skip the write_connection_file since it will create 5 useless ports that would
        not adhere to port-range restrictions if configured.
        """
        if (
            isinstance(self.process_proxy, LocalProcessProxy) or not self.response_address
        ) and not self.restarting:
            # However, since we *may* want to limit the selected ports, go ahead and get the ports
            # using the process proxy (will be LocalProcessProxy for default case) since the port
            # selection will handle the default case when the member ports aren't set anyway.
            ports = self.process_proxy.select_ports(5)
            self.shell_port = ports[0]
            self.iopub_port = ports[1]
            self.stdin_port = ports[2]
            self.hb_port = ports[3]
            self.control_port = ports[4]

            super().write_connection_file()
        return None

    def _get_process_proxy(self) -> None:
        """
        Reads the associated kernelspec to see if it has a process-proxy stanza.  If one exists,
        it instantiates an instance.  If a process proxy is not specified in the kernelspec, a
        LocalProcessProxy stanza is fabricated and instantiated.
        """
        process_proxy_cfg = get_process_proxy_config(self.kernel_spec)
        process_proxy_class_name = process_proxy_cfg.get("class_name")
        self.log.debug(
            "Instantiating kernel '{}' with process proxy: {}".format(
                self.kernel_spec.display_name, process_proxy_class_name
            )
        )
        process_proxy_class = import_item(process_proxy_class_name)
        self.process_proxy = process_proxy_class(
            kernel_manager=self, proxy_config=process_proxy_cfg.get("config")
        )

    # When this class is used by an EnterpriseGatewayApp instance, it will be able to
    # access the app's configuration using the traitlet parent chain.
    # When it's used independently, it should fall back to safe defaults.
    @property
    def kernel_session_manager(self) -> KernelSessionManager | None:
        try:
            return self.parent.parent.kernel_session_manager
        except AttributeError:
            return None

    @property
    def cull_idle_timeout(self) -> int:
        try:
            return self.parent.cull_idle_timeout
        except AttributeError:
            return 0

    @property
    def mapping_kernel_manager(self) -> RemoteMappingKernelManager | None:
        try:
            return self.parent
        except AttributeError:
            return None


================================================
FILE: enterprise_gateway/services/kernelspecs/__init__.py
================================================
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License. from .kernelspec_cache import KernelSpecCache # noqa ================================================ FILE: enterprise_gateway/services/kernelspecs/handlers.py ================================================ """Tornado handlers for kernel specs.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json from typing import Dict, List, Optional from jupyter_server.base.handlers import JupyterHandler from jupyter_server.services.kernelspecs.handlers import is_kernelspec_model, kernelspec_model from jupyter_server.utils import ensure_async, url_unescape from tornado import web from traitlets import Set from ...base.handlers import APIHandler from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin from .kernelspec_cache import KernelSpecCache def apply_user_filter( kernelspec_model: Dict[str, object], global_authorized_list: Set, global_unauthorized_list: Set, kernel_user: Optional[str] = None, ) -> Optional[Dict[str, object]]: """ If authorization lists are configured - either within the kernelspec or globally, ensure the user is authorized for the given kernelspec. """ if kernel_user: # Check the unauthorized list of the kernelspec, then the globally-configured unauthorized list - the # semantics of which are a union of the two lists. try: # Check if kernel_user in kernelspec_model unauthorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][ "unauthorized_users" ] except KeyError: pass else: if kernel_user in unauthorized_list: return None if kernel_user in global_unauthorized_list: return None # Check the authorized list of the kernelspec, then the globally-configured authorized list - # but only if the kernelspec list doesn't exist. This is because the kernelspec set of authorized # users may be a subset of globally authorized users and is, essentially, used as a denial to those # not defined in the kernelspec's list. 
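        # For example, a kernelspec can scope authorization like this (illustrative kernel.json
        # fragment; the keys below are exactly the ones this function reads):
        #
        #   "metadata": {
        #     "process_proxy": {
        #       "config": {
        #         "authorized_users": ["alice", "bob"],
        #         "unauthorized_users": ["mallory"]
        #       }
        #     }
        #   }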
try: authorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][ "authorized_users" ] except KeyError: if global_authorized_list and kernel_user not in global_authorized_list: return None else: if authorized_list and kernel_user not in authorized_list: return None return kernelspec_model class MainKernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): """The root kernel spec handler.""" @property def kernel_spec_cache(self) -> KernelSpecCache: return self.settings["kernel_spec_cache"] @web.authenticated async def get(self) -> None: """Get the kernel spec models.""" ksc = self.kernel_spec_cache km = self.kernel_manager model = {} model["default"] = km.default_kernel_name model["kernelspecs"] = specs = {} kernel_user_filter = self.request.query_arguments.get("user") kernel_user = None if kernel_user_filter: kernel_user = kernel_user_filter[0].decode("utf-8") if kernel_user: self.log.debug("Searching kernels for user '%s' " % kernel_user) kspecs = await ensure_async(ksc.get_all_specs()) list_kernels_found = [] for kernel_name, kernel_info in kspecs.items(): try: if is_kernelspec_model(kernel_info): d = kernel_info else: d = kernelspec_model( self, kernel_name, kernel_info["spec"], kernel_info["resource_dir"] ) d = apply_user_filter( d, self.settings["eg_authorized_users"], self.settings["eg_unauthorized_users"], kernel_user, ) if d is not None: specs[kernel_name] = d list_kernels_found.append(d["name"]) else: self.log.debug( f"User {kernel_user} is not authorized to use kernel spec {kernel_name}" ) except Exception: self.log.error("Failed to load kernel spec: '%s'", kernel_name) continue self.set_header("Content-Type", "application/json") self.finish(json.dumps(model)) class KernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler): """A handler for a specific kernel spec.""" @property def kernel_spec_cache(self) -> KernelSpecCache: return self.settings["kernel_spec_cache"] @web.authenticated async def get(self, kernel_name: str) -> None: """Get a kernel spec by name.""" ksc = self.kernel_spec_cache kernel_name = url_unescape(kernel_name) kernel_user_filter = self.request.query_arguments.get("user") kernel_user = None if kernel_user_filter: kernel_user = kernel_user_filter[0].decode("utf-8") try: spec = await ensure_async(ksc.get_kernel_spec(kernel_name)) except KeyError: raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) from None if is_kernelspec_model(spec): model = spec else: model = kernelspec_model(self, kernel_name, spec.to_dict(), spec.resource_dir) d = apply_user_filter( model, self.settings["eg_authorized_users"], self.settings["eg_unauthorized_users"], kernel_user, ) if d is None: raise web.HTTPError( 403, f"User {kernel_user} is not authorized to use kernel spec {kernel_name}" ) self.set_header("Content-Type", "application/json") self.finish(json.dumps(model)) class KernelSpecResourceHandler( TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, web.StaticFileHandler, JupyterHandler ): """A handler for kernel spec resources.""" SUPPORTED_METHODS = ("GET", "HEAD") @property def kernel_spec_cache(self) -> KernelSpecCache: return self.settings["kernel_spec_cache"] def initialize(self) -> None: """Initialize the handler.""" web.StaticFileHandler.initialize(self, path="") @web.authenticated async def get(self, kernel_name: str, path: str, include_body: bool = True) -> None: """Get a resource for a kernel.""" ksc = self.kernel_spec_cache try: kernelspec = await 
ensure_async(ksc.get_kernel_spec(kernel_name))
            self.root = kernelspec.resource_dir
        except KeyError as e:
            raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) from e
        self.log.debug("Serving kernel resource from: %s", self.root)
        return await web.StaticFileHandler.get(self, path, include_body=include_body)

    @web.authenticated
    def head(self, kernel_name: str, path: str) -> None:
        """Get the head for a kernel resource."""
        return self.get(kernel_name, path, include_body=False)


kernel_name_regex: str = r"(?P<kernel_name>[\w\.\-%]+)"

# Extends the default handlers from the jupyter_server package with token auth, CORS
# and JSON errors.
default_handlers: List[tuple] = [
    (r"/api/kernelspecs", MainKernelSpecHandler),
    (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler),
    (r"/kernelspecs/%s/(?P<path>.*)" % kernel_name_regex, KernelSpecResourceHandler),
]


================================================
FILE: enterprise_gateway/services/kernelspecs/kernelspec_cache.py
================================================
"""Cache handling for kernel specs."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from typing import ClassVar, Dict, Optional, Union

from jupyter_client.kernelspec import KernelSpec
from jupyter_server.utils import ensure_async
from traitlets.config import SingletonConfigurable
from traitlets.traitlets import CBool, default
from watchdog.events import FileMovedEvent, FileSystemEventHandler
from watchdog.observers import Observer

# Simplify the typing.  Cache items are essentially dictionaries of strings
# to either strings or dictionaries.  The items themselves are indexed by
# the kernel_name (case-insensitive).
CacheItemType = Dict[str, Union[str, Dict]]


class KernelSpecCache(SingletonConfigurable):
    """The primary (singleton) instance for managing KernelSpecs.

    This class contains the configured KernelSpecManager instance, which it uses to populate
    the cache (when enabled) or as a pass-thru (when disabled).

    Note that the KernelSpecManager returns different formats from methods get_all_specs() and
    get_kernel_spec().  The format in which cache entries are stored is that of the
    get_all_specs() results.  As a result, some conversion between formats is necessary,
    depending on which method is called.
    """

    cache_enabled_env = "EG_KERNELSPEC_CACHE_ENABLED"
    cache_enabled = CBool(
        True,
        config=True,
        help="""Enable Kernel Specification caching. (EG_KERNELSPEC_CACHE_ENABLED env var)""",
    )

    @default("cache_enabled")
    def _cache_enabled_default(self):
        return os.getenv(self.cache_enabled_env, "false").lower() in ("true", "1")

    def __init__(self, kernel_spec_manager, **kwargs) -> None:
        """Initialize the cache."""
        super().__init__(**kwargs)
        self.kernel_spec_manager = kernel_spec_manager
        self._initialize()

    async def get_kernel_spec(self, kernel_name: str) -> KernelSpec:
        """Get the named kernel specification.

        This method is equivalent to calling KernelSpecManager.get_kernel_spec().  If caching
        is enabled, it will pull the item from the cache.  If no item is returned (as will be
        the case if caching is disabled) it will defer to the currently configured
        KernelSpecManager.  If an item is returned (and caching is enabled), it will be added
        to the cache.
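
        For example (illustrative; assumes a kernelspec named "python3" is installed and `ksm`
        is a configured KernelSpecManager)::

            cache = KernelSpecCache.instance(kernel_spec_manager=ksm)
            spec = await cache.get_kernel_spec("python3")  # first call may miss, then caches
            spec = await cache.get_kernel_spec("python3")  # subsequent calls hit the cache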
""" kernelspec = self.get_item(kernel_name) if not kernelspec: kernelspec = await ensure_async(self.kernel_spec_manager.get_kernel_spec(kernel_name)) if kernelspec: self.put_item(kernel_name, kernelspec) return kernelspec async def get_all_specs(self) -> Dict[str, CacheItemType]: """Get all available kernel specifications. This method is equivalent to calling KernelSpecManager.get_all_specs(). If caching is enabled, it will pull all items from the cache. If no items are returned (as will be the case if caching is disabled) it will defer to the currently configured KernelSpecManager. If items are returned (and caching is enabled), they will be added to the cache. Note that the return type of this method is not a dictionary or list of KernelSpec instances, but rather a dictionary of kernel-name to kernel-info dictionaries are returned - as is the case with the respective return values of the KernelSpecManager methods. """ kernelspecs = self.get_all_items() if not kernelspecs: kernelspecs = await ensure_async(self.kernel_spec_manager.get_all_specs()) if kernelspecs: self.put_all_items(kernelspecs) return kernelspecs # Cache-related methods def get_item(self, kernel_name: str) -> Optional[KernelSpec]: """Retrieves a named kernel specification from the cache. If cache is disabled or the item is not in the cache, None is returned; otherwise, a KernelSpec instance of the item is returned. """ kernelspec = None if self.cache_enabled: cache_item = self.cache_items.get(kernel_name.lower()) if cache_item: # Convert to KernelSpec # In certain conditions, like when the kernelspec is fetched prior to its removal from the cache, # we can encounter a FileNotFoundError. In those cases, treat as a cache miss as well. try: kernelspec = KernelSpecCache.cache_item_to_kernel_spec(cache_item) except FileNotFoundError: pass if not kernelspec: self.cache_misses += 1 self.log.debug(f"Cache miss ({self.cache_misses}) for kernelspec: {kernel_name}") return kernelspec def get_all_items(self) -> Dict[str, CacheItemType]: """Retrieves all kernel specification from the cache. If cache is disabled or no items are in the cache, an empty dictionary is returned; otherwise, a dictionary of kernel-name to specifications (kernel infos) are returned. """ items = {} if self.cache_enabled: for kernel_name in self.cache_items: cache_item = self.cache_items.get(kernel_name) items[kernel_name] = cache_item if not items: self.cache_misses += 1 return items def put_item(self, kernel_name: str, cache_item: Union[KernelSpec, CacheItemType]) -> None: """Adds or updates a kernel specification in the cache. This method can take either a KernelSpec (if called directly from the `get_kernel_spec()` method, or a CacheItemItem (if called from a cache-related method) as that is the type in which the cache items are stored. If it determines the cache entry corresponds to a currently unwatched directory, that directory will be added to list of observed directories and scheduled accordingly. """ if self.cache_enabled: self.log.info(f"KernelSpecCache: adding/updating kernelspec: {kernel_name}") if type(cache_item) is KernelSpec: cache_item = KernelSpecCache.kernel_spec_to_cache_item(cache_item) resource_dir = cache_item["resource_dir"] self.cache_items[kernel_name.lower()] = cache_item observed_dir = os.path.dirname(resource_dir) if observed_dir not in self.observed_dirs: # New directory to watch, schedule it... 
self.log.debug(f"KernelSpecCache: observing directory: {observed_dir}") self.observed_dirs.add(observed_dir) self.observer.schedule(KernelSpecChangeHandler(self), observed_dir, recursive=True) def put_all_items(self, kernelspecs: Dict[str, CacheItemType]) -> None: """Adds or updates a dictionary of kernel specification in the cache.""" for kernel_name, cache_item in kernelspecs.items(): self.put_item(kernel_name, cache_item) def remove_item(self, kernel_name: str) -> Optional[CacheItemType]: """Removes the cache item corresponding to kernel_name from the cache.""" cache_item = None if self.cache_enabled and kernel_name.lower() in self.cache_items: cache_item = self.cache_items.pop(kernel_name.lower()) self.log.info(f"KernelSpecCache: removed kernelspec: {kernel_name}") return cache_item def _initialize(self): """Initializes the cache and starts the observer.""" # The kernelspec cache consists of a dictionary mapping the kernel name to the actual # kernelspec data (CacheItemType). self.cache_items = {} # Maps kernel name to kernelspec self.observed_dirs = set() # Tracks which directories are being watched self.cache_misses = 0 # Seed the cache and start the observer if self.cache_enabled: self.observer = Observer() kernelspecs = self.kernel_spec_manager.get_all_specs() self.put_all_items(kernelspecs) # Following adds, see if any of the manager's kernel dirs are not observed and add them for kernel_dir in self.kernel_spec_manager.kernel_dirs: if kernel_dir not in self.observed_dirs: if os.path.exists(kernel_dir): self.log.info(f"KernelSpecCache: observing directory: {kernel_dir}") self.observed_dirs.add(kernel_dir) self.observer.schedule( KernelSpecChangeHandler(self), kernel_dir, recursive=True ) else: self.log.warning( f"KernelSpecCache: kernel_dir '{kernel_dir}' does not exist" " and will not be observed." ) self.observer.start() @staticmethod def kernel_spec_to_cache_item(kernelspec: KernelSpec) -> CacheItemType: """Converts a KernelSpec instance to a CacheItemType for storage into the cache.""" cache_item = {} cache_item["spec"] = kernelspec.to_dict() cache_item["resource_dir"] = kernelspec.resource_dir return cache_item @staticmethod def cache_item_to_kernel_spec(cache_item: CacheItemType) -> KernelSpec: """Converts a CacheItemType to a KernelSpec instance for user consumption.""" kernel_spec = KernelSpec(resource_dir=cache_item["resource_dir"], **cache_item["spec"]) return kernel_spec class KernelSpecChangeHandler(FileSystemEventHandler): """Watchdog handler that filters on specific files deemed representative of a kernel specification.""" # Events related to these files trigger the management of the KernelSpec cache. Should we find # other files qualify as indicators of a kernel specification's state (like perhaps detached parameter # files in the future) should be added to this list - at which time it should become configurable. watched_files: ClassVar = ["kernel.json"] def __init__(self, kernel_spec_cache: KernelSpecCache, **kwargs): """Initialize the handler.""" super().__init__(**kwargs) self.kernel_spec_cache = kernel_spec_cache self.log = kernel_spec_cache.log def dispatch(self, event): """Dispatches events pertaining to kernelspecs to the appropriate methods. The primary purpose of this method is to ensure the action is occurring against the a file in the list of watched files and adds some additional attributes to the event instance to make the actual event handling method easier. :param event: The event object representing the file system event. 
:type event: :class:`FileSystemEvent` """ if os.path.basename(event.src_path) in self.watched_files: src_resource_dir = os.path.dirname(event.src_path) event.src_resource_dir = src_resource_dir event.src_kernel_name = os.path.basename(src_resource_dir) if type(event) is FileMovedEvent: dest_resource_dir = os.path.dirname(event.dest_path) event.dest_resource_dir = dest_resource_dir event.dest_kernel_name = os.path.basename(dest_resource_dir) super().dispatch(event) def on_created(self, event): """Fires when a watched file is created. This will trigger a call to the configured KernelSpecManager to fetch the instance associated with the created file, which is then added to the cache. """ kernel_name = event.src_kernel_name try: kernelspec = self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name) self.kernel_spec_cache.put_item(kernel_name, kernelspec) except Exception as e: self.log.warning( "The following exception occurred creating cache entry for: {src_resource_dir} " "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e) ) def on_deleted(self, event): """Fires when a watched file is deleted, triggering a removal of the corresponding item from the cache.""" kernel_name = event.src_kernel_name self.kernel_spec_cache.remove_item(kernel_name) def on_modified(self, event): """Fires when a watched file is modified. This will trigger a call to the configured KernelSpecManager to fetch the instance associated with the modified file, which is then replaced in the cache. """ kernel_name = event.src_kernel_name try: kernelspec = self.kernel_spec_cache.kernel_spec_manager.get_kernel_spec(kernel_name) self.kernel_spec_cache.put_item(kernel_name, kernelspec) except Exception as e: self.log.warning( "The following exception occurred updating cache entry for: {src_resource_dir} " "- continuing... ({e})".format(src_resource_dir=event.src_resource_dir, e=e) ) def on_moved(self, event): """Fires when a watched file is moved. This will trigger the update of the existing cached item, replacing its resource_dir entry with that of the new destination. """ src_kernel_name = event.src_kernel_name dest_kernel_name = event.dest_kernel_name cache_item = self.kernel_spec_cache.remove_item(src_kernel_name) cache_item["resource_dir"] = event.dest_resource_dir self.kernel_spec_cache.put_item(dest_kernel_name, cache_item) ================================================ FILE: enterprise_gateway/services/processproxies/__init__.py ================================================ ================================================ FILE: enterprise_gateway/services/processproxies/conductor.py ================================================ """Code related to managing kernels running in Conductor clusters.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import asyncio import json import os import re import signal import socket import subprocess import time from random import randint from typing import Any, ClassVar from jupyter_client import localinterfaces from jupyter_server.utils import url_unescape from ..kernels.remotemanager import RemoteKernelManager from .processproxy import RemoteProcessProxy pjoin = os.path.join local_ip = localinterfaces.public_ips()[0] poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) class ConductorClusterProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for Conductor clusters. 
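
    A kernelspec opts into this proxy via its process-proxy stanza, e.g. (illustrative
    kernel.json fragment):

        "metadata": {
          "process_proxy": {
            "class_name": "enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy"
          }
        }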
""" initial_states: ClassVar = {"SUBMITTED", "WAITING", "RUNNING"} final_states: ClassVar = {"FINISHED", "KILLED", "RECLAIMED"} # Don't include FAILED state def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.application_id = None self.driver_id = None self.env = None self.rest_credential = None self.jwt_token = None self.conductor_endpoint = proxy_config.get( "conductor_endpoint", kernel_manager.conductor_endpoint ) self.ascd_endpoint = self.conductor_endpoint async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> ConductorClusterProcessProxy: """ Launches the specified process within a Conductor cluster environment. """ await super().launch_process(kernel_cmd, **kwargs) self.env = kwargs.get("env") self.kernel_headers = kwargs.get("kernel_headers") # Get Conductor cred from process env env_dict = dict(os.environ.copy()) if env_dict and "EGO_SERVICE_CREDENTIAL" in env_dict: self.rest_credential = env_dict["EGO_SERVICE_CREDENTIAL"] elif self.kernel_headers and "Jwt-Auth-User-Payload" in self.kernel_headers: kwargs.get("env")["KERNEL_NOTEBOOK_COOKIE_JAR"] = "kernelcookie" + str(randint(0, 1000)) jsonKH = json.loads(self.kernel_headers["Jwt-Auth-User-Payload"]) self.jwt_token = jsonKH["accessToken"] await asyncio.get_event_loop().run_in_executor( None, self._performConductorJWTLogonAndRetrieval, self.jwt_token, kwargs.get("env") ) else: error_message = ( "ConductorClusterProcessProxy failed to obtain the Conductor credential." ) self.log_and_raise(http_status_code=500, reason=error_message) # dynamically update Spark submit parameters await asyncio.get_event_loop().run_in_executor( None, self._update_launch_info, kernel_cmd, kwargs.get("env") ) # Enable stderr PIPE for the run command kwargs.update({"stderr": subprocess.PIPE}) self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip self.log.debug( "Conductor cluster kernel launched using Conductor endpoint: {}, pid: {}, Kernel ID: {}, " "cmd: '{}'".format( self.conductor_endpoint, self.local_proc.pid, self.kernel_id, kernel_cmd ) ) await self.confirm_remote_startup() return self def _update_launch_info(self, kernel_cmd: list[str], env_dict: dict) -> None: """ Dynamically assemble the spark-submit configuration passed from NB2KG. """ if any(arg.endswith(".sh") for arg in kernel_cmd): self.log.debug("kernel_cmd contains execution script") else: kernel_dir = self.kernel_manager.kernel_spec_manager._find_spec_directory( self.kernel_manager.kernel_name ) cmd = pjoin(kernel_dir, "bin/run.sh") kernel_cmd.insert(0, cmd) # add SPARK_HOME, PYSPARK_PYTHON, update SPARK_OPT to contain SPARK_MASTER and EGO_SERVICE_CREDENTIAL env_dict["SPARK_HOME"] = env_dict["KERNEL_SPARK_HOME"] env_dict["PYSPARK_PYTHON"] = env_dict["KERNEL_PYSPARK_PYTHON"] # add KERNEL_SPARK_OPTS to append user configured Spark configuration user_defined_spark_opts = "" if "KERNEL_SPARK_OPTS" in env_dict: user_defined_spark_opts = env_dict["KERNEL_SPARK_OPTS"] # Get updated one_notebook_master_rest_url for KERNEL_NOTEBOOK_MASTER_REST and SPARK_OPTS. 
if self.jwt_token is None: self._update_notebook_master_rest_url(env_dict) if "--master" not in env_dict["SPARK_OPTS"]: env_dict["SPARK_OPTS"] = ( "--master {master} --conf spark.ego.credential={rest_cred} " "--conf spark.pyspark.python={pyspark_python} {spark_opts} " "{user_defined_spark_opts}".format( master=env_dict["KERNEL_NOTEBOOK_MASTER_REST"], rest_cred="'" + self.rest_credential + "'", pyspark_python=env_dict["PYSPARK_PYTHON"], spark_opts=env_dict["SPARK_OPTS"], user_defined_spark_opts=user_defined_spark_opts, ) ) def _update_notebook_master_rest_url(self, env_dict: dict) -> None: """ Updates the notebook master rest url to update KERNEL_NOTEBOOK_MASTER_REST, conductor_endpoint, and SPARK_OPTS. """ self.log.debug("Updating notebook master rest urls.") response = None # Assemble REST call header = "Accept: application/json" authorization = "Authorization: %s" % self.rest_credential if ( "KERNEL_NOTEBOOK_DATA_DIR" not in env_dict or "KERNEL_NOTEBOOK_COOKIE_JAR" not in env_dict or "KERNEL_CURL_SECURITY_OPT" not in env_dict ): self.log.warning( "Could not find KERNEL environment variables. Not updating notebook master rest url." ) return if ( "CONDUCTOR_REST_URL" not in env_dict or "KERNEL_SIG_ID" not in env_dict or "KERNEL_NOTEBOOK_MASTER_REST" not in env_dict ): self.log.warning( "Could not find CONDUCTOR_REST_URL or KERNEL_SIG_ID or KERNEL_NOTEBOOK_MASTER_REST. " "Not updating notebook master rest url." ) return cookie_jar = pjoin( env_dict["KERNEL_NOTEBOOK_DATA_DIR"], env_dict["KERNEL_NOTEBOOK_COOKIE_JAR"] ) sslconf = env_dict["KERNEL_CURL_SECURITY_OPT"].split() ascd_rest_url = env_dict["CONDUCTOR_REST_URL"] ig_id = env_dict["KERNEL_SIG_ID"] url = f"{ascd_rest_url}conductor/v1/instances?id={ig_id}&fields=outputs" cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True ) output, stderr = process.communicate() response = json.loads(output) if output else None if ( response is None or len(response) < 1 or not response[0] or not response[0]["outputs"] ): response = None except Exception as e: self.log.warning( f"Getting instance group with cmd '{cmd}' failed with exception: '{e}'. Continuing..." ) return outputs = response[0]["outputs"] if ( "one_notebook_master_rest_url" not in outputs or not outputs["one_notebook_master_rest_url"] or "value" not in outputs["one_notebook_master_rest_url"] or not outputs["one_notebook_master_rest_url"]["value"] ): self.log.warning( "Could not get one_notebook_master_rest_url from instance group. " "Not updating notebook master rest url." ) return if ( "one_notebook_master_web_submission_url" not in outputs or not outputs["one_notebook_master_web_submission_url"] or "value" not in outputs["one_notebook_master_web_submission_url"] or not outputs["one_notebook_master_web_submission_url"]["value"] ): self.log.warning( "Could not get one_notebook_master_web_submission_url from instance group. " "Not updating notebook master rest url." ) return updated_one_notebook_master_rest_url = outputs["one_notebook_master_rest_url"]["value"] updated_one_notebook_master_web_submission_url = outputs[ "one_notebook_master_web_submission_url" ]["value"] if updated_one_notebook_master_rest_url and updated_one_notebook_master_web_submission_url: self.log.debug( f"Updating KERNEL_NOTEBOOK_MASTER_REST to '{updated_one_notebook_master_rest_url}'." 
            )
            os.environ["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url
            env_dict["KERNEL_NOTEBOOK_MASTER_REST"] = updated_one_notebook_master_rest_url
            self.conductor_endpoint = updated_one_notebook_master_web_submission_url

    def poll(self) -> bool | None:
        """
        Submitting a new kernel/app takes a while to reach the SUBMITTED state, so the
        application ID will probably not be available immediately for poll.  We therefore
        regard the application as RUNNING while its ID is still in a SUBMITTED/WAITING/RUNNING
        state.

        :return: None if the application's ID is available and state is SUBMITTED/WAITING/RUNNING.  Otherwise False.
        """
        result = False
        if self._get_application_id():
            state = self._query_app_state_by_driver_id(self.driver_id)
            if state in ConductorClusterProcessProxy.initial_states:
                result = None
        return result

    def send_signal(self, signum: int) -> bool | None:
        """
        Currently only supports 0 (as poll) and SIGKILL (as kill); any other signal is sent
        via the superclass.

        :param signum
        :return: None if the signal was successfully sent to the kernel, False if an exception was thrown
        """
        self.log.debug(f"ConductorClusterProcessProxy.send_signal {signum}")
        if signum == 0:
            return self.poll()
        elif signum == signal.SIGKILL:
            return self.kill()
        else:
            return super().send_signal(signum)

    def kill(self) -> bool | None:
        """
        Kill a kernel.

        :return: None if the application existed and is not in RUNNING state, False otherwise.
        """
        state = None
        result = False
        if self.driver_id:
            resp = self._kill_app_by_driver_id(self.driver_id)
            self.log.debug(
                "ConductorClusterProcessProxy.kill: kill_app_by_driver_id({}) response: {}, confirming "
                "app state is not RUNNING".format(self.driver_id, resp)
            )
            i = 1
            state = self._query_app_state_by_driver_id(self.driver_id)
            while state not in ConductorClusterProcessProxy.final_states and i <= max_poll_attempts:
                time.sleep(poll_interval)
                state = self._query_app_state_by_driver_id(self.driver_id)
                i = i + 1

            if state in ConductorClusterProcessProxy.final_states:
                result = None

        super().kill()

        self.log.debug(
            "ConductorClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}".format(
                self.application_id, self.kernel_id, state
            )
        )
        return result

    def cleanup(self) -> None:
        """Clean up the kernel."""
        # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill,
        # wait when we have a local_proc.
        if self.local_proc:
            self.log.debug(
                "ConductorClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...".format(
                    self.local_proc.pid
                )
            )
            if super().poll():
                super().kill()
            super().wait()
            self.local_proc = None

        # reset application id to force new query - handles kernel restarts/interrupts
        self.application_id = None

        # for cleanup, we should call the superclass last
        super().cleanup()

    def _parse_driver_submission_id(self, submission_response: str) -> None:
        """
        Parse the driver id from the stderr returned by launch_kernel.

        :param submission_response
        """
        if submission_response:
            self.log.debug(f"Submission Response: {submission_response}\n")
            matched_lines = [
                line for line in submission_response.split("\n") if "submissionId" in line
            ]
            if matched_lines and len(matched_lines) > 0:
                driver_info = matched_lines[0]
                self.log.debug(f"Driver Info: {driver_info}")
                driver_id = driver_info.split(":")[1]
                driver_id = re.findall(r'"([^"]*)"', driver_id)
                if driver_id and len(driver_id) > 0:
                    self.driver_id = driver_id[0]
                    self.log.debug(f"Driver ID: {driver_id[0]}")
            # Check for a submission error to report
            err_lines = [
                line
                for line in submission_response.split("\n")
                if "Application submission failed" in line
            ]
            if err_lines and len(err_lines) > 0:
                self.log_and_raise(
                    http_status_code=500,
                    reason=err_lines[0][err_lines[0].find("Application submission failed") :],
                )

    async def confirm_remote_startup(self) -> None:
        """
        Confirms the application is in a started state before returning.

        Should post-RUNNING states be unexpectedly encountered ('FINISHED', 'KILLED',
        'RECLAIMED') then we must throw, otherwise the rest of the gateway will believe it's
        talking to a valid kernel.
        """
        self.start_time = RemoteProcessProxy.get_current_time()
        i = 0
        ready_to_connect = False  # we're ready to connect when we have a connection file to use
        while not ready_to_connect:
            if self.local_proc.stderr:
                # Read stderr after the launch_kernel, and parse the driver id from the REST response
                output = self.local_proc.stderr.read().decode("utf-8")
                self._parse_driver_submission_id(output)
            i += 1
            await self.handle_timeout()

            if self._get_application_id(True):
                # Once we have an application ID, start monitoring state, obtain assigned host
                # and get connection info
                app_state = self._get_application_state()

                if app_state in ConductorClusterProcessProxy.final_states:
                    error_message = (
                        "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}' "
                        "during kernel startup!".format(
                            self.kernel_id, self.application_id, app_state
                        )
                    )
                    self.log_and_raise(http_status_code=500, reason=error_message)

                self.log.debug(
                    "{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'".format(
                        i, app_state, self.assigned_host, self.kernel_id, self.application_id
                    )
                )

                if self.assigned_host:
                    ready_to_connect = await self.receive_connection_info()
            else:
                self.detect_launch_failure()

    def _get_application_state(self) -> str:
        """
        Gets the current application state using the application_id already obtained.  Once the
        assigned host has been identified, it is no longer accessed.
        """
        app_state = None
        apps = self._query_app_by_driver_id(self.driver_id)
        if apps:
            for app in apps:
                if "state" in app:
                    app_state = app["state"]
                if not self.assigned_host and app["driver"]:
                    self.assigned_host = app["driver"]["host"]
                    # Set the driver host to the actual host where the application landed.
self.assigned_ip = socket.gethostbyname(self.assigned_host) return app_state async def handle_timeout(self) -> None: """ Checks to see if the kernel launch timeout has been exceeded while awaiting connection info. """ await asyncio.sleep(poll_interval) time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) if time_interval > self.kernel_launch_timeout: reason = f"Application failed to start within {self.kernel_launch_timeout} seconds." error_http_code = 500 if self._get_application_id(True): if self._query_app_state_by_driver_id(self.driver_id) != "WAITING": reason = "Kernel unavailable after {} seconds for driver_id {}, app_id {}, launch timeout: {}!" reason = reason.format( time_interval, self.driver_id, self.application_id, self.kernel_launch_timeout, ) error_http_code = 503 else: reason = "App {} is WAITING, but waited too long ({} secs) to get connection file".format( self.application_id, self.kernel_launch_timeout ) await asyncio.get_event_loop().run_in_executor(None, self.kill) timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}" self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def _get_application_id(self, ignore_final_states: bool = False) -> str: """ Return the kernel's application ID if available, otherwise None. If we're obtaining application_id from scratch, do not consider kernels in final states. """ if not self.application_id: apps = self._query_app_by_driver_id(self.driver_id) state_condition = True if apps: for app in apps: if "state" in app and ignore_final_states: state_condition = ( app["state"] not in ConductorClusterProcessProxy.final_states ) if "applicationid" in app and len(app["applicationid"]) > 0 and state_condition: self.application_id = app["applicationid"] time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) self.log.info( "ApplicationID: '{}' assigned for KernelID: '{}', state: {}, " "{} seconds after starting.".format( app["applicationid"], self.kernel_id, app["state"], time_interval ) ) else: self.log.debug( "ApplicationID not yet assigned for KernelID: '{}' - retrying...".format( self.kernel_id ) ) else: self.log.debug( f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying..." ) return self.application_id def get_process_info(self) -> dict[str, Any]: """ Captures the base information necessary for kernel persistence relative to Conductor clusters. """ process_info = super().get_process_info() process_info.update({"application_id": self.application_id}) process_info.update({"rest_credential": self.rest_credential}) return process_info def load_process_info(self, process_info: dict[str, Any]) -> None: """ Captures the base information necessary for kernel persistence relative to Conductor clusters. """ super().load_process_info(process_info) self.application_id = process_info["application_id"] self.rest_credential = process_info["rest_credential"] def _query_app_by_driver_id(self, driver_id: str) -> dict | None: """ Retrieve application by using driver ID. :param driver_id: as the unique driver id for query :return: The JSON object of an application. None if driver_id is not found. 
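
        A returned applist entry typically resembles (illustrative values, limited to the
        fields this class consults)::

            [{"applicationid": "app-20230101...", "state": "RUNNING",
              "driver": {"id": "driver-20230101...", "host": "node1.example.com"}}]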
""" response = None if not driver_id: return response # Assemble REST call env = self.env header = "Accept: application/json" authorization = "Authorization: %s" % self.rest_credential cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() url = f"{self.conductor_endpoint}/v1/applications?driverid={driver_id}" cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True ) output, stderr = process.communicate() response = json.loads(output) if output else None response = None if not response or not response["applist"] else response["applist"] except Exception as e: self.log.warning( f"Getting application with cmd '{cmd}' failed with exception: '{e}'. Continuing..." ) return response def _query_app_by_id(self, app_id: str) -> dict | None: """ Retrieve an application by application ID. :param app_id :return: The JSON object of an application. None if app_id is not found. """ response = None # Assemble REST call env = self.env header = "Accept: application/json" authorization = "Authorization: %s" % self.rest_credential cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() url = f"{self.conductor_endpoint}/v1/applications?applicationid={app_id}" cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True ) output, stderr = process.communicate() response = json.loads(output) if output else None response = None if response is None or not response["applist"] else response["applist"] except Exception as e: self.log.warning( f"Getting application with cmd '{cmd}' failed with exception: '{e}'. Continuing..." ) return response def _query_app_state_by_driver_id(self, driver_id: str) -> dict | None: """ Return the state of an application. :param driver_id: :return: """ response = None apps = self._query_app_by_driver_id(driver_id) if apps: for app in apps: if "state" in app: response = app["state"] return response def _get_driver_by_app_id(self, app_id: str) -> dict | None: """ Get driver info from application ID. :param app_id :return: The JSON response driver information of the corresponding application. None if app_id is not found. """ response = None apps = self._query_app_by_id(app_id) if apps: for app in apps: if app and app["driver"]: self.log.debug("Obtain Driver ID: {}".format(app["driver"]["id"])) response = app["driver"] else: self.log.warning("Application id does not exist") return response def _kill_app_by_driver_id(self, driver_id: str): """ Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED. :param driver_id :return: The JSON response of killing the application. None if driver is not found. 
""" self.log.debug(f"Kill driver: {driver_id}") if driver_id is None: if self.application_id is None: return None self.log.debug( "Driver does not exist, retrieving DriverID with ApplicationID: {}".format( self.application_id ) ) driver_info = self._get_driver_by_app_id(self.application_id) if driver_info: self.driver_id = driver_info["id"] else: return None # Assemble REST call response = None env = self.env header = "Accept: application/json" authorization = "Authorization: %s" % self.rest_credential cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() url = f"{self.conductor_endpoint}/v1/submissions/kill/{self.driver_id}" cmd = ["curl", "-v", "-b", cookie_jar, "-X", "POST", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf # Perform REST call try: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True ) output, stderr = process.communicate() response = json.loads(output) if output else None except Exception as e: self.log.warning( f"Termination of application with cmd '{cmd}' failed with exception: '{e}'. Continuing..." ) self.log.debug(f"Kill response: {response}") return response def _performRestCall(self, cmd: list[str], url: str, HA_LIST: list[str]) -> tuple: # noqa for HA in HA_LIST: portcolon = url.rfind(":") slash = url.find("://") url = url[0 : slash + 3] + HA + url[portcolon:] cmd[-1] = url self.log.debug(cmd) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, universal_newlines=True, ) output, stderr = process.communicate() if ( "Could not resolve host" not in stderr and "Failed connect to" not in stderr and "Connection refused" not in stderr ): return output, stderr self.log_and_raise( http_status_code=500, reason="Could not connect to ascd. Verify ascd is running." ) return "Error", "Error" # confirm return type def _performConductorJWTLogonAndRetrieval( # noqa self, jwt_token: str, env_dict: dict[str, Any] ): """ Authenticate to Conductor with a JWT Token and setup the kernel environment variables. :param jwt_token: JWT Token to authenticate with to Conductor :param env_dict: Environment Dictionary of this Kernel launch :return: None """ response = None if not jwt_token: return response # Assemble JWT Auth logon REST call env = self.env if env["KERNEL_IG_UUID"] is None: reasonErr = ( "Instance group specified is None. Check environment " "specified instance group is available." ) self.log_and_raise(http_status_code=500, reason=reasonErr) # Determine hostname of ascd_endpoint and setup the HA List portcolon = self.ascd_endpoint.rfind(":") slash = self.ascd_endpoint.find("://") host = self.ascd_endpoint[slash + 3 : portcolon] HA_LIST = env["KERNEL_CONDUCTOR_HA_ENDPOINTS"].split(",") HA_LIST.insert(0, host) header = "Accept: application/json" authorization = "Authorization: Bearer %s" % jwt_token cookie_jar = pjoin(env["KERNEL_NOTEBOOK_DATA_DIR"], env["KERNEL_NOTEBOOK_COOKIE_JAR"]) sslconf = env["KERNEL_CURL_SECURITY_OPT"].split() url = "{}/auth/logon/jwt?topology={}".format(self.ascd_endpoint, env["KERNEL_TOPOLOGY"]) cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) if "Error" in output: reasonErr = "Failed to perform JWT Auth Logon. 
" + output.splitlines()[0] self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) self.rest_credential = url_unescape(output)[1:-1] # Assemble EGO Token Logon REST call authorization = "Authorization: PlatformToken token=" + output.strip('"') url = "%s/auth/logon" % self.ascd_endpoint cmd = ["curl", "-v", "-c", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) if "Error" in output: reasonErr = "Failed to perform EGO Auth Logon. " + output.splitlines()[0] self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) # Get the Python path to use to make sure the right conda environment is used url = "{}/anaconda/instances/{}".format( self.ascd_endpoint, env["KERNEL_ANACONDA_INST_UUID"] ) cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) response = json.loads(output) if output else None if response is None or not response["parameters"]["deploy_home"]["value"]: reasonErr = "Could not retrieve anaconda instance. Verify anaconda instance with id " reasonErr = reasonErr + env["KERNEL_ANACONDA_INST_UUID"] + " exists" self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) else: env_dict["KERNEL_PYSPARK_PYTHON"] = ( response["parameters"]["deploy_home"]["value"] + "/anaconda/envs/" + env["KERNEL_ANACONDA_ENV"] + "/bin/python" ) # Get instance group information we need url = "{}/instances?id={}&fields=sparkinstancegroup,outputs".format( self.ascd_endpoint, env["KERNEL_IG_UUID"], ) cmd = ["curl", "-v", "-b", cookie_jar, "-X", "GET", "-H", header, "-H", authorization, url] cmd[2:2] = sslconf output, stderr = self._performRestCall(cmd, url, HA_LIST) response = json.loads(output) if output else None if response is None or len(response) == 0 or response[0] is None: reasonErr = ( "Could not retrieve instance group. Verify instance group with id " + env["KERNEL_IG_UUID"] + " exists." ) self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) elif ( response is None or response[0] is None or "value" not in response[0]["outputs"]["batch_master_rest_urls"] ): reasonErr = ( "Could not retrieve outputs for instance group. Verify instance group with id " + env["KERNEL_IG_UUID"] + " is started" ) self.log.warning(cmd) self.log_and_raise(http_status_code=500, reason=reasonErr) else: env_dict["KERNEL_SPARK_HOME"] = response[0]["sparkinstancegroup"]["sparkhomedir"] env_dict["KERNEL_NOTEBOOK_MASTER_REST"] = response[0]["outputs"][ "batch_master_rest_urls" ]["value"] self.conductor_endpoint = response[0]["outputs"]["one_batch_master_web_submission_url"][ "value" ] return response ================================================ FILE: enterprise_gateway/services/processproxies/container.py ================================================ """Code related to managing kernels running in containers.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
from __future__ import annotations import abc import logging import os import signal from typing import Any import urllib3 # docker ends up using this and it causes lots of noise, so turn off warnings from jupyter_client import localinterfaces from ..kernels.remotemanager import RemoteKernelManager from .processproxy import RemoteProcessProxy log = logging.getLogger(__name__) urllib3.disable_warnings() local_ip = localinterfaces.public_ips()[0] default_kernel_uid = "1000" # jovyan user is the default default_kernel_gid = "100" # users group is the default def _parse_prohibited_ids(env_var: str, default: str) -> list[int]: """Parse a comma-separated list of IDs from an environment variable into integers. Raises: ValueError: If any entry in the configured value is not a valid integer. This enforces a fail-closed posture — a misconfigured prohibited list (e.g. usernames instead of numeric IDs) will prevent startup rather than silently yielding an empty list. """ result: list[int] = [] raw_value = os.getenv(env_var, default) for item in raw_value.split(","): stripped = item.strip() if stripped: try: result.append(int(stripped)) except ValueError: msg = ( f"Invalid entry '{stripped}' in {env_var}='{raw_value}'. " f"All entries must be numeric IDs, not usernames or group names. " f"Example: {env_var}=0,1000" ) log.critical(msg) raise ValueError(msg) from None return result # These could be enforced via a PodSecurityPolicy, but those affect # all pods so the cluster admin would need to configure those for # all applications. prohibited_uids = _parse_prohibited_ids("EG_PROHIBITED_UIDS", "0") prohibited_gids = _parse_prohibited_ids("EG_PROHIBITED_GIDS", "0") mirror_working_dirs = bool(os.getenv("EG_MIRROR_WORKING_DIRS", "false").lower() == "true") # Get the globally-configured default images. Defaulting to None if not set. default_kernel_image = os.getenv("EG_KERNEL_IMAGE") default_kernel_executor_image = os.getenv("EG_KERNEL_EXECUTOR_IMAGE") class ContainerProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for container-based kernels. """ def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.container_name = "" self.assigned_node_ip = None def _determine_kernel_images(self, **kwargs: dict[str, Any] | None) -> None: """ Determine which kernel images to use. Initialize to any defined in the process proxy override that then let those provided by client via env override. """ kernel_image = self.proxy_config.get("image_name", default_kernel_image) self.kernel_image = kwargs["env"].get("KERNEL_IMAGE", kernel_image) if self.kernel_image is None: self.log_and_raise( http_status_code=500, reason="No kernel image could be determined! Set the `image_name` in the " "process_proxy.config stanza of the corresponding kernel.json file.", ) # If no default executor image is configured, default it to current image kernel_executor_image = self.proxy_config.get( "executor_image_name", default_kernel_executor_image or self.kernel_image ) self.kernel_executor_image = kwargs["env"].get( "KERNEL_EXECUTOR_IMAGE", kernel_executor_image ) async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> ContainerProcessProxy: """ Launches the specified process within the container environment. 
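
        As a rough sketch (the image name is hypothetical), the kernel's env is
        adjusted along these lines before launch:

            env["KERNEL_IMAGE"] = "example/kernel-py:tag"       # via _determine_kernel_images
            env["KERNEL_EXECUTOR_IMAGE"] = env["KERNEL_IMAGE"]  # default when not configured
            env.pop("KERNEL_WORKING_DIR")  # unless EG_MIRROR_WORKING_DIRS is enabled
            # KERNEL_UID / KERNEL_GID are then validated against the prohibited IDs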
""" # Set env before superclass call so we see these in the debug output self._determine_kernel_images(**kwargs) kwargs["env"]["KERNEL_IMAGE"] = self.kernel_image kwargs["env"]["KERNEL_EXECUTOR_IMAGE"] = self.kernel_executor_image # If mirroring is not enabled, remove working directory if present if not mirror_working_dirs and "KERNEL_WORKING_DIR" in kwargs["env"]: del kwargs["env"]["KERNEL_WORKING_DIR"] self._enforce_prohibited_ids(**kwargs) await super().launch_process(kernel_cmd, **kwargs) self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip self.log.info( "{}: kernel launched. Kernel image: {}, KernelID: {}, cmd: '{}'".format( self.__class__.__name__, self.kernel_image, self.kernel_id, kernel_cmd ) ) await self.confirm_remote_startup() return self def _enforce_prohibited_ids(self, **kwargs: dict[str, Any] | None) -> None: """Determine UID and GID with which to launch container and ensure they are not prohibited.""" kernel_uid = kwargs["env"].get("KERNEL_UID", default_kernel_uid) kernel_gid = kwargs["env"].get("KERNEL_GID", default_kernel_gid) try: uid_int = int(kernel_uid) except (ValueError, TypeError): self.log_and_raise( http_status_code=403, reason=f"Invalid KERNEL_UID value '{kernel_uid}': not a valid integer!", ) try: gid_int = int(kernel_gid) except (ValueError, TypeError): self.log_and_raise( http_status_code=403, reason=f"Invalid KERNEL_GID value '{kernel_gid}': not a valid integer!", ) max_id = 4294967295 # uint32 max — Linux uid_t/gid_t upper bound if not (0 <= uid_int <= max_id): self.log_and_raise( http_status_code=403, reason=f"Invalid KERNEL_UID value '{kernel_uid}': must be in range 0-{max_id}!", ) if not (0 <= gid_int <= max_id): self.log_and_raise( http_status_code=403, reason=f"Invalid KERNEL_GID value '{kernel_gid}': must be in range 0-{max_id}!", ) if uid_int in prohibited_uids: self.log_and_raise( http_status_code=403, reason=f"Kernel's UID value of '{kernel_uid}' has been denied via EG_PROHIBITED_UIDS!", ) if gid_int in prohibited_gids: self.log_and_raise( http_status_code=403, reason=f"Kernel's GID value of '{kernel_gid}' has been denied via EG_PROHIBITED_GIDS!", ) # Ensure the kernel's env has normalized values kwargs["env"]["KERNEL_UID"] = str(uid_int) kwargs["env"]["KERNEL_GID"] = str(gid_int) def poll(self) -> bool | None: """Determines if container is still active. Submitting a new kernel to the container manager will take a while to be Running. Thus kernel ID will probably not be available immediately for poll. So will regard the container as active when no status is available or one of the initial phases. Returns ------- None if the container cannot be found or its in an initial state. Otherwise False. """ result = False container_status = self.get_container_status(None) # Do not check whether container_status is None # EG couldn't restart kernels although connections exists. # See https://github.com/jupyter-server/enterprise_gateway/issues/827 if container_status in self.get_initial_states(): result = None return result def send_signal(self, signum: int) -> bool | None: """Send signal `signum` to container. Parameters ---------- signum : int The signal number to send. Zero is used to determine heartbeat. """ if signum == 0: return self.poll() elif signum == signal.SIGKILL: return self.kill() else: # This is very likely an interrupt signal, so defer to the super class # which should use the communication port. 
return super().send_signal(signum) def kill(self) -> bool | None: """Kills a containerized kernel. Returns ------- None if the container is gracefully terminated, False otherwise. """ result = None if self.container_name: # We only have something to terminate if we have a name result = self.terminate_container_resources() return result def shutdown_listener(self): """Shut down the listener.""" super().shutdown_listener() if self.container_name: # We only have something to terminate if we have a name self.terminate_container_resources() async def confirm_remote_startup(self) -> None: """Confirms the container has started and returned necessary connection information.""" self.log.debug("Trying to confirm kernel container startup status") self.start_time = RemoteProcessProxy.get_current_time() i = 0 ready_to_connect = False # we're ready to connect when we have a connection file to use while not ready_to_connect: i += 1 await self.handle_timeout() container_status = self.get_container_status(i) if container_status: if container_status in self.get_error_states(): self.log_and_raise( http_status_code=500, reason=f"Error starting kernel container; status: '{container_status}'.", ) else: if self.assigned_host: ready_to_connect = await self.receive_connection_info() self.pid = ( 0 # We won't send process signals for kubernetes lifecycle management ) self.pgid = 0 else: self.detect_launch_failure() def get_process_info(self) -> dict[str, Any]: """Captures the base information necessary for kernel persistence relative to containers.""" process_info = super().get_process_info() process_info.update( { "assigned_node_ip": self.assigned_node_ip, } ) return process_info def load_process_info(self, process_info: dict[str, Any]) -> None: """Loads the base information necessary for kernel persistence relative to containers.""" super().load_process_info(process_info) self.assigned_node_ip = process_info["assigned_node_ip"] @abc.abstractmethod def get_initial_states(self): """Return list of states in lowercase indicating container is starting (includes running).""" raise NotImplementedError @abc.abstractmethod def get_error_states(self): """Returns the list of error states (in lowercase).""" raise NotImplementedError @abc.abstractmethod def get_container_status(self, iteration: int | None) -> str: """Returns the current container state (in lowercase) or the empty string if not available.""" raise NotImplementedError @abc.abstractmethod def terminate_container_resources(self): """Terminate any artifacts created on behalf of the container's lifetime.""" raise NotImplementedError ================================================ FILE: enterprise_gateway/services/processproxies/crd.py ================================================ """Code related to managing kernels running based on k8s custom resource.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import re from contextlib import suppress from typing import Any from kubernetes import client from ..kernels.remotemanager import RemoteKernelManager from .k8s import KubernetesProcessProxy class CustomResourceProcessProxy(KubernetesProcessProxy): """A custom resource process proxy.""" # Identifies the kind of object being managed by this process proxy. # For these values we will prefer the values found in the 'kind' field # of the object's metadata. This attribute is strictly used to provide # context to log messages. 
object_kind = "CustomResourceDefinition" def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.group = self.version = self.plural = None self.kernel_resource_name = None async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> CustomResourceProcessProxy: """Launch the process for a kernel.""" self.kernel_resource_name = self._determine_kernel_pod_name(**kwargs) kwargs["env"]["KERNEL_RESOURCE_NAME"] = self.kernel_resource_name kwargs["env"]["KERNEL_CRD_GROUP"] = self.group kwargs["env"]["KERNEL_CRD_VERSION"] = self.version kwargs["env"]["KERNEL_CRD_PLURAL"] = self.plural await super().launch_process(kernel_cmd, **kwargs) return self def get_container_status(self, iteration: int | None) -> str: """Determines submitted CRD application status Submitting a new kernel application CRD will take a while to reach the Running state and the submission can also fail due to malformation or other issues which will prevent the application pod to reach the desired Running state. This function check the CRD submission state and in case of success it then delegates to parent to check if the application pod is running. Returns ------- Empty string if the container cannot be found otherwise. The pod application status in case of success on Spark Operator side Or the retrieved spark operator submission status in other cases (e.g. Failed) """ application_state = "" with suppress(Exception): custom_resource = client.CustomObjectsApi().get_namespaced_custom_object( self.group, self.version, self.kernel_namespace, self.plural, self.kernel_resource_name, ) if custom_resource: application_state = custom_resource['status']['applicationState']['state'].lower() if application_state in self.get_error_states(): exception_text = self._get_exception_text( custom_resource['status']['applicationState']['errorMessage'] ) error_message = ( f"CRD submission for kernel {self.kernel_id} failed: {exception_text}" ) self.log.debug(error_message) elif application_state == "running" and not self.assigned_host: super().get_container_status(iteration) # only log if iteration is not None (otherwise poll() is too noisy) # check for running state to avoid double logging with superclass if iteration and application_state != "running": self.log.debug( f"{iteration}: Waiting from CRD status from resource manager {self.object_kind.lower()} in " f"namespace '{self.kernel_namespace}'. Name: '{self.kernel_resource_name}', " f"Status: '{application_state}', KernelID: '{self.kernel_id}'" ) return application_state def delete_managed_object(self, termination_stati: list[str]) -> bool: """Deletes the object managed by this process-proxy A return value of True indicates the object is considered deleted, otherwise a False or None value is returned. Note: the caller is responsible for handling exceptions. 
""" delete_status = client.CustomObjectsApi().delete_namespaced_custom_object( self.group, self.version, self.kernel_namespace, self.plural, self.kernel_resource_name, grace_period_seconds=0, propagation_policy="Background", ) result = delete_status and delete_status.get("status", None) in termination_stati return result def get_initial_states(self) -> set: """Return list of states in lowercase indicating container is starting (includes running).""" return ["submitted", "pending", "running"] def _get_exception_text(self, error_message): match = re.search(r'Exception\s*:\s*(.*)', error_message, re.MULTILINE) if match: error_message = match.group(1) return error_message ================================================ FILE: enterprise_gateway/services/processproxies/distributed.py ================================================ """Code used for the generic distribution of kernels across a set of hosts.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import asyncio import json import os import signal from socket import gethostbyname from subprocess import STDOUT from typing import Any, ClassVar from ..kernels.remotemanager import RemoteKernelManager from .processproxy import BaseProcessProxyABC, RemoteProcessProxy poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) kernel_log_dir = os.getenv( "EG_KERNEL_LOG_DIR", "/tmp" # noqa ) # would prefer /var/log, but its only writable by root class TrackKernelOnHost: """A class for tracking a kernel on a host.""" _host_kernels: ClassVar = {} _kernel_host_mapping: ClassVar = {} def add_kernel_id(self, host: str, kernel_id: str) -> None: """Add a kernel to a host.""" self._kernel_host_mapping[kernel_id] = host self.increment(host) def delete_kernel_id(self, kernel_id: str) -> None: """Delete a kernel id from tracking.""" host = self._kernel_host_mapping.get(kernel_id) if host: self.decrement(host) del self._kernel_host_mapping[kernel_id] def min_or_remote_host(self, remote_host: str | None = None) -> str: """Return the remote host if given, or the kernel with the min value.""" if remote_host: return remote_host return min(self._host_kernels, key=lambda k: self._host_kernels[k]) def increment(self, host: str) -> None: """Increment the value for a host.""" val = int(self._host_kernels.get(host, 0)) self._host_kernels[host] = val + 1 def decrement(self, host: str) -> None: """Decrement the value for a host.""" val = int(self._host_kernels.get(host, 0)) self._host_kernels[host] = val - 1 def init_host_kernels(self, hosts) -> None: """Inititialize the kernels for a set of hosts.""" if len(self._host_kernels) == 0: self._host_kernels.update({key: 0 for key in hosts}) class DistributedProcessProxy(RemoteProcessProxy): """ Manages the lifecycle of kernels distributed across a set of hosts. 
""" host_index = 0 kernel_on_host = TrackKernelOnHost() def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.kernel_log = None self.local_stdout = None self.least_connection = kernel_manager.load_balancing_algorithm == "least-connection" if proxy_config.get("remote_hosts"): self.hosts = proxy_config.get("remote_hosts").split(",") else: self.hosts = kernel_manager.remote_hosts # from command line or env if self.least_connection: DistributedProcessProxy.kernel_on_host.init_host_kernels(self.hosts) async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> DistributedProcessProxy: """ Launches a kernel process on a selected host. """ env_dict = kwargs.get("env") await super().launch_process(kernel_cmd, **kwargs) self.assigned_host = self._determine_next_host(env_dict) self.ip = gethostbyname(self.assigned_host) # convert to ip if host is provided self.assigned_ip = self.ip try: result_pid = self._launch_remote_process(kernel_cmd, **kwargs) self.pid = int(result_pid) except Exception as e: error_message = "Failure occurred starting kernel on '{}'. Returned result: {}".format( self.ip, e ) self.log_and_raise(http_status_code=500, reason=error_message) self.log.info( "Kernel launched on '{}', pid: {}, ID: {}, Log file: {}:{}, Command: '{}'. ".format( self.assigned_host, self.pid, self.kernel_id, self.assigned_host, self.kernel_log, kernel_cmd, ) ) await self.confirm_remote_startup() return self def _launch_remote_process(self, kernel_cmd: str, **kwargs: dict[str, Any] | None) -> str: """ Launch the kernel as indicated by the argv stanza in the kernelspec. Note that this method will bypass use of ssh if the remote host is also the local machine. """ cmd = self._build_startup_command(kernel_cmd, **kwargs) self.log.debug(f"Invoking cmd: '{cmd}' on host: {self.assigned_host}") result_pid = "bad_pid" # purposely initialize to bad int value if BaseProcessProxyABC.ip_is_local(self.ip): # launch the local command with redirection in place self.local_stdout = open(self.kernel_log, mode="a") # noqa self.local_proc = self.launch_kernel( cmd, stdout=self.local_stdout, stderr=STDOUT, **kwargs ) result_pid = str(self.local_proc.pid) else: # launch remote command via ssh result = self.rsh(self.ip, cmd) for line in result: result_pid = line.strip() return result_pid def _build_startup_command(self, argv_cmd: str, **kwargs: dict[str, Any] | None) -> str: """ Builds the command to invoke by concatenating envs from kernelspec followed by the kernel argvs. We also force nohup, redirection to a file and place in background, then follow with an echo for the background pid. Note: We optimize for the local case and just return the existing command. """ # Optimized case needs to also redirect the kernel output, so unconditionally compose kernel_log env_dict = kwargs["env"] kid = env_dict.get("KERNEL_ID") self.kernel_log = os.path.join(kernel_log_dir, f"kernel-{kid}.log") if BaseProcessProxyABC.ip_is_local(self.ip): # We're local so just use what we're given cmd = argv_cmd else: # Add additional envs, including those in kernelspec cmd = "" for key, value in env_dict.items(): cmd += "export {}={};".format(key, json.dumps(value).replace("'", "''")) for key, value in self.kernel_manager.kernel_spec.env.items(): cmd += "export {}={};".format(key, json.dumps(value).replace("'", "''")) cmd += "nohup" for arg in argv_cmd: cmd += f" {arg}" cmd += f" >> {self.kernel_log} 2>&1 & echo $!" 
            # the trailing "echo $!" is what returns the process id

        return cmd

    def _determine_next_host(self, env_dict: dict) -> str:
        """Determine the next host via simple round-robin or, when configured, least-connection."""
        remote_host = env_dict.get("KERNEL_REMOTE_HOST")

        if self.least_connection:
            next_host = DistributedProcessProxy.kernel_on_host.min_or_remote_host(remote_host)
            DistributedProcessProxy.kernel_on_host.add_kernel_id(next_host, self.kernel_id)
        else:
            next_host = (
                remote_host
                if remote_host
                else self.hosts[DistributedProcessProxy.host_index % len(self.hosts)]
            )
            DistributedProcessProxy.host_index += 1

        return next_host

    def _unregister_assigned_host(self) -> None:
        if self.least_connection:
            DistributedProcessProxy.kernel_on_host.delete_kernel_id(self.kernel_id)

    async def confirm_remote_startup(self) -> None:
        """Confirms the remote kernel has started by obtaining connection information from the remote host."""
        self.start_time = RemoteProcessProxy.get_current_time()
        i = 0
        ready_to_connect = False  # we're ready to connect when we have a connection file to use
        while not ready_to_connect:
            i += 1
            await self.handle_timeout()

            self.log.debug(
                "{}: Waiting to connect. Host: '{}', KernelID: '{}'".format(
                    i, self.assigned_host, self.kernel_id
                )
            )

            if self.assigned_host:
                ready_to_connect = await self.receive_connection_info()

    async def handle_timeout(self) -> None:
        """Checks to see if the kernel launch timeout has been exceeded while awaiting connection info."""
        await asyncio.sleep(poll_interval)
        time_interval = RemoteProcessProxy.get_time_diff(
            self.start_time, RemoteProcessProxy.get_current_time()
        )

        if time_interval > self.kernel_launch_timeout:
            reason = (
                "Waited too long ({}s) to get connection file. Check Enterprise Gateway log and kernel "
                "log ({}:{}) for more information.".format(
                    self.kernel_launch_timeout, self.assigned_host, self.kernel_log
                )
            )
            timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
            await asyncio.get_event_loop().run_in_executor(None, self.kill)
            self.log_and_raise(http_status_code=500, reason=timeout_message)

    def cleanup(self) -> None:
        """Clean up the proxy."""
        # DistributedProcessProxy can have a tendency to leave zombies, particularly when EG is
        # abruptly terminated. This extra call to shutdown_listener does the trick.
        self.shutdown_listener()
        self._unregister_assigned_host()
        if self.local_stdout:
            self.local_stdout.close()
            self.local_stdout = None
        super().cleanup()

    def shutdown_listener(self) -> None:
        """Ensure that the kernel process is terminated."""
        self.send_signal(signal.SIGTERM)
        super().shutdown_listener()



================================================
FILE: enterprise_gateway/services/processproxies/docker_swarm.py
================================================
"""Code related to managing kernels running in docker-based containers."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import logging
import os
from typing import Any

from docker.client import DockerClient
from docker.errors import NotFound
from docker.models.containers import Container
from docker.models.services import Service

# Debug logging level of docker produces too much noise - raise to warning by default.
from ..kernels.remotemanager import RemoteKernelManager from .container import ContainerProcessProxy logging.getLogger("urllib3.connectionpool").setLevel( os.environ.get("EG_DOCKER_LOG_LEVEL", logging.WARNING) ) docker_network = os.environ.get("EG_DOCKER_NETWORK", "bridge") client = DockerClient.from_env() class DockerSwarmProcessProxy(ContainerProcessProxy): """ Kernel lifecycle management for kernels in Docker Swarm. """ def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> DockerSwarmProcessProxy: """ Launches the specified process within a Docker Swarm environment. """ # Convey the network to the docker launch script kwargs["env"]["EG_DOCKER_NETWORK"] = docker_network kwargs["env"]["EG_DOCKER_MODE"] = "swarm" return super().launch_process(kernel_cmd, **kwargs) def get_initial_states(self) -> set: """Return list of states in lowercase indicating container is starting (includes running).""" return {"preparing", "starting", "running"} def get_error_states(self) -> set: """Returns the list of error states indicating container is shutting down or receiving error.""" return {"failed", "rejected", "complete", "shutdown", "orphaned", "remove"} def _get_service(self) -> Service: # Fetches the service object corresponding to the kernel with a matching label. service = None services = client.services.list(filters={"label": "kernel_id=" + self.kernel_id}) num_services = len(services) if num_services != 1: if num_services > 1: msg = "{}: Found more than one service ({}) for kernel_id '{}'!".format( self.__class__.__name__, num_services, self.kernel_id ) raise RuntimeError(msg) else: service = services[0] self.container_name = service.name return service def _get_task(self) -> dict: # Fetches the task object corresponding to the service associated with the kernel. We only ask for the # current task with desired-state == running. This eliminates failed states. task = None service = self._get_service() if service: tasks = service.tasks(filters={"desired-state": "running"}) num_tasks = len(tasks) if num_tasks != 1: if num_tasks > 1: msg = "{}: Found more than one task ({}) for service '{}', kernel_id '{}'!".format( self.__class__.__name__, num_tasks, service.name, self.kernel_id ) raise RuntimeError(msg) else: task = tasks[0] return task def get_container_status(self, iteration: int | None) -> str: """Return current container state.""" # Locates the kernel container using the kernel_id filter. If the status indicates an initial state we # should be able to get at the NetworksAttachments and determine the associated container's IP address. task_state = "" task_id = None task = self._get_task() if task: task_status = task["Status"] task_id = task["ID"] if task_status: task_state = task_status["State"].lower() if ( not self.assigned_host and task_state == "running" ): # in self.get_initial_states() # get the NetworkAttachments and pick out the first of the Network and first networks_attachments = task["NetworksAttachments"] if len(networks_attachments) > 0: address = networks_attachments[0]["Addresses"][0] ip = address.split("/")[0] self.assigned_ip = ip self.assigned_host = self.container_name if iteration: # only log if iteration is not None (otherwise poll() is too noisy) self.log.debug( "{}: Waiting to connect to docker container. 
" "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}', TaskID: '{}'".format( iteration, self.container_name, task_state, self.assigned_ip, self.kernel_id, task_id, ) ) return task_state def terminate_container_resources(self) -> bool | None: """Terminate any artifacts created on behalf of the container's lifetime.""" # Remove the docker service. result = True # We'll be optimistic service = self._get_service() if service: try: service.remove() # Service still exists, attempt removal except Exception as err: self.log.debug( "{} Termination of service: {} raised exception: {}".format( self.__class__.__name__, service.name, err ) ) if isinstance(err, NotFound): pass # okay if its not found else: result = False self.log.warning(f"Error occurred removing service: {err}") if result: self.log.debug( "{}.terminate_container_resources, service {}, kernel ID: {} has been terminated.".format( self.__class__.__name__, self.container_name, self.kernel_id ) ) self.container_name = None result = None # maintain jupyter contract else: self.log.warning( "{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.".format( self.__class__.__name__, self.container_name, self.kernel_id ) ) return result class DockerProcessProxy(ContainerProcessProxy): """Kernel lifecycle management for Docker kernels (non-Swarm).""" def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> DockerProcessProxy: """Launches the specified process within a Docker environment.""" # Convey the network to the docker launch script kwargs["env"]["EG_DOCKER_NETWORK"] = docker_network kwargs["env"]["EG_DOCKER_MODE"] = "docker" return super().launch_process(kernel_cmd, **kwargs) def get_initial_states(self) -> set: """Return list of states in lowercase indicating container is starting (includes running).""" return {"created", "running"} def get_error_states(self) -> set: """Returns the list of error states indicating container is shutting down or receiving error.""" return {"restarting", "removing", "paused", "exited", "dead"} def _get_container(self) -> Container: # Fetches the container object corresponding the the kernel_id label. # Only used when docker mode == regular (not swarm) container = None containers = client.containers.list(filters={"label": "kernel_id=" + self.kernel_id}) num_containers = len(containers) if num_containers != 1: if num_containers > 1: msg = "{}: Found more than one container ({}) for kernel_id '{}'!".format( self.__class__.__name__, num_containers, self.kernel_id ) raise RuntimeError(msg) else: container = containers[0] return container def get_container_status(self, iteration: int | None) -> str: """Return current container state.""" # Locates the kernel container using the kernel_id filter. If the phase indicates Running, the pod's IP # is used for the assigned_ip. 
Only used when docker mode == regular (non swarm) container_status = "" container = self._get_container() if container: self.container_name = container.name if container.status: container_status = container.status.lower() if container_status == "running" and not self.assigned_host: # Container is running, capture IP # we'll use this as a fallback in case we don't find our network self.assigned_ip = container.attrs.get("NetworkSettings").get("IPAddress") networks = container.attrs.get("NetworkSettings").get("Networks") if len(networks) > 0: self.assigned_ip = networks.get(docker_network).get("IPAddress") self.log.debug( "Using assigned_ip {} from docker network '{}'.".format( self.assigned_ip, docker_network ) ) else: self.log.warning( "Docker network '{}' could not be located in container attributes - " "using assigned_ip '{}'.".format(docker_network, self.assigned_ip) ) self.assigned_host = self.container_name if iteration: # only log if iteration is not None (otherwise poll() is too noisy) self.log.debug( "{}: Waiting to connect to docker container. " "Name: '{}', Status: '{}', IPAddress: '{}', KernelID: '{}'".format( iteration, self.container_name, container_status, self.assigned_ip, self.kernel_id, ) ) return container_status def terminate_container_resources(self) -> bool | None: """Terminate any artifacts created on behalf of the container's lifetime.""" # Remove the container result = True # Since we run containers with remove=True, we'll be optimistic container = self._get_container() if container: try: container.remove(force=True) # Container still exists, attempt forced removal except Exception as err: self.log.debug( f"Container termination for container: {container.name} raised exception: {err}" ) if isinstance(err, NotFound): pass # okay if its not found else: result = False self.log.warning(f"Error occurred removing container: {err}") if result: self.log.debug( "{}.terminate_container_resources, container {}, kernel ID: {} has been terminated.".format( self.__class__.__name__, self.container_name, self.kernel_id ) ) self.container_name = None result = None # maintain jupyter contract else: self.log.warning( "{}.terminate_container_resources, container {}, kernel ID: {} has not been terminated.".format( self.__class__.__name__, self.container_name, self.kernel_id ) ) return result ================================================ FILE: enterprise_gateway/services/processproxies/k8s.py ================================================ """Code related to managing kernels running in Kubernetes clusters.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import logging import os import re from typing import Any import kubernetes import urllib3 from kubernetes import client, config from ..kernels.remotemanager import RemoteKernelManager from ..sessions.kernelsessionmanager import KernelSessionManager from .container import ContainerProcessProxy urllib3.disable_warnings() # Default logging level of kubernetes produces too much noise - raise to warning only. 
logging.getLogger("kubernetes").setLevel(os.environ.get("EG_KUBERNETES_LOG_LEVEL", logging.WARNING)) enterprise_gateway_namespace = os.environ.get("EG_NAMESPACE", "default") default_kernel_service_account_name = os.environ.get( "EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME", "default" ) kernel_cluster_role = os.environ.get("EG_KERNEL_CLUSTER_ROLE", "cluster-admin") share_gateway_namespace = bool(os.environ.get("EG_SHARED_NAMESPACE", "False").lower() == "true") kpt_dir = os.environ.get("EG_POD_TEMPLATE_DIR", "/tmp") # noqa config.load_incluster_config() def get_subject_class(): """ Returns the appropriate Subject class based on the kubernetes client version. In kubernetes-client, V1Subject was renamed to RbacV1Subject. This function returns the appropriate class based on the installed version. """ # Check if V1Subject exists in the client if hasattr(client, 'V1Subject'): logging.debug( "Using client.V1Subject for Kubernetes client version: %s", kubernetes.__version__ ) return client.V1Subject # Fall back to RbacV1Subject for older versions logging.debug( "Using client.RbacV1Subject for Kubernetes client version: %s", kubernetes.__version__ ) return client.RbacV1Subject class KubernetesProcessProxy(ContainerProcessProxy): """ Kernel lifecycle management for Kubernetes kernels. """ # Identifies the kind of object being managed by this process proxy. # For these values we will prefer the values found in the 'kind' field # of the object's metadata. This attribute is strictly used to provide # context to log messages. object_kind = "Pod" def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.kernel_pod_name = None self.kernel_namespace = None self.delete_kernel_namespace = False async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> KubernetesProcessProxy: """Launches the specified process within a Kubernetes environment.""" # Set env before superclass call, so we can see these in the debug output # Kubernetes relies on internal env variables to determine its configuration. When # running within a K8s cluster, these start with KUBERNETES_SERVICE, otherwise look # for envs prefixed with KUBECONFIG. for key in os.environ: if key.startswith("KUBECONFIG") or key.startswith("KUBERNETES_SERVICE"): kwargs["env"][key] = os.environ[key] # Determine pod name and namespace - creating the latter if necessary self.kernel_pod_name = self._determine_kernel_pod_name(**kwargs) self.kernel_namespace = self._determine_kernel_namespace(**kwargs) await super().launch_process(kernel_cmd, **kwargs) return self def get_initial_states(self) -> set: """Return list of states in lowercase indicating container is starting (includes running).""" return ["pending", "running"] def get_error_states(self) -> set: """Return list of states in lowercase indicating container failed .""" return ["failed"] def get_container_status(self, iteration: int | None) -> str: """Return current container state.""" # Locates the kernel pod using the kernel_id selector. If the phase indicates Running, the pod's IP # is used for the assigned_ip. 
pod_status = "" kernel_label_selector = "kernel_id=" + self.kernel_id + ",component=kernel" ret = client.CoreV1Api().list_namespaced_pod( namespace=self.kernel_namespace, label_selector=kernel_label_selector ) if ret and ret.items: pod_info = ret.items[0] self.container_name = pod_info.metadata.name if pod_info.status: pod_status = pod_info.status.phase.lower() if pod_status == "running" and not self.assigned_host: # Pod is running, capture IP self.assigned_ip = pod_info.status.pod_ip self.assigned_host = self.container_name self.assigned_node_ip = pod_info.status.host_ip if iteration: # only log if iteration is not None (otherwise poll() is too noisy) self.log.debug( f"{iteration}: Waiting to connect to k8s {self.object_kind.lower()} in " f"namespace '{self.kernel_namespace}'. Name: '{self.container_name}', " f"Status: '{pod_status}', Pod IP: '{self.assigned_ip}', KernelID: '{self.kernel_id}'" ) return pod_status def delete_managed_object(self, termination_stati: list[str]) -> bool: """Deletes the object managed by this process-proxy A return value of True indicates the object is considered deleted, otherwise a False or None value is returned. Note: the caller is responsible for handling exceptions. """ body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy="Background") # Deleting a Pod will return a v1.Pod if found and its status will be a PodStatus containing # a phase string property # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#podstatus-v1-core v1_pod = client.CoreV1Api().delete_namespaced_pod( namespace=self.kernel_namespace, body=body, name=self.container_name ) status = None if v1_pod and v1_pod.status: status = v1_pod.status.phase result = status in termination_stati return result def terminate_container_resources(self) -> bool | None: """Terminate any artifacts created on behalf of the container's lifetime.""" # Kubernetes objects don't go away on their own - so we need to tear down the namespace # and/or pod associated with the kernel. We'll always target the pod first so that shutdown # is perceived as happening more rapidly. Then, if we created the namespace, and we're not # in the process of restarting the kernel, we'll delete the namespace. # After deleting the pod we check the container status, rather than the status returned # from the pod deletion API, since it's not necessarily reflective of the actual status. result = False termination_stati = ["Succeeded", "Failed", "Terminating", "Success"] # Delete the managed object then, if applicable, the namespace object_type = self.object_kind try: result = self.delete_managed_object(termination_stati) if not result: # If the status indicates the object is not terminated, capture its current status. # If None, update the result to True, else issue warning that it is not YET deleted # since we still have the hard termination sequence to occur. cur_status = self.get_container_status(None) if cur_status is None: result = True else: self.log.warning( f"{object_type} '{self.kernel_namespace}.{self.container_name}'" f" is not yet deleted. Current status is '{cur_status}'." ) if self.delete_kernel_namespace and not self.kernel_manager.restarting: object_type = "Namespace" # Status is a return value for calls that don't return other objects. 
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#status-v1-meta body = client.V1DeleteOptions( grace_period_seconds=0, propagation_policy="Background" ) v1_status = client.CoreV1Api().delete_namespace( name=self.kernel_namespace, body=body ) status = None if v1_status: status = v1_status.status if status and any(s in status for s in termination_stati): result = True if not result: self.log.warning( f"Namespace {self.kernel_namespace} is not yet deleted. " f"Current status is '{status}'." ) except Exception as err: if isinstance(err, client.rest.ApiException) and err.status == 404: result = True # okay if it's not found else: self.log.warning(f"Error occurred deleting {object_type.lower()}: {err}") if result: self.log.debug( f"KubernetesProcessProxy.terminate_container_resources, " f"{self.object_kind}: {self.kernel_namespace}.{self.container_name}, " f"kernel ID: {self.kernel_id} has been terminated." ) self.container_name = None result = None # maintain jupyter contract else: self.log.warning( "KubernetesProcessProxy.terminate_container_resources, " f"{self.object_kind}: {self.kernel_namespace}.{self.container_name}, " f"kernel ID: {self.kernel_id} has not been terminated." ) # Check if there's a kernel pod template file for this kernel and silently delete it. kpt_file = kpt_dir + "/kpt_" + self.kernel_id try: os.remove(kpt_file) except OSError: pass return result def _safe_template_substitute(self, template_str: str, variables: dict) -> str | None: """ Safely substitute variables in Jinja2-style template syntax. Only supports simple variable substitution: {{ variable_name }} Logs missing variables and returns None if any are missing. """ # Pattern to match {{ variable_name }} with optional whitespace # Explicitly exclude variables starting with underscore to prevent magic method attacks pattern = r'\{\{\s*([a-zA-Z][a-zA-Z0-9_]*)\s*\}\}' missing_vars = [] def replace_var(match): var_name = match.group(1) if var_name in variables: return str(variables[var_name]) else: missing_vars.append(var_name) return match.group(0) # Keep original placeholder result = re.sub(pattern, replace_var, template_str) # Check if there are any remaining {{ }} patterns that didn't match our simple pattern # This catches malicious templates like {{ foo.__class__ }} or {{ 1+1 }} if '{{' in result and '}}' in result: self.log.warning( "Invalid template syntax detected in KERNEL_POD_NAME: contains unsupported expressions" ) return None # Log missing variables and return None if any are missing if missing_vars: self.log.warning(f"Template variables not found in KERNEL_POD_NAME: {missing_vars}") return None # Signal caller to use default return result def _determine_kernel_pod_name(self, **kwargs: dict[str, Any] | None) -> str: pod_name = kwargs["env"].get("KERNEL_POD_NAME") if pod_name is None: pod_name = KernelSessionManager.get_kernel_username(**kwargs) + "-" + self.kernel_id else: self.log.debug(f"Processing KERNEL_POD_NAME based on env var => {pod_name}") if "{{" in pod_name and "}}" in pod_name: self.log.debug("Processing KERNEL_POD_NAME template variables") keywords = {} for name, value in kwargs["env"].items(): if name.startswith("KERNEL_"): keywords[name.lower()] = value keywords["kernel_id"] = self.kernel_id # Safe template substitution with fallback substituted = self._safe_template_substitute(pod_name, keywords) if substituted is None: # Fall back to default if template variables are missing self.log.warning( "Falling back to default pod name due to missing template variables" ) 
pod_name = ( KernelSessionManager.get_kernel_username(**kwargs) + "-" + self.kernel_id ) else: pod_name = substituted # Rewrite pod_name to be compatible with DNS name convention # And put back into env since kernel needs this pod_name = re.sub("[^0-9a-z]+", "-", pod_name.lower()) while pod_name.startswith("-"): pod_name = pod_name[1:] while pod_name.endswith("-"): pod_name = pod_name[:-1] kwargs["env"]["KERNEL_POD_NAME"] = pod_name return pod_name def _determine_kernel_namespace(self, **kwargs: dict[str, Any] | None) -> str: # Since we need the service account name regardless of whether we're creating the namespace or not, # get it now. service_account_name = KubernetesProcessProxy._determine_kernel_service_account_name( **kwargs ) # If KERNEL_NAMESPACE was provided, then we assume it already exists. If not provided, then we'll # create the namespace and record that we'll want to delete it as well. namespace = kwargs["env"].get("KERNEL_NAMESPACE") if namespace is None: # check if share gateway namespace is configured... if share_gateway_namespace: # if so, set to EG namespace namespace = enterprise_gateway_namespace self.log.warning( "Shared namespace has been configured. All kernels will reside in EG namespace: {}".format( namespace ) ) else: namespace = self._create_kernel_namespace(service_account_name) kwargs["env"]["KERNEL_NAMESPACE"] = namespace # record in env since kernel needs this else: self.log.info(f"KERNEL_NAMESPACE provided by client: {namespace}") return namespace @staticmethod def _determine_kernel_service_account_name(**kwargs: dict[str, Any] | None) -> str: # Check if an account name was provided. If not, set to the default name (which can be set # from the EG env as well). Finally, ensure the env value is set. service_account_name = kwargs["env"].get( "KERNEL_SERVICE_ACCOUNT_NAME", default_kernel_service_account_name ) kwargs["env"]["KERNEL_SERVICE_ACCOUNT_NAME"] = service_account_name return service_account_name def _create_kernel_namespace(self, service_account_name: str) -> str: # Creates the namespace for the kernel based on the kernel username and kernel id. Since we're creating # the namespace, we'll also note that it should be deleted as well. In addition, the kernel pod may need # to list/create other pods (true for spark-on-k8s), so we'll also create a RoleBinding associated with # the namespace's default ServiceAccount. Since this is always done when creating a namespace, we can # delete the RoleBinding when deleting the namespace (no need to record that via another member variable). namespace = self.kernel_pod_name # create the namespace ... labels = {"app": "enterprise-gateway", "component": "kernel", "kernel_id": self.kernel_id} namespace_metadata = client.V1ObjectMeta(name=namespace, labels=labels) body = client.V1Namespace(metadata=namespace_metadata) # create the namespace try: client.CoreV1Api().create_namespace(body=body) self.delete_kernel_namespace = True self.log.info(f"Created kernel namespace: {namespace}") # Now create a RoleBinding for this namespace for the default ServiceAccount. We'll reference # the ClusterRole, but that will only be applied for this namespace. This prevents the need for # creating a role each time. 
self._create_role_binding(namespace, service_account_name) except Exception as err: if ( isinstance(err, client.rest.ApiException) and err.status == 409 and self.kernel_manager.restarting ): self.delete_kernel_namespace = ( True # okay if ns already exists and restarting, still mark for delete ) self.log.info(f"Re-using kernel namespace: {namespace}") else: if self.delete_kernel_namespace: reason = "Error occurred creating role binding for namespace '{}': {}".format( namespace, err ) # delete the namespace since we'll be using the EG namespace... body = client.V1DeleteOptions( grace_period_seconds=0, propagation_policy="Background" ) client.CoreV1Api().delete_namespace(name=namespace, body=body) self.log.warning(f"Deleted kernel namespace: {namespace}") else: reason = f"Error occurred creating namespace '{namespace}': {err}" self.log_and_raise(http_status_code=500, reason=reason) return namespace def _create_role_binding(self, namespace: str, service_account_name: str) -> None: # Creates RoleBinding instance for the given namespace. The role used will be the ClusterRole named by # EG_KERNEL_CLUSTER_ROLE. # Note that roles referenced in RoleBindings are scoped to the namespace so re-using the cluster role prevents # the need for creating a new role with each kernel. # The ClusterRole will be bound to the kernel service user identified by KERNEL_SERVICE_ACCOUNT_NAME then # EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME, respectively. # We will not use a try/except clause here since _create_kernel_namespace will handle exceptions. role_binding_name = kernel_cluster_role # use same name for binding as cluster role labels = {"app": "enterprise-gateway", "component": "kernel", "kernel_id": self.kernel_id} binding_metadata = client.V1ObjectMeta(name=role_binding_name, labels=labels) binding_role_ref = client.V1RoleRef( api_group="", kind="ClusterRole", name=kernel_cluster_role ) # Use the appropriate Subject class based on kubernetes client version SubjectClass = get_subject_class() binding_subjects = SubjectClass( api_group="", kind="ServiceAccount", name=service_account_name, namespace=namespace ) body = client.V1RoleBinding( kind="RoleBinding", metadata=binding_metadata, role_ref=binding_role_ref, subjects=[binding_subjects], ) client.RbacAuthorizationV1Api().create_namespaced_role_binding( namespace=namespace, body=body ) self.log.info( "Created kernel role-binding '{}' in namespace: {} for service account: {}".format( role_binding_name, namespace, service_account_name ) ) def get_process_info(self) -> dict[str, Any]: """Captures the base information necessary for kernel persistence relative to kubernetes.""" process_info = super().get_process_info() process_info.update( {"kernel_ns": self.kernel_namespace, "delete_ns": self.delete_kernel_namespace} ) return process_info def load_process_info(self, process_info: dict[str, Any]) -> None: """Loads the base information necessary for kernel persistence relative to kubernetes.""" super().load_process_info(process_info) self.kernel_namespace = process_info["kernel_ns"] self.delete_kernel_namespace = process_info["delete_ns"] ================================================ FILE: enterprise_gateway/services/processproxies/processproxy.py ================================================ """Kernel managers that operate against a remote process.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
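# The tunables below are all driven by environment variables. A hypothetical
# deployment might export, for example:
#
#   EG_KERNEL_LAUNCH_TIMEOUT=60   # allow slow clusters more time to start kernels
#   EG_RESPONSE_PORT=8877         # socket on which launchers return connection info
#   EG_ENABLE_TUNNELING=True      # ssh-tunnel the kernel's ZMQ channels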
from __future__ import annotations import abc import asyncio import base64 import errno import getpass import json import logging import os import random import re import signal import subprocess import sys import time import warnings from calendar import timegm from enum import Enum from socket import ( AF_INET, SHUT_RDWR, SHUT_WR, SO_REUSEADDR, SOCK_STREAM, SOL_SOCKET, gethostbyname, gethostname, socket, timeout, ) from typing import Any import paramiko import pexpect from Cryptodome.Cipher import AES, PKCS1_v1_5 from Cryptodome.PublicKey import RSA from Cryptodome.Util.Padding import unpad from jupyter_client import launch_kernel, localinterfaces from jupyter_server import _tz from jupyter_server.serverapp import random_ports from paramiko.client import SSHClient from tornado import web from tornado.ioloop import PeriodicCallback from traitlets.config import SingletonConfigurable from zmq.ssh import tunnel from ..sessions.kernelsessionmanager import KernelSessionManager # Default logging level of paramiko produces too much noise - raise to warning only. logging.getLogger("paramiko").setLevel(os.getenv("EG_SSH_LOG_LEVEL", logging.WARNING)) # Pop certain env variables that don't need to be logged, e.g. remote_pwd env_pop_list = ["EG_REMOTE_PWD", "LS_COLORS"] # Comma separated list of env variables that shouldn't be logged. Empty entries are filtered # so an unset EG_SENSITIVE_ENV_KEYS doesn't yield [""] - which would match (and redact) every key. sensitive_env_keys = [ key for key in os.getenv("EG_SENSITIVE_ENV_KEYS", "").lower().split(",") if key ] redaction_mask = os.getenv("EG_REDACTION_MASK", "********") default_kernel_launch_timeout = float(os.getenv("EG_KERNEL_LAUNCH_TIMEOUT", "30")) max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) socket_timeout = float(os.getenv("EG_SOCKET_TIMEOUT", "0.005")) tunneling_enabled = bool(os.getenv("EG_ENABLE_TUNNELING", "False").lower() == "true") ssh_port = int(os.getenv("EG_SSH_PORT", "22")) eg_response_ip = os.getenv("EG_RESPONSE_IP", None) desired_response_port = int(os.getenv("EG_RESPONSE_PORT", 8877)) response_port_retries = int(os.getenv("EG_RESPONSE_PORT_RETRIES", 10)) response_addr_any = bool(os.getenv("EG_RESPONSE_ADDR_ANY", "False").lower() == "true") connection_interval = ( poll_interval / 100.0 ) # already polling, so make connection timeout a fraction of outer poll # Minimum port range size and max retries min_port_range_size = int(os.getenv("EG_MIN_PORT_RANGE_SIZE", "1000")) max_port_range_retries = int(os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5")) # Number of seconds in 100 years as the max keep-alive interval value. max_keep_alive_interval = 100 * 365 * 24 * 60 * 60 # Allow users to specify local ips (regular expressions can be used) that should not be included # when determining the response address. For example, on systems with many network interfaces, # some may have their IPs appear in the local interfaces list (e.g., docker's 172.17.0.*) # but should not be used. This env can be used to indicate such IPs. Empty entries are filtered # so an unset env doesn't prohibit every IP. prohibited_local_ips = [ ip for ip in os.getenv("EG_PROHIBITED_LOCAL_IPS", "").split(",") if ip ] def _get_local_ip() -> str: """ Honor the prohibited IPs, locating the first not in the list.
""" for ip in localinterfaces.public_ips(): is_prohibited = False for prohibited_ip in prohibited_local_ips: # exhaust prohibited list, applying regexs if re.match(prohibited_ip, ip): is_prohibited = True break if not is_prohibited: return ip return localinterfaces.public_ips()[0] # all were prohibited, so go with the first local_ip = _get_local_ip() random.seed() class KernelChannel(Enum): """ Enumeration used to better manage tunneling """ SHELL = "SHELL" IOPUB = "IOPUB" STDIN = "STDIN" HEARTBEAT = "HB" CONTROL = "CONTROL" COMMUNICATION = ( "EG_COMM" # Optional channel for remote launcher to issue interrupts - NOT a ZMQ channel ) class Response(asyncio.Event): """Combines the event behavior with the kernel launch response.""" _response = None @property def response(self): return self._response @response.setter def response(self, value): """Set the response. NOTE: this marks the event as set.""" self._response = value self.set() class ResponseManager(SingletonConfigurable): """Singleton that manages the responses from each kernel launcher at startup. This singleton does the following: 1. Acquires a public and private RSA key pair at first use to encrypt and decrypt the received responses. The public key is sent to the launcher during startup and is used by the launcher to encrypt the AES key the launcher uses to encrypt the connection information, while the private key remains in the server and is used to decrypt the AES key from the response - which it then uses to decrypt the connection information. 2. Creates a single socket based on the configuration settings that is listened on via a periodic callback. 3. On receipt, it decrypts the response (key then connection info) and posts the response payload to a map identified by the kernel_id embedded in the response. 4. Provides a wait mechanism for callers to poll to get their connection info based on their registration (of kernel_id). """ KEY_SIZE = 1024 # Can be small since it's only used to {en,de}crypt the AES key. _instance = None def __init__(self, **kwargs: dict[str, Any] | None): """Initialize the manager.""" super().__init__(**kwargs) self._response_ip = None self._response_port = None self._response_socket = None self._connection_processor = None # Create encryption keys... self._private_key = RSA.generate(ResponseManager.KEY_SIZE) self._public_key = self._private_key.publickey() self._public_pem = self._public_key.export_key("PEM") # Event facility... self._response_registry = {} # Start the response manager (create socket, periodic callback, etc.) ... 
self._start_response_manager() @property def public_key(self) -> str: """Provides the string-form of public key PEM with header/footer/newlines stripped.""" return ( self._public_pem.decode() .replace("-----BEGIN PUBLIC KEY-----", "") .replace("-----END PUBLIC KEY-----", "") .replace("\n", "") ) @property def response_address(self) -> str: return self._response_ip + ":" + str(self._response_port) def register_event(self, kernel_id: str) -> None: """Register kernel_id so its connection information can be processed.""" self._response_registry[kernel_id] = Response() async def get_connection_info(self, kernel_id: str) -> dict: """Performs a timeout wait on the event, returning the connection information on completion.""" await asyncio.wait_for(self._response_registry[kernel_id].wait(), connection_interval) return self._response_registry.pop(kernel_id).response def _prepare_response_socket(self) -> None: """Prepares the response socket on which connection info arrives from remote kernel launcher.""" s = socket(AF_INET, SOCK_STREAM) s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # If response_addr_any is enabled (default disabled), we will permit the server to listen # on all addresses, else we will honor a configured response IP (via env) over the local IP # (which is the default). # Multiple IP bindings should be configured for containerized configurations (k8s) that need to # launch kernels into external YARN clusters. bind_ip = local_ip if eg_response_ip is None else eg_response_ip bind_ip = bind_ip if response_addr_any is False else "" response_port = desired_response_port for port in random_ports(response_port, response_port_retries + 1): try: s.bind((bind_ip, port)) except OSError as e: if e.errno == errno.EADDRINUSE: self.log.info(f"Response port {port} is already in use, trying another port...") continue elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): self.log.warning( f"Permission to bind to response port {port} denied - continuing..." ) continue else: msg = f"Failed to bind to port '{port}' for response address due to: '{e}'" raise RuntimeError(msg) from e else: response_port = port break else: msg = f"No available response port could be found after {response_port_retries + 1} attempts" self.log.critical(msg) raise RuntimeError(msg) self.log.info( f"Enterprise Gateway is bound to port {response_port} " f"for remote kernel connection information."
) s.listen(128) s.settimeout(socket_timeout) self._response_socket = s self._response_port = response_port self._response_ip = local_ip if eg_response_ip is None else eg_response_ip def _start_response_manager(self) -> None: """If not already started, creates and starts the periodic callback to process connections.""" if self._response_socket is None: self._prepare_response_socket() if self._connection_processor is None: self._connection_processor = PeriodicCallback(self._process_connections, 0.1, 0.1) self._connection_processor.start() def stop_response_manager(self) -> None: """Stops the connection processor.""" if self._connection_processor is not None: self._connection_processor.stop() self._connection_processor = None if self._response_socket is not None: self._response_socket = None async def _process_connections(self) -> None: """Checks the socket for data, if found, decrypts the payload and posts to 'wait map'.""" loop = asyncio.get_event_loop() data = "" try: conn, addr = await loop.sock_accept(self._response_socket) while True: buffer = await loop.sock_recv(conn, 1024) if not buffer: # send is complete, process payload self.log.debug(f"Received payload '{data}'") payload = self._decode_payload(data) self.log.debug(f"Decrypted payload '{payload}'") self._post_connection(payload) break data = data + buffer.decode( encoding="utf-8" ) # append what we received until we get no more... conn.close() except timeout: pass except Exception as ex: self.log.error(f"Failure occurred processing connection: {ex}") def _decode_payload(self, data: str) -> dict: """ Decodes the payload. Decodes the payload, identifying the payload's version, and returns a dictionary representing the kernel's connection information. Version "0" payloads do not specify a kernel-id within the payload, nor do they include 'key', 'version' or 'conn_info' fields. They are purely an AES encrypted form of the base64-encoded JSON connection information, and encrypted using the kernel-id as a key. Since no kernel-id is in the payload, we will capture the keys of registered kernel-ids and attempt to decrypt the payload until we find the appropriate registrant. Version "1+" payloads are a base64-encoded JSON string consisting of 'version', 'key' and 'conn_info' fields. The 'key' field will be decrypted using the private key to reveal the AES key, which is then used to decrypt the `conn_info` field. Once decryption has taken place, the connection information string is loaded into a dictionary and returned. """ payload_str = base64.b64decode(data) try: payload = json.loads(payload_str) # Get the version version = payload.get("version") if version is None: msg = "Payload received from kernel does not include a version indicator!" raise ValueError(msg) self.log.debug(f"Version {version} payload received.") if version == 1: # Decrypt the AES key using the RSA private key encrypted_aes_key = base64.b64decode(payload["key"].encode()) cipher = PKCS1_v1_5.new(self._private_key) aes_key = cipher.decrypt(encrypted_aes_key, b"\x42") # Per docs, don't convey that decryption returned sentinel. So just let # things fail "naturally". # Decrypt and unpad the connection information using the just-decrypted AES key cipher = AES.new(aes_key, AES.MODE_ECB) encrypted_connection_info = base64.b64decode(payload["conn_info"].encode()) connection_info_str = unpad(cipher.decrypt(encrypted_connection_info), 16).decode() else: msg = f"Unexpected version indicator received: {version}!"
raise ValueError(msg) except Exception as ex: # Could be version "0", walk the registrant kernel-ids and attempt to decrypt using each as a key. # If none are found, re-raise the triggering exception. self.log.debug(f"decode_payload exception - {ex.__class__.__name__}: {ex}") connection_info_str = None for kernel_id in self._response_registry: aes_key = kernel_id[0:16] try: cipher = AES.new(aes_key.encode("utf-8"), AES.MODE_ECB) decrypted_payload = cipher.decrypt(payload_str) # Version "0" responses use custom padding, so remove that here. connection_info_str = "".join( [decrypted_payload.decode("utf-8").rsplit("}", 1)[0], "}"] ) # Try to load as JSON new_connection_info = json.loads(connection_info_str) # Add kernel_id into dict, then dump back to string so this can be processed as valid response new_connection_info["kernel_id"] = kernel_id connection_info_str = json.dumps(new_connection_info) self.log.warning( f"WARNING!!!! Legacy kernel response received for kernel_id '{kernel_id}'! " "Update kernel launchers to current version!" ) break # If we're here, we made it! except Exception as ex2: # Any exception fails this experiment and we continue self.log.debug( "Received the following exception detecting legacy kernel response - {}: {}".format( ex2.__class__.__name__, ex2 ) ) connection_info_str = None if connection_info_str is None: raise ex # and convert to usable dictionary connection_info = json.loads(connection_info_str) return connection_info def _post_connection(self, connection_info: dict) -> None: """Posts connection information into "wait map" based on kernel_id value.""" kernel_id = connection_info.get("kernel_id") if kernel_id is None: self.log.error("No kernel id found in response! Kernel launch will fail.") return if kernel_id not in self._response_registry: self.log.error( f"Kernel id '{kernel_id}' has not been registered and will not be processed!" ) return self.log.debug(f"Connection info received for kernel '{kernel_id}': {connection_info}") self._response_registry[kernel_id].response = connection_info class BaseProcessProxyABC(metaclass=abc.ABCMeta): """ Process Proxy Abstract Base Class. Defines the required methods for process proxy classes. Some implementation is also performed by these methods - common to all subclasses. """ def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): # noqa: F821 """ Initialize the process proxy instance. Parameters ---------- kernel_manager : RemoteKernelManager The kernel manager instance tied to this process proxy. This drives the process proxy method calls. proxy_config : dict The dictionary of per-kernel config settings. If none are specified, this will be an empty dict. """ self.kernel_manager = kernel_manager self.proxy_config = proxy_config # Initialize to 0 IP primarily so restarts of remote kernels don't encounter local-only enforcement during # relaunch (see jupyter_client.manager.start_kernel(). self.kernel_manager.ip = "0.0.0.0" # noqa self.log = kernel_manager.log # extract the kernel_id string from the connection file and set the KERNEL_ID environment variable if self.kernel_manager.kernel_id is None: self.kernel_manager.kernel_id = ( os.path.basename(self.kernel_manager.connection_file) .replace("kernel-", "") .replace(".json", "") ) self.kernel_id = self.kernel_manager.kernel_id self.kernel_launch_timeout = default_kernel_launch_timeout self.lower_port = 0 self.upper_port = 0 self._validate_port_range() # Handle authorization sets... # Take union of unauthorized users... 
self.unauthorized_users = self.kernel_manager.unauthorized_users if proxy_config.get("unauthorized_users"): self.unauthorized_users = self.unauthorized_users.union( proxy_config.get("unauthorized_users").split(",") ) # Let authorized users override global value - if set on kernelspec... if proxy_config.get("authorized_users"): self.authorized_users = set(proxy_config.get("authorized_users").split(",")) else: self.authorized_users = self.kernel_manager.authorized_users # Represents the local process (from popen) if applicable. Note that we could have local_proc = None even when # the subclass is a LocalProcessProxy (or YarnProcessProxy). This will happen if EG is restarted and the # persisted kernel-sessions indicate that it's now running on a different server. In those cases, we use the ip # member variable to determine if the persisted state is local or remote and use signals with the pid to # implement the poll, kill and send_signal methods. As a result, what was a local kernel with one EG instance # could be a remote kernel in a restarted EG instance - and vice versa. self.local_proc = None self.ip = None self.pid = 0 self.pgid = 0 _remote_user = os.getenv("EG_REMOTE_USER") self.remote_pwd = os.getenv("EG_REMOTE_PWD") self._use_gss_raw = os.getenv("EG_REMOTE_GSS_SSH", "False") if self._use_gss_raw.lower() not in ("", "true", "false"): msg = ( "Invalid value for EG_REMOTE_GSS_SSH; expected one of " f'"", "True", "False", got {self._use_gss_raw!r}' ) raise ValueError(msg) # Compare case-insensitively, consistent with the validation above. self.use_gss = self._use_gss_raw.lower() == "true" if self.use_gss: if self.remote_pwd or _remote_user: warnings.warn( "Both `EG_REMOTE_GSS_SSH` and one of `EG_REMOTE_PWD` or " "`EG_REMOTE_USER` are set. " "These options are mutually exclusive; your configuration may be incorrect. " "EG_REMOTE_GSS_SSH will take priority.", stacklevel=2, ) self.remote_user = None else: self.remote_user = _remote_user if _remote_user else getpass.getuser() @abc.abstractmethod async def launch_process(self, kernel_cmd: str, **kwargs: dict[str, Any] | None) -> None: """ Provides basic implementation for launching the process corresponding to the process proxy. All overrides should call this method via `super()` so that basic/common operations can be performed. Leaf class implementations are required to perform the actual process launch depending on the type of process proxy. Parameters ---------- kernel_cmd : str The properly formatted string composed from the argv stanza of the kernelspec with all curly-braced substitutions performed. kwargs : optional Additional arguments used during the launch - primarily the env to use for the kernel. """ env_dict = kwargs.get("env") if env_dict is None: env_dict = dict(os.environ.copy()) kwargs.update({"env": env_dict}) # see if KERNEL_LAUNCH_TIMEOUT was included from user. If so, override default if env_dict.get("KERNEL_LAUNCH_TIMEOUT"): self.kernel_launch_timeout = float(env_dict.get("KERNEL_LAUNCH_TIMEOUT")) # add the applicable kernel_id and language to the env dict env_dict["KERNEL_ID"] = self.kernel_id kernel_language = "unknown-kernel-language" if len(self.kernel_manager.kernel_spec.language) > 0: kernel_language = self.kernel_manager.kernel_spec.language.lower() # if already set in env: stanza, let that override.
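# Editor's note: the env: stanza referenced above is the "env" map in the kernelspec's
# kernel.json; a hypothetical spec (names illustrative) could pin the value explicitly, e.g.:
#
#   { "display_name": "Python on X", "language": "python",
#     "env": { "KERNEL_LANGUAGE": "python" } }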
env_dict["KERNEL_LANGUAGE"] = env_dict.get("KERNEL_LANGUAGE", kernel_language) # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS) for k in env_pop_list: env_dict.pop(k, None) self._enforce_authorization(**kwargs) # Filter sensitive values from being logged env_copy = kwargs.get("env").copy() if sensitive_env_keys: for key in list(env_copy): if any(phrase in key.lower() for phrase in sensitive_env_keys): env_copy[key] = redaction_mask self.log.debug(f"BaseProcessProxy.launch_process() env: {env_copy}") def launch_kernel( self, cmd: list[str], **kwargs: dict[str, Any] | None ) -> subprocess.Popen[str | bytes]: """ Returns the result of launching the kernel via Popen. This method exists to allow process proxies to perform any final preparations for launch, including the removal of any arguments that are not recoginized by Popen. """ # Remove kernel_headers kwargs.pop("kernel_headers", None) return launch_kernel(cmd, **kwargs) def cleanup(self) -> None: # noqa """Performs optional cleanup after kernel is shutdown. Child classes are responsible for implementations.""" pass def poll(self) -> Any | None: """ Determines if process proxy is still alive. If this corresponds to a local (popen) process, poll() is called on the subprocess. Otherwise, the zero signal is used to determine if active. """ if self.local_proc: return self.local_proc.poll() return self.send_signal(0) def wait(self) -> int | None: """ Wait for the process to become inactive. """ # If we have a local_proc, call its wait method. This will clean up any defunct processes when the kernel # is shutdown (when using waitAppCompletion = false). Otherwise (if no local_proc) we'll use polling to # determine if a (remote or revived) process is still active. if self.local_proc: return self.local_proc.wait() for _ in range(max_poll_attempts): if self.poll(): time.sleep(poll_interval) else: break else: self.log.warning( "Wait timeout of {} seconds exhausted. Continuing...".format( max_poll_attempts * poll_interval ) ) return None def send_signal(self, signum: int) -> bool | None: """ Send signal `signum` to process proxy. Parameters ---------- signum : int The signal number to send. Zero is used to determine heartbeat. """ # if we have a local process, use its method, else determine if the ip is local or remote and issue # the appropriate version to signal the process. result = None if self.local_proc: if self.pgid > 0 and hasattr(os, "killpg"): try: os.killpg(self.pgid, signum) return result except OSError: pass result = self.local_proc.send_signal(signum) else: if self.ip and self.pid > 0: if BaseProcessProxyABC.ip_is_local(self.ip): result = self.local_signal(signum) else: result = self.remote_signal(signum) return result def kill(self) -> bool | None: """ Terminate the process proxy process. First attempts graceful termination, then forced termination. Note that this should only be necessary if the message-based kernel termination has proven unsuccessful. """ # If we have a local process, use its method, else signal soft kill first before hard kill. 
result = self.terminate() # Send -15 signal first i = 1 while self.poll() is None and i <= max_poll_attempts: time.sleep(poll_interval) i = i + 1 if i > max_poll_attempts: # Send -9 signal if process is still alive if self.local_proc: result = self.local_proc.kill() self.log.debug(f"BaseProcessProxy.kill(): {result}") else: if self.ip and self.pid > 0: if BaseProcessProxyABC.ip_is_local(self.ip): result = self.local_signal(signal.SIGKILL) else: result = self.remote_signal(signal.SIGKILL) self.log.debug(f"SIGKILL signal sent to pid: {self.pid}") return result def terminate(self) -> bool | None: """ Gracefully terminate the process proxy process. Note that this should only be necessary if the message-based kernel termination has proven unsuccessful. """ # If we have a local process, use its method, else send signal SIGTERM to soft kill. result = None if self.local_proc: result = self.local_proc.terminate() self.log.debug(f"BaseProcessProxy.terminate(): {result}") else: if self.ip and self.pid > 0: if BaseProcessProxyABC.ip_is_local(self.ip): result = self.local_signal(signal.SIGTERM) else: result = self.remote_signal(signal.SIGTERM) self.log.debug(f"SIGTERM signal sent to pid: {self.pid}") return result @staticmethod def ip_is_local(ip: str) -> bool: """ Returns True if `ip` is considered local to this server, False otherwise. """ return localinterfaces.is_public_ip(ip) or localinterfaces.is_local_ip(ip) def _get_ssh_client(self, host: str) -> SSHClient | None: """ Create an SSH client based on host, username and password if provided. If there is any AuthenticationException/SSHException, raise HTTP Error 403 as permission denied. :param host: :return: ssh client instance """ ssh = None try: ssh = paramiko.SSHClient() ssh.load_system_host_keys() host_ip = gethostbyname(host) if self.use_gss: self.log.debug("Connecting to remote host via GSS.") ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host_ip, port=ssh_port, gss_auth=True) else: ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) if self.remote_pwd: self.log.debug("Connecting to remote host with username and password.") ssh.connect( host_ip, port=ssh_port, username=self.remote_user, password=self.remote_pwd, ) else: self.log.debug("Connecting to remote host with ssh key.") ssh.connect(host_ip, port=ssh_port, username=self.remote_user) except Exception as e: http_status_code = 500 current_host = gethostbyname(gethostname()) error_message = ( "Exception '{}' occurred when creating an SSHClient at {} connecting " "to '{}:{}' with user '{}', message='{}'.".format( type(e).__name__, current_host, host, ssh_port, self.remote_user, e ) ) if isinstance(e, (paramiko.SSHException, paramiko.AuthenticationException)): http_status_code = 403 error_message_prefix = "Failed to authenticate SSHClient with password" error_message = error_message_prefix + ( " provided" if self.remote_pwd else "-less SSH" ) error_message = error_message + " and EG_REMOTE_GSS_SSH={!r} ({})".format( self._use_gss_raw, self.use_gss ) self.log_and_raise(http_status_code=http_status_code, reason=error_message) return ssh def rsh(self, host: str, command: str) -> list[str]: """ Executes a command on a remote host using ssh. Parameters ---------- host : str The host on which the command is executed. command : str The command to execute. Returns ------- lines : List The command's output. If stdout is zero length, the stderr output is returned.
""" ssh = self._get_ssh_client(host) try: stdin, stdout, stderr = ssh.exec_command(command, timeout=30) lines = stdout.readlines() if len(lines) == 0: # if nothing in stdout, return stderr lines = stderr.readlines() except Exception as e: # Let caller decide if exception should be logged raise e finally: if ssh: ssh.close() return lines def remote_signal(self, signum: int) -> bool | None: """ Sends signal `signum` to process proxy on remote host. """ val = None # if we have a process group, use that, else use the pid... target = "-" + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) cmd = f"kill -{signum} {target}; echo $?" if signum > 0: # only log if meaningful signal (not for poll) self.log.debug(f"Sending signal: {signum} to target: {target} on host: {self.ip}") try: result = self.rsh(self.ip, cmd) except Exception as e: self.log.warning( "Remote signal({}) to '{}' on host '{}' failed with exception '{}'.".format( signum, target, self.ip, e ) ) return False for line in result: val = line.strip() if val == "0": return None return False def local_signal(self, signum: int) -> bool | None: """ Sends signal `signum` to local process. """ # if we have a process group, use that, else use the pid... target = "-" + str(self.pgid) if self.pgid > 0 and signum > 0 else str(self.pid) if signum > 0: # only log if meaningful signal (not for poll) self.log.debug(f"Sending signal: {signum} to target: {target}") cmd = ["kill", "-" + str(signum), target] with open(os.devnull, "w") as devnull: result = subprocess.call(cmd, stderr=devnull) if result == 0: return None return False def _enforce_authorization(self, **kwargs: dict[str, Any] | None) -> None: """ Applies any authorization configuration using the kernel user. Regardless of impersonation enablement, this method first adds the appropriate value for EG_IMPERSONATION_ENABLED into environment (for use by kernelspecs), then ensures that KERNEL_USERNAME has a value and is present in the environment (again, for use by kernelspecs). If unset, KERNEL_USERNAME will be defaulted to the current user. Authorization is performed by comparing the value of KERNEL_USERNAME with each value in the set of unauthorized users. If any (case-sensitive) matches are found, HTTP error 403 (Forbidden) will be raised - preventing the launch of the kernel. If the authorized_users set is non-empty, it is then checked to ensure the value of KERNEL_USERNAME is present in that list. If not found, HTTP error 403 will be raised. It is assumed that the kernelspec logic will take the appropriate steps to impersonate the user identified by KERNEL_USERNAME when impersonation_enabled is True. """ # Get the env env_dict = kwargs.get("env") # Although it may already be set in the env, just override in case it was only set via command line or config # Convert to string since execve() (called by Popen in base classes) wants string values. env_dict["EG_IMPERSONATION_ENABLED"] = str(self.kernel_manager.impersonation_enabled) # Ensure KERNEL_USERNAME is set kernel_username = KernelSessionManager.get_kernel_username(**kwargs) # Now perform authorization checks if kernel_username in self.unauthorized_users: self._raise_authorization_error(kernel_username, "not authorized") # If authorized users are non-empty, ensure user is in that set. 
if len(self.authorized_users) > 0 and kernel_username not in self.authorized_users: self._raise_authorization_error(kernel_username, "not in the set of users authorized") def _raise_authorization_error(self, kernel_username: str, differentiator_clause: str) -> None: """ Raises a 403 status code after building the appropriate message. """ kernel_name = self.kernel_manager.kernel_spec.display_name kernel_clause = f" '{kernel_name}'." if kernel_name is not None else "s." error_message = ( f"User '{kernel_username}' is {differentiator_clause} to start kernel{kernel_clause} " "Ensure KERNEL_USERNAME is set to an appropriate value and retry the request." ) self.log_and_raise(http_status_code=403, reason=error_message) def get_process_info(self) -> dict[str, Any]: """ Captures the base information necessary for kernel persistence relative to process proxies. The superclass method must always be called first to ensure proper ordering. Since this is the most base class, no call to `super()` is necessary. """ process_info = {"pid": self.pid, "pgid": self.pgid, "ip": self.ip} return process_info def load_process_info(self, process_info: dict[str, Any]) -> None: """ Loads the base information necessary for kernel persistence relative to process proxies. The superclass method must always be called first to ensure proper ordering. Since this is the most base class, no call to `super()` is necessary. """ self.pid = process_info["pid"] self.pgid = process_info["pgid"] self.ip = process_info["ip"] self.kernel_manager.ip = process_info["ip"] def _validate_port_range(self) -> None: """ Validates the port range configuration option to ensure appropriate values. """ # Let port_range override global value - if set on kernelspec... port_range = self.kernel_manager.port_range if self.proxy_config.get("port_range"): port_range = self.proxy_config.get("port_range") try: port_ranges = port_range.split("..") self.lower_port = int(port_ranges[0]) self.upper_port = int(port_ranges[1]) port_range_size = self.upper_port - self.lower_port if port_range_size != 0: if port_range_size < min_port_range_size: self.log_and_raise( http_status_code=500, reason="Port range validation failed for range: '{}'. " "Range size must be at least {} as specified by env EG_MIN_PORT_RANGE_SIZE".format( port_range, min_port_range_size ), ) # According to RFC 793, port is a 16-bit unsigned int, which means the port # numbers must be in the range (0, 65535). However, within that range, # ports 0 - 1023 are called "well-known ports" and are typically reserved for # specific purposes. For example, 0 is reserved for random port assignment, # 80 is used for HTTP, 443 for TLS/SSL, 25 for SMTP, etc. But, there is # flexibility as one can choose any port with the aforementioned protocols. # Ports 1024 - 49151 are called "user or registered ports" that are bound to # services running on the server listening to client connections. And, ports # 49152 - 65535 are called "dynamic or ephemeral ports". A TCP connection # has two endpoints. Each endpoint consists of an IP address and a port number. # And, each connection is made up of a 4-tuple consisting of -- client-IP, # client-port, server-IP, and server-port. A service runs on a server with a # specific IP and is bound to a specific "user or registered port" that is # advertised for clients to connect. So, when a client connects to a service # running on a server, three out of 4-tuple - client-IP, client-port, server-IP - # are already known.
To be able to serve multiple clients concurrently, the # server's IP stack assigns an ephemeral port for the connection to complete # the 4-tuple. # # In case of JEG, we will accept ports in the range 1024 - 65535 as these days # admins use dedicated hosts for individual services. if self.lower_port < 1024 or self.lower_port > 65535: self.log_and_raise( http_status_code=500, reason=f"Invalid port range '{port_range}' specified. " "Range for valid port numbers is (1024, 65535).", ) if self.upper_port < 1024 or self.upper_port > 65535: self.log_and_raise( http_status_code=500, reason=f"Invalid port range '{port_range}' specified. " "Range for valid port numbers is (1024, 65535).", ) except ValueError as ve: self.log_and_raise( http_status_code=500, reason=f"Port range validation failed for range: '{port_range}'. " f"Error was: {ve}", ) except IndexError as ie: self.log_and_raise( http_status_code=500, reason=f"Port range validation failed for range: '{port_range}'. " f"Error was: {ie}", ) self.kernel_manager.port_range = port_range def select_ports(self, count: int) -> list[int]: """ Selects and returns n random ports that adhere to the configured port range, if applicable. Parameters ---------- count : int The number of ports to return Returns ------- List - ports available and adhering to the configured port range """ ports = [] sockets = [] for _ in range(count): sock = self.select_socket() ports.append(sock.getsockname()[1]) sockets.append(sock) for sock in sockets: sock.close() return ports def select_socket(self, ip: str | None = "") -> socket: """ Creates and returns a socket whose port adheres to the configured port range, if applicable. Parameters ---------- ip : str Optional ip address to which the port is bound Returns ------- socket - Bound socket that is available and adheres to configured port range """ sock = socket(AF_INET, SOCK_STREAM) found_port = False retries = 0 while not found_port: try: sock.bind((ip, self._get_candidate_port())) found_port = True except Exception: retries = retries + 1 if retries > max_port_range_retries: self.log_and_raise( http_status_code=500, reason="Failed to locate port within range {} after {} " "retries!".format(self.kernel_manager.port_range, max_port_range_retries), ) return sock def _get_candidate_port(self) -> int: """Randomly selects a port number within the configured range. If no range is configured, the 0 port is used - allowing the server to choose from the full range. """ range_size = self.upper_port - self.lower_port if range_size == 0: return 0 return random.randint(self.lower_port, self.upper_port) def log_and_raise(self, http_status_code: int | None = None, reason: str | None = None) -> None: """ Helper method that combines the logging and raising of exceptions. If http_status_code is provided an HTTPError is created using the status code and reason. If http_status_code is not provided, a RuntimeError is raised with reason as the message. In either case, an error is logged using the reason. If reason is not provided a generic message will be used. Parameters ---------- http_status_code : int The status code to raise reason : str The message to log and associate with the exception """ if reason is None: reason = "Internal server issue!" self.log.error(reason) if http_status_code: raise web.HTTPError(status_code=http_status_code, reason=reason) else: raise RuntimeError(reason) class LocalProcessProxy(BaseProcessProxyABC): """ Manages the lifecycle of a locally launched kernel process. 
This process proxy is used when no other process proxy is configured. """ def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): # noqa: F821 """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) kernel_manager.ip = localinterfaces.LOCALHOST async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> LocalProcessProxy: """Launch a process for a kernel.""" await super().launch_process(kernel_cmd, **kwargs) # launch the local run.sh self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid if hasattr(os, "getpgid"): try: self.pgid = os.getpgid(self.pid) except OSError: pass self.ip = local_ip self.log.info( "Local kernel launched on '{}', pid: {}, pgid: {}, KernelID: {}, cmd: '{}'".format( self.ip, self.pid, self.pgid, self.kernel_id, kernel_cmd ) ) return self class RemoteProcessProxy(BaseProcessProxyABC, metaclass=abc.ABCMeta): """ Abstract Base Class implementation associated with remote process proxies. """ def __init__(self, kernel_manager, proxy_config): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.response_socket = None self.start_time = None self.assigned_ip = None self.assigned_host = "" self.comm_ip = None self.comm_port = 0 self.tunneled_connect_info = ( None # Contains the destination connection info when tunneling in use ) self.tunnel_processes = {} self.response_manager = ( ResponseManager.instance() ) # This will create the key pair and socket on first use self.response_manager.register_event(self.kernel_id) self.kernel_manager.response_address = self.response_manager.response_address self.kernel_manager.public_key = self.response_manager.public_key async def launch_process(self, kernel_cmd, **kwargs): """Launch a process for a kernel.""" # Pass along port-range info to kernels... kwargs["env"]["EG_MIN_PORT_RANGE_SIZE"] = str(min_port_range_size) kwargs["env"]["EG_MAX_PORT_RANGE_RETRIES"] = str(max_port_range_retries) await super().launch_process(kernel_cmd, **kwargs) # remove connection file because a) it's not necessary any longer since launchers will return # the connection information which will (sufficiently) remain in memory and b) launchers # landing on this node may want to write to this file and be denied access. self.kernel_manager.cleanup_connection_file() @abc.abstractmethod def confirm_remote_startup(self): """Confirms the remote process has started and returned necessary connection information.""" pass def detect_launch_failure(self) -> None: """ Helper method called from implementations of `confirm_remote_startup()` that checks if self.local_proc (a popen instance) has terminated prior to the confirmation of startup. This prevents users from having to wait for the kernel timeout duration to know if the launch fails. It also helps distinguish local invocation issues from remote post-launch issues since the failure will be relatively immediate. Note that this method only applies to those process proxy implementations that launch from the local node. Proxies like DistributedProcessProxy use rsh against a remote node, so there's no `local_proc` in play to interrogate. """ # Check if the local proc has faulted (poll() will return non-None with a non-zero return # code in such cases). If a fault was encountered, raise server error (500) with a message # indicating to check the EG log for more information.
if self.local_proc: poll_result = self.local_proc.poll() if poll_result and poll_result > 0: self.local_proc.wait() error_message = ( f"Error occurred during launch of KernelID: {self.kernel_id}. " "Check Enterprise Gateway log for more information." ) self.local_proc = None self.log_and_raise(http_status_code=500, reason=error_message) def _tunnel_to_kernel( self, connection_info: dict, server: str, port: int = ssh_port, key: str | None = None ) -> tuple: """ Tunnel connections to a kernel over SSH. This will open five SSH tunnels from localhost on this machine to the ports associated with the kernel. See jupyter_client/connect.py for the original implementation. """ cf = connection_info lports = self.select_ports(5) rports = ( cf["shell_port"], cf["iopub_port"], cf["stdin_port"], cf["hb_port"], cf["control_port"], ) channels = ( KernelChannel.SHELL, KernelChannel.IOPUB, KernelChannel.STDIN, KernelChannel.HEARTBEAT, KernelChannel.CONTROL, ) remote_ip = cf["ip"] if not tunnel.try_passwordless_ssh(server + ":" + str(port), key): self.log_and_raise( http_status_code=403, reason="Must use password-less scheme by setting up the " "SSH public key on the cluster nodes", ) for lp, rp, kc in zip(lports, rports, channels): self._create_ssh_tunnel(kc, lp, rp, remote_ip, server, port, key) return tuple(lports) def _tunnel_to_port( self, kernel_channel: KernelChannel, remote_ip: str, remote_port: int, server: str, port: int = ssh_port, key: str | None = None, ) -> int: """ Analogous to _tunnel_to_kernel, but deals with a single port. This will typically be called for any one-off ports that require tunnelling. Note - this method assumes that passwordless ssh is in use and has been previously validated. """ local_port = self.select_ports(1)[0] self._create_ssh_tunnel( kernel_channel, local_port, remote_port, remote_ip, server, port, key ) return local_port def _create_ssh_tunnel( self, kernel_channel: KernelChannel, local_port: int, remote_port: int, remote_ip: str, server: str, port: int, key: str, ) -> None: """ Creates an SSH tunnel between the local and remote port/server for the given kernel channel. """ channel_name = kernel_channel.value self.log.debug( "Creating SSH tunnel for '{}': 127.0.0.1:'{}' to '{}':'{}'".format( channel_name, local_port, remote_ip, remote_port ) ) try: process = self._spawn_ssh_tunnel( kernel_channel, local_port, remote_port, remote_ip, server, port, key ) self.tunnel_processes[channel_name] = process except Exception as e: self.log_and_raise( http_status_code=500, reason=f"Could not open SSH tunnel for channel {channel_name}. Exception: '{e}'", ) def _spawn_ssh_tunnel( self, kernel_channel: KernelChannel, local_port: int, remote_port: int, remote_ip: str, server: str, port: int = ssh_port, key: str | None = None, ): """ This method spawns a child process to create an SSH tunnel and returns the spawned process. ZMQ's implementation returns a pid on UNIX based platforms and a process handle/reference on Win32. By consistently returning a process handle/reference on both UNIX and Win32 platforms, this method enables the caller to deal with the same currency regardless of the platform. For example, on both UNIX and Win32 platforms, the developer will have the option to stash the child process reference and manage its lifecycle consistently. On UNIX based platforms, ZMQ's implementation is more generic to be able to handle various use-cases. ZMQ's implementation also requests the spawned process to go to background using the '-f' command-line option.
As a result, the spawned process becomes an orphan and any references to the process obtained using its pid become stale. On the other hand, this implementation is specifically for password-less SSH login WITHOUT the '-f' command-line option, thereby allowing the spawned process to be owned by the parent process. This allows the parent process to control the lifecycle of its child processes and do appropriate cleanup during termination. """ if sys.platform == "win32": ssh_server = server + ":" + str(port) return tunnel.paramiko_tunnel(local_port, remote_port, ssh_server, remote_ip, key) else: ssh = "ssh -p %s -o ServerAliveInterval=%i" % ( port, self._get_keep_alive_interval(kernel_channel), ) cmd = "%s -S none -L 127.0.0.1:%i:%s:%i %s" % ( ssh, local_port, remote_ip, remote_port, server, ) # Note: dict.pop() returns the popped value, not the dict, so copy the environment # first, remove SSH_ASKPASS, then pass the resulting dict as env. env = os.environ.copy() env.pop("SSH_ASKPASS", None) return pexpect.spawn(cmd, env=env) def _get_keep_alive_interval(self, kernel_channel: KernelChannel) -> int: cull_idle_timeout = self.kernel_manager.cull_idle_timeout if ( kernel_channel == KernelChannel.COMMUNICATION or kernel_channel == KernelChannel.CONTROL or cull_idle_timeout <= 0 or cull_idle_timeout > max_keep_alive_interval ): # For COMMUNICATION and CONTROL channels, keep-alive interval will be set to # max_keep_alive_interval to make sure that the SSH session does not timeout # or expire for a very long time. Also, if cull_idle_timeout is unspecified, # negative, or a very large value, then max_keep_alive_interval will be # used as keep-alive value. return max_keep_alive_interval # Ideally, keep-alive interval should be greater than cull_idle_timeout. So, we # will add 60 seconds to cull_idle_timeout to come up with the value for keep-alive # interval for the rest of the kernel channels. return cull_idle_timeout + 60 async def receive_connection_info(self) -> bool: """ Monitors the response address for connection info sent by the remote kernel launcher. """ # Polls the socket using accept. When data is found, returns ready indicator and encrypted data. ready_to_connect = False try: connect_info = await self.response_manager.get_connection_info(self.kernel_id) self._setup_connection_info(connect_info) ready_to_connect = True except Exception as e: if type(e) is timeout or type(e) is asyncio.TimeoutError: self.log.debug( "Waiting for KernelID '{}' to send connection info from host '{}' - retrying...".format( self.kernel_id, self.assigned_host ) ) else: error_message = ( "Exception occurred waiting for connection file response for KernelId '{}' " "on host '{}': {}".format(self.kernel_id, self.assigned_host, e) ) self.kill() self.log_and_raise(http_status_code=500, reason=error_message) return ready_to_connect def _setup_connection_info(self, connect_info: dict) -> None: """ Take connection info (returned from launcher or loaded from session persistence) and properly configure port variables for the 5 kernel ports and (possibly) the launcher communication port. If tunneling is enabled, these ports will be tunneled with the original port information recorded. """ self.log.debug( f"Host assigned to the kernel is: '{self.assigned_host}' '{self.assigned_ip}'" ) connect_info["ip"] = ( self.assigned_ip ) # Set connection to IP address of system where the kernel was launched if tunneling_enabled is True: # Capture the current (tunneled) connect_info relative to the IP and ports (including the # communication port - if present).
self.tunneled_connect_info = dict(connect_info) # Open tunnels to the 5 ZMQ kernel ports tunnel_ports = self._tunnel_to_kernel(connect_info, self.assigned_ip) self.log.debug(f"Local ports used to create SSH tunnels: '{tunnel_ports}'") # Replace the remote connection ports with the local ports used to create SSH tunnels. connect_info["ip"] = "127.0.0.1" connect_info["shell_port"] = tunnel_ports[0] connect_info["iopub_port"] = tunnel_ports[1] connect_info["stdin_port"] = tunnel_ports[2] connect_info["hb_port"] = tunnel_ports[3] connect_info["control_port"] = tunnel_ports[4] # If a communication port was provided, tunnel it if "comm_port" in connect_info: self.comm_ip = connect_info["ip"] tunneled_comm_port = int(connect_info["comm_port"]) self.comm_port = self._tunnel_to_port( KernelChannel.COMMUNICATION, self.assigned_ip, tunneled_comm_port, self.assigned_ip, ) connect_info["comm_port"] = self.comm_port self.log.debug( "Established gateway communication to: {}:{} for KernelID '{}' via tunneled port " "127.0.0.1:{}".format( self.assigned_ip, tunneled_comm_port, self.kernel_id, self.comm_port ) ) else: # tunneling not enabled, still check for and record communication port if "comm_port" in connect_info: self.comm_ip = connect_info["ip"] self.comm_port = int(connect_info["comm_port"]) self.log.debug( "Established gateway communication to: {}:{} for KernelID '{}'".format( self.assigned_ip, self.comm_port, self.kernel_id ) ) # If no communication port was provided, record that fact as well since this is useful to know if "comm_port" not in connect_info: self.log.debug( "Gateway communication port has NOT been established for KernelID '{}' (optional).".format( self.kernel_id ) ) self._update_connection(connect_info) def _update_connection(self, connect_info: dict) -> None: """ Updates the connection info member variables of the kernel manager. Also pulls the PID and PGID info, if present, in case we need to use it for lifecycle management. Note: Do NOT update connect_info with IP and other such artifacts in this method/function. """ # Reset the ports to 0 so load can take place (which resets the members to value from file or json)... self.kernel_manager.stdin_port = self.kernel_manager.iopub_port = ( self.kernel_manager.shell_port ) = self.kernel_manager.hb_port = self.kernel_manager.control_port = 0 if connect_info: # Load new connection information into memory. No need to write back out to a file or track loopback, etc. # The launcher may also be sending back process info, so check and extract self._extract_pid_info(connect_info) self.kernel_manager.load_connection_info(info=connect_info) self.log.debug( "Received connection info for KernelID '{}' from host '{}': {}...".format( self.kernel_id, self.assigned_host, connect_info ) ) else: error_message = ( f"Unexpected runtime encountered for Kernel ID '{self.kernel_id}' - " "connection information is null!" ) self.log_and_raise(http_status_code=500, reason=error_message) self._close_response_socket() self.kernel_manager._connection_file_written = ( True # allows for cleanup of local files (as necessary) ) def _close_response_socket(self) -> None: # If there's a response-socket, close it since it's no longer needed.
if self.response_socket: try: self.log.debug("response socket still open, close it") self.response_socket.shutdown(SHUT_RDWR) self.response_socket.close() except OSError: pass # tolerate exceptions here since we don't need this socket and would like to continue self.response_socket = None def _extract_pid_info(self, connect_info: dict) -> None: """ Extracts any PID, PGID info from the payload received on the response socket. """ pid = connect_info.pop("pid", None) if pid: try: self.pid = int(pid) except ValueError: self.log.warning( f"pid returned from kernel launcher is not an integer: {pid} - ignoring." ) pid = None pgid = connect_info.pop("pgid", None) if pgid: try: self.pgid = int(pgid) except ValueError: self.log.warning( f"pgid returned from kernel launcher is not an integer: {pgid} - ignoring." ) pgid = None if ( pid or pgid ): # if either process ids were updated, update the ip as well and don't use local_proc self.ip = self.assigned_ip if not BaseProcessProxyABC.ip_is_local( self.ip ): # only unset local_proc if we're remote self.local_proc = None async def handle_timeout(self): """ Checks to see if the kernel launch timeout has been exceeded while awaiting connection info. """ await asyncio.sleep(poll_interval) time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) if time_interval > self.kernel_launch_timeout: error_http_code = 500 reason = f"Waited too long ({self.kernel_launch_timeout}s) to get connection file" timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}" await asyncio.get_event_loop().run_in_executor(None, self.kill) self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def cleanup(self): """ Terminates tunnel processes, if applicable. """ self.assigned_ip = None for kernel_channel, process in self.tunnel_processes.items(): self.log.debug(f"cleanup: terminating {kernel_channel} tunnel process.") process.terminate() self.tunnel_processes.clear() super().cleanup() def _send_listener_request(self, request: dict, shutdown_socket: bool = False) -> None: """ Sends the request dictionary to the kernel listener via the comm port. Caller is responsible for handling any exceptions. """ if self.comm_port > 0: sock = socket(AF_INET, SOCK_STREAM) try: sock.settimeout(socket_timeout) sock.connect((self.comm_ip, self.comm_port)) sock.send(json.dumps(request).encode(encoding="utf-8")) finally: if shutdown_socket: try: sock.shutdown(SHUT_WR) except Exception as e2: if isinstance(e2, OSError) and e2.errno == errno.ENOTCONN: # Listener is not connected. This is probably a follow-on to ECONNREFUSED on connect self.log.debug( f"OSError(ENOTCONN) raised on socket shutdown, listener " f"has likely already exited. Cannot send '{request}'" ) else: self.log.warning( f"Exception occurred attempting to shutdown communication " f"socket to {self.comm_ip}:{self.comm_port} " f"for KernelID '{self.kernel_id}' (ignored): {e2!s}" ) sock.close() else: self.log.debug( f"Invalid comm port, not sending request '{request}' to comm_port '{self.comm_port}'." ) def send_signal(self, signum): """ Sends `signum` via the communication port. The kernel launcher listening on its communication port will receive the signum and perform the necessary signal operation local to the process. """ # If the launcher returned a comm_port value, then use that to send the signal, # else, defer to the superclass - which will use a remote shell to issue kill.
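# Editor's note: the listener on the kernel side is expected to accept a TCP connection on
# comm_port and act on small JSON requests such as {"signum": 2} or {"shutdown": 1} (see
# _send_listener_request and shutdown_listener). A minimal sketch of such a listener - an
# editor's illustration, not the actual launcher code - could look like:
#
#   import json, os, socket
#
#   def listen(comm_port: int) -> None:
#       srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       srv.bind(("0.0.0.0", comm_port))
#       srv.listen(1)
#       while True:
#           conn, _ = srv.accept()
#           request = json.loads(conn.recv(1024) or b"{}")
#           conn.close()
#           if request.get("signum") is not None:
#               os.kill(os.getpid(), int(request["signum"]))  # deliver the signal locally
#           if request.get("shutdown"):
#               break
#       srv.close()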
# Note that if the target process is running as a different user than the REMOTE_USER, # using anything other than the socket-based signal (via signal_addr) will not work. if self.comm_port > 0: try: self._send_listener_request({"signum": signum}) if signum > 0: # Polling (signum == 0) is too frequent self.log.debug(f"Signal ({signum}) sent via gateway communication port.") return None except Exception as e: if ( isinstance(e, OSError) and e.errno == errno.ECONNREFUSED ): # Return False since there's no process. self.log.debug("ERROR: ECONNREFUSED, no process listening, cannot send signal.") return False self.log.warning( "An unexpected exception occurred sending signal ({}) for KernelID '{}': {}".format( signum, self.kernel_id, str(e) ) ) return super().send_signal(signum) def shutdown_listener(self): """ Sends a shutdown request to the kernel launcher listener. """ # If a comm port has been established, instruct the listener to shut down so that proper # kernel termination can occur. If not done, the listener keeps the launcher process # active, even after the kernel has terminated, leading to less than graceful terminations. if self.comm_port > 0: shutdown_request = {"shutdown": 1} try: self._send_listener_request(shutdown_request, shutdown_socket=True) self.log.debug("Shutdown request sent to listener via gateway communication port.") except Exception as e: if not isinstance(e, OSError) or e.errno != errno.ECONNREFUSED: self.log.warning( "An unexpected exception occurred sending listener shutdown to {}:{} for " "KernelID '{}': {}".format( self.comm_ip, self.comm_port, self.kernel_id, str(e) ) ) # Also terminate the tunnel process for the communication port - if in play. Failure to terminate # this process results in the kernel (launcher) appearing to remain alive following the shutdown # request, which triggers the "forced kill" termination logic. comm_port_name = KernelChannel.COMMUNICATION.value comm_port_tunnel = self.tunnel_processes.get(comm_port_name, None) if comm_port_tunnel: self.log.debug(f"shutdown_listener: terminating {comm_port_name} tunnel process.") comm_port_tunnel.terminate() del self.tunnel_processes[comm_port_name] def get_process_info(self): """ Captures the base information necessary for kernel persistence relative to remote processes. """ process_info = super().get_process_info() process_info.update( { "assigned_ip": self.assigned_ip, "assigned_host": self.assigned_host, "comm_ip": self.comm_ip, "comm_port": self.comm_port, "tunneled_connect_info": self.tunneled_connect_info, } ) return process_info def load_process_info(self, process_info): """ Loads the base information necessary for kernel persistence relative to remote processes. """ super().load_process_info(process_info) self.assigned_ip = process_info["assigned_ip"] self.assigned_host = process_info["assigned_host"] self.comm_ip = process_info["comm_ip"] self.comm_port = process_info["comm_port"] if ( "tunneled_connect_info" in process_info and process_info["tunneled_connect_info"] is not None ): # If this was a tunneled connection, re-establish tunnels. Note, this will reset the # communication socket (comm_ip, comm_port) members as well.
self._setup_connection_info(process_info["tunneled_connect_info"]) def log_and_raise(self, http_status_code: int | None = None, reason: str | None = None): """ Override of log_and_raise that ensures the response socket is properly closed before raising the exception. """ self._close_response_socket() super().log_and_raise(http_status_code, reason) @staticmethod def get_current_time(): """Return the current time stamp in UTC time epoch format in milliseconds.""" return timegm(_tz.utcnow().utctimetuple()) * 1000 @staticmethod def get_time_diff(time1, time2): """Return the difference between two timestamps in seconds, assuming the timestamps are in milliseconds.""" # e.g. the difference between 1504028203000 and 1504028208300 is 5300 milliseconds or 5.3 seconds diff = abs(time2 - time1) return diff / 1000.0 ================================================ FILE: enterprise_gateway/services/processproxies/spark_operator.py ================================================ """A spark operator process proxy.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations from ..kernels.remotemanager import RemoteKernelManager from .crd import CustomResourceProcessProxy class SparkOperatorProcessProxy(CustomResourceProcessProxy): """Spark operator process proxy.""" # Identifies the kind of object being managed by this process proxy. # For these values we will prefer the values found in the 'kind' field # of the object's metadata. This attribute is strictly used to provide # context to log messages. object_kind = "SparkApplication" def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.group = "sparkoperator.k8s.io" self.version = "v1beta2" self.plural = "sparkapplications" ================================================ FILE: enterprise_gateway/services/processproxies/yarn.py ================================================ """Code related to managing kernels running in YARN clusters.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import asyncio import errno import logging import os import signal import socket import time from typing import Any, ClassVar from jupyter_client import localinterfaces from yarn_api_client.base import Response from yarn_api_client.resource_manager import ResourceManager from ..kernels.remotemanager import RemoteKernelManager from ..sessions.kernelsessionmanager import KernelSessionManager from .processproxy import RemoteProcessProxy # The default logging levels of yarn-api and the underlying connectionpool produce too much noise - raise to warning only. logging.getLogger("yarn_api_client").setLevel(os.getenv("EG_YARN_LOG_LEVEL", logging.WARNING)) logging.getLogger("urllib3.connectionpool").setLevel( os.environ.get("EG_YARN_LOG_LEVEL", logging.WARNING) ) local_ip = localinterfaces.public_ips()[0] poll_interval = float(os.getenv("EG_POLL_INTERVAL", "0.5")) max_poll_attempts = int(os.getenv("EG_MAX_POLL_ATTEMPTS", "10")) yarn_shutdown_wait_time = float(os.getenv("EG_YARN_SHUTDOWN_WAIT_TIME", "15.0")) # cert_path: a boolean, defaulting to `True`, that controls whether we verify the # server's TLS certificate in yarn-api-client, or a string, in which case it must be # a path to a CA bundle (.pem file) to use.
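# Editor's note: e.g., a deployment might set EG_YARN_CERT_BUNDLE=/etc/ssl/certs/yarn-ca.pem
# (path illustrative only); the value is passed straight through as verify=cert_path when the
# ResourceManager is instantiated below.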
cert_path = os.getenv("EG_YARN_CERT_BUNDLE", True) mutual_authentication = os.getenv("EG_YARN_MUTUAL_AUTHENTICATION", "REQUIRED") class YarnClusterProcessProxy(RemoteProcessProxy): """ Kernel lifecycle management for YARN clusters. """ initial_states: ClassVar = {"NEW", "SUBMITTED", "ACCEPTED", "RUNNING"} final_states: ClassVar = {"FINISHED", "KILLED", "FAILED"} def __init__(self, kernel_manager: RemoteKernelManager, proxy_config: dict): """Initialize the proxy.""" super().__init__(kernel_manager, proxy_config) self.application_id = None self.last_known_state = None self.candidate_queue = None self.candidate_partition = None self.yarn_endpoint = proxy_config.get("yarn_endpoint", kernel_manager.yarn_endpoint) self.alt_yarn_endpoint = proxy_config.get( "alt_yarn_endpoint", kernel_manager.alt_yarn_endpoint ) self.yarn_endpoint_security_enabled = proxy_config.get( "yarn_endpoint_security_enabled", kernel_manager.yarn_endpoint_security_enabled ) # YARN applications tend to take longer than the default 5 second wait time. Rather than # require a command-line option for those using YARN, we'll adjust based on a local env that # defaults to 15 seconds. Note: we'll only adjust if the current wait time is shorter than # the desired value. if kernel_manager.shutdown_wait_time < yarn_shutdown_wait_time: kernel_manager.shutdown_wait_time = yarn_shutdown_wait_time self.log.debug( "{class_name} shutdown wait time adjusted to {wait_time} seconds.".format( class_name=type(self).__name__, wait_time=kernel_manager.shutdown_wait_time ) ) # If yarn resource check is enabled and it isn't available immediately, # 20% of kernel_launch_timeout is used to wait # and retry at fixed interval before pronouncing as not feasible to launch. self.yarn_resource_check_wait_time = 0.20 * self.kernel_launch_timeout def _initialize_resource_manager(self, **kwargs: dict[str, Any] | None) -> None: """Initialize the Hadoop YARN Resource Manager instance used for this kernel's lifecycle.""" endpoints = None if self.yarn_endpoint: endpoints = [self.yarn_endpoint] # Only check alternate if "primary" is set. if self.alt_yarn_endpoint: endpoints.append(self.alt_yarn_endpoint) if self.yarn_endpoint_security_enabled: from requests_kerberos import DISABLED, OPTIONAL, REQUIRED, HTTPKerberosAuth auth = HTTPKerberosAuth( mutual_authentication={ "REQUIRED": REQUIRED, "OPTIONAL": OPTIONAL, "DISABLED": DISABLED, }.get(mutual_authentication.upper()) ) else: # If we have the appropriate version of yarn-api-client, use its SimpleAuth class. # This allows EG to continue to issue requests against the YARN api when anonymous # access is not allowed. (Default is to allow anonymous access.) try: from yarn_api_client.auth import SimpleAuth kernel_username = KernelSessionManager.get_kernel_username(**kwargs) auth = SimpleAuth(kernel_username) self.log.debug( f"Using SimpleAuth with '{kernel_username}' against endpoints: {endpoints}" ) except ImportError: auth = None self.resource_mgr = ResourceManager( service_endpoints=endpoints, auth=auth, verify=cert_path ) self.rm_addr = self.resource_mgr.get_active_endpoint() async def launch_process( self, kernel_cmd: str, **kwargs: dict[str, Any] | None ) -> YarnClusterProcessProxy: """ Launches the specified process within a YARN cluster environment. 
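The flow below is: initialize the resource manager client, confirm queue availability, invoke the superclass launch, start the local run.sh (configured for yarn-cluster mode), and finally poll YARN until the remote connection information has been received.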
""" self._initialize_resource_manager(**kwargs) # checks to see if the queue resource is available # if not available, kernel startup is not attempted self.confirm_yarn_queue_availability(**kwargs) await super().launch_process(kernel_cmd, **kwargs) # launch the local run.sh - which is configured for yarn-cluster... self.local_proc = self.launch_kernel(kernel_cmd, **kwargs) self.pid = self.local_proc.pid self.ip = local_ip self.log.debug( "Yarn cluster kernel launched using YARN RM address: {}, pid: {}, Kernel ID: {}, cmd: '{}'".format( self.rm_addr, self.local_proc.pid, self.kernel_id, kernel_cmd ) ) await self.confirm_remote_startup() return self def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> None: """ Submitting jobs to yarn queue and then checking till the jobs are in running state will lead to orphan jobs being created in some scenarios. We take kernel_launch_timeout time and divide this into two parts. If the queue is unavailable we take max 20% of the time to poll the queue periodically and if the queue becomes available the rest of timeout is met in 80% of the remaining time. This algorithm is subject to change. Please read the below cases to understand when and how checks are applied. Confirms if the yarn queue has capacity to handle the resource requests that will be sent to it. First check ensures the driver and executor memory request falls within the container size of yarn configuration. This check requires executor and driver memory to be available in the env. Second,Current version of check, takes into consideration node label partitioning on given queues. Provided the queue name and node label this checks if the given partition has capacity available for kernel startup. All Checks are optional. If we have KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY specified, first check is performed. If we have KERNEL_QUEUE and KERNEL_NODE_LABEL specified, second check is performed. Proper error messages are sent back for user experience :param kwargs: :return: """ env_dict = kwargs.get("env", {}) executor_memory = int(env_dict.get("KERNEL_EXECUTOR_MEMORY", 0)) driver_memory = int(env_dict.get("KERNEL_DRIVER_MEMORY", 0)) if executor_memory * driver_memory > 0: container_memory = self.resource_mgr.cluster_node_container_memory() if max(executor_memory, driver_memory) > container_memory: self.log_and_raise( http_status_code=500, reason="Container Memory not sufficient for a executor/driver allocation", ) candidate_queue_name = env_dict.get("KERNEL_QUEUE", None) node_label = env_dict.get("KERNEL_NODE_LABEL", None) partition_availability_threshold = float(env_dict.get("YARN_PARTITION_THRESHOLD", 95.0)) if candidate_queue_name is None or node_label is None: return # else the resources may or may not be available now. it may be possible that if we wait then the resources # become available. start a timeout process self.start_time = RemoteProcessProxy.get_current_time() self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name) if self.candidate_queue is None: self.log.warning( f"Queue: {candidate_queue_name} not found in cluster." "Availability check will not be performed" ) return self.candidate_partition = self.resource_mgr.cluster_queue_partition( self.candidate_queue, node_label ) if self.candidate_partition is None: self.log.debug( f"Partition: {node_label} not found in {candidate_queue_name} queue." 
"Availability check will not be performed" ) return self.log.debug( f"Checking endpoint: {self.yarn_endpoint} if partition: {self.candidate_partition} " f"has used capacity <= {partition_availability_threshold}%" ) yarn_available = self.resource_mgr.cluster_scheduler_queue_availability( self.candidate_partition, partition_availability_threshold ) if not yarn_available: self.log.debug( "Retrying for {} ms since resources are not available".format( self.yarn_resource_check_wait_time ) ) while not yarn_available: self.handle_yarn_queue_timeout() yarn_available = self.resource_mgr.cluster_scheduler_queue_availability( self.candidate_partition, partition_availability_threshold ) # subtracting the total amount of time spent for polling for queue availability self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) def handle_yarn_queue_timeout(self) -> None: """Handle a yarn queue timeout.""" time.sleep(poll_interval) time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) if time_interval > self.yarn_resource_check_wait_time: error_http_code = 500 reason = "Yarn Compute Resource is unavailable after {} seconds".format( self.yarn_resource_check_wait_time ) self.log_and_raise(http_status_code=error_http_code, reason=reason) def poll(self) -> bool | None: """Submitting a new kernel/app to YARN will take a while to be ACCEPTED. Thus application ID will probably not be available immediately for poll. So will regard the application as RUNNING when application ID still in ACCEPTED or SUBMITTED state. :return: None if the application's ID is available and state is ACCEPTED/SUBMITTED/RUNNING. Otherwise False. """ result = False if self._get_application_id(): state = self._query_app_state_by_id(self.application_id) if state in YarnClusterProcessProxy.initial_states: result = None # The following produces too much output (every 3 seconds by default), so commented-out at this time. # self.log.debug("YarnProcessProxy.poll, application ID: {}, kernel ID: {}, state: {}". # format(self.application_id, self.kernel_id, state)) return result def send_signal(self, signum: int) -> bool | None: """Currently only support 0 as poll and other as kill. :param signum :return: """ if signum == 0: return self.poll() elif signum == signal.SIGKILL: return self.kill() else: # Yarn api doesn't support the equivalent to interrupts, so take our chances # via a remote signal. Note that this condition cannot check against the # signum value because altternate interrupt signals might be in play. return super().send_signal(signum) def kill(self) -> bool | None: """Kill a kernel. :return: None if the application existed and is not in RUNNING state, False otherwise. 
""" state = None result = False if self._get_application_id(): self._kill_app_by_id(self.application_id) # Check that state has moved to a final state (most likely KILLED) i = 1 state = self._query_app_state_by_id(self.application_id) while state not in YarnClusterProcessProxy.final_states and i <= max_poll_attempts: time.sleep(poll_interval) state = self._query_app_state_by_id(self.application_id) i = i + 1 if state in YarnClusterProcessProxy.final_states: result = None if result is False: # We couldn't terminate via Yarn, try remote signal result = super().kill() self.log.debug( "YarnClusterProcessProxy.kill, application ID: {}, kernel ID: {}, state: {}, result: {}".format( self.application_id, self.kernel_id, state, result ) ) return result def cleanup(self) -> None: """Clean up the proxy""" # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have # a local_proc. if self.local_proc: self.log.debug( "YarnClusterProcessProxy.cleanup: Clearing possible defunct process, pid={}...".format( self.local_proc.pid ) ) if super().poll(): super().kill() super().wait() self.local_proc = None # reset application id to force new query - handles kernel restarts/interrupts self.application_id = None # for cleanup, we should call the superclass last super().cleanup() async def confirm_remote_startup(self) -> None: """Confirms the yarn application is in a started state before returning. Should post-RUNNING states be unexpectedly encountered (FINISHED, KILLED, FAILED) then we must throw, otherwise the rest of the gateway will believe its talking to a valid kernel. """ self.start_time = RemoteProcessProxy.get_current_time() i = 0 ready_to_connect = False # we're ready to connect when we have a connection file to use while not ready_to_connect: i += 1 await self.handle_timeout() if self._get_application_id(True): # Once we have an application ID, start monitoring state, obtain assigned host and get connection info app_state = self._get_application_state() if app_state in YarnClusterProcessProxy.final_states: error_message = ( "KernelID: '{}', ApplicationID: '{}' unexpectedly found in state '{}'" " during kernel startup!".format( self.kernel_id, self.application_id, app_state ) ) self.log_and_raise(http_status_code=500, reason=error_message) self.log.debug( "{}: State: '{}', Host: '{}', KernelID: '{}', ApplicationID: '{}'".format( i, app_state, self.assigned_host, self.kernel_id, self.application_id ) ) if self.assigned_host: ready_to_connect = await self.receive_connection_info() else: self.detect_launch_failure() async def handle_timeout(self) -> None: """Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.""" await asyncio.sleep(poll_interval) time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) if time_interval > self.kernel_launch_timeout: reason = ( "Application ID is None. Failed to submit a new application to YARN within {} seconds. " "Check Enterprise Gateway log for more information.".format( self.kernel_launch_timeout ) ) error_http_code = 500 if self._get_application_id(True): if self._query_app_state_by_id(self.application_id) != "RUNNING": reason = ( "YARN resources unavailable after {} seconds for app {}, launch timeout: {}! " "Check YARN configuration.".format( time_interval, self.application_id, self.kernel_launch_timeout ) ) error_http_code = 503 else: reason = ( "App {} is RUNNING, but waited too long ({} secs) to get connection file. 
" "Check YARN logs for more information.".format( self.application_id, self.kernel_launch_timeout ) ) await asyncio.get_event_loop().run_in_executor(None, self.kill) timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}" self.log_and_raise(http_status_code=error_http_code, reason=timeout_message) def get_process_info(self) -> dict[str, Any]: """Captures the base information necessary for kernel persistence relative to YARN clusters.""" process_info = super().get_process_info() process_info.update({"application_id": self.application_id}) return process_info def load_process_info(self, process_info: dict[str, Any]) -> None: """Loads the base information necessary for kernel persistence relative to YARN clusters.""" super().load_process_info(process_info) self.application_id = process_info["application_id"] def _get_application_state(self) -> str: # Gets the current application state using the application_id already obtained. Once the assigned host # has been identified, 'amHostHttpAddress' is nolonger accessed. app_state = self.last_known_state app = self._query_app_by_id(self.application_id) if app: if app.get("state"): app_state = app.get("state") self.last_known_state = app_state if not self.assigned_host and app.get("amHostHttpAddress"): self.assigned_host = app.get("amHostHttpAddress").split(":")[0] # Set the kernel manager ip to the actual host where the application landed. self.assigned_ip = socket.gethostbyname(self.assigned_host) return app_state def _get_application_id(self, ignore_final_states: bool = False) -> str: # Return the kernel's YARN application ID if available, otherwise None. If we're obtaining application_id # from scratch, do not consider kernels in final states. if not self.application_id: app = self._query_app_by_name(self.kernel_id) state_condition = True if isinstance(app, dict): state = app.get("state") self.last_known_state = state if ignore_final_states: state_condition = state not in YarnClusterProcessProxy.final_states if len(app.get("id", "")) > 0 and state_condition: self.application_id = app["id"] time_interval = RemoteProcessProxy.get_time_diff( self.start_time, RemoteProcessProxy.get_current_time() ) self.log.info( "ApplicationID: '{}' assigned for KernelID: '{}', " "state: {}, {} seconds after starting.".format( app["id"], self.kernel_id, state, time_interval ) ) if not self.application_id: self.log.debug( f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying..." ) return self.application_id def _query_app_by_name(self, kernel_id: str) -> dict: """Retrieve application by using kernel_id as the unique app name. With the started_time_begin as a parameter to filter applications started earlier than the target one from YARN. When submit a new app, it may take a while for YARN to accept and run and generate the application ID. Note: if a kernel restarts with the same kernel id as app name, multiple applications will be returned. For now, the app/kernel with the top most application ID will be returned as the target app, assuming the app ID will be incremented automatically on the YARN side. :param kernel_id: as the unique app name for query :return: The JSON object of an application. """ top_most_app_id = "" target_app = None try: response = self.resource_mgr.cluster_applications( started_time_begin=str(self.start_time) ) except OSError as sock_err: if sock_err.errno == errno.ECONNREFUSED: self.log.warning( "YARN RM address: '{}' refused the connection. 
Is the resource manager running?".format( self.rm_addr ) ) else: self.log.warning( "Query for kernel ID '{}' failed with exception: {} - '{}'. Continuing...".format( kernel_id, type(sock_err), sock_err ) ) except Exception as e: self.log.warning( "Query for kernel ID '{}' failed with exception: {} - '{}'. Continuing...".format( kernel_id, type(e), e ) ) else: data = response.data if ( isinstance(data, dict) and isinstance(data.get("apps"), dict) and "app" in data.get("apps") ): for app in data["apps"]["app"]: if app.get("name", "").find(kernel_id) >= 0 and app.get("id") > top_most_app_id: target_app = app top_most_app_id = app.get("id") return target_app def _query_app_by_id(self, app_id: str) -> dict: """Retrieve an application by application ID. :param app_id :return: The JSON object of an application. """ app = None try: response = self.resource_mgr.cluster_application(application_id=app_id) except Exception as e: self.log.warning( f"Query for application ID '{app_id}' failed with exception: '{e}'. Continuing..." ) else: data = response.data if isinstance(data, dict) and "app" in data: app = data["app"] return app def _query_app_state_by_id(self, app_id: str) -> str: """Return the state of an application. If a failure occurs, the last known state is returned. :param app_id: :return: application state (str) """ state = self.last_known_state try: response = self.resource_mgr.cluster_application_state(application_id=app_id) except Exception as e: self.log.warning( f"Query for application '{app_id}' state failed with exception: '{e}'. " f"Continuing with last known state = '{state}'..." ) else: state = response.data["state"] self.last_known_state = state return state def _kill_app_by_id(self, app_id: str) -> Response: """Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED. :param app_id :return: The JSON response of killing the application. """ response = None try: response = self.resource_mgr.cluster_application_kill(application_id=app_id) except Exception as e: self.log.warning( f"Termination of application '{app_id}' failed with exception: '{e}'. Continuing..." ) return response ================================================ FILE: enterprise_gateway/services/sessions/__init__.py ================================================ ================================================ FILE: enterprise_gateway/services/sessions/handlers.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Tornado handlers for session CRUD.""" from typing import List import jupyter_server.services.sessions.handlers as jupyter_server_handlers import tornado from jupyter_server.utils import ensure_async from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin class SessionRootHandler( TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, jupyter_server_handlers.SessionRootHandler ): """Extends the jupyter_server root session handler with token auth, CORS, and JSON errors. """ async def get(self) -> None: """Overrides the super class method to honor the kernel listing configuration setting. 
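For example (illustrative), with listing disabled a GET of /api/sessions yields a 403 rather than the list of sessions.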
Raises ------ tornado.web.HTTPError If eg_list_kernels is False, respond with 403 Forbidden """ if "eg_list_kernels" not in self.settings or not self.settings["eg_list_kernels"]: raise tornado.web.HTTPError(403, "Forbidden") else: await ensure_async(super().get()) default_handlers: List[tuple] = [] for path, cls in jupyter_server_handlers.default_handlers: if cls.__name__ in globals(): # Use the same named class from here if it exists default_handlers.append((path, globals()[cls.__name__])) else: # Everything should have CORS and token auth bases = (TokenAuthorizationMixin, CORSMixin, cls) default_handlers.append((path, type(cls.__name__, bases, {}))) ================================================ FILE: enterprise_gateway/services/sessions/kernelsessionmanager.py ================================================ """Session manager that keeps all its metadata in memory.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations import getpass import json import os import threading import requests from jupyter_core.paths import jupyter_data_dir from requests.auth import HTTPBasicAuth, HTTPDigestAuth from traitlets import Bool, CaselessStrEnum, Unicode, default from traitlets.config.configurable import LoggingConfigurable kernels_lock = threading.Lock() # These will be located under the `persistence_root` and exist # to make integration with ContentsManager implementations easier. KERNEL_SESSIONS_DIR_NAME = "kernel_sessions" class KernelSessionManager(LoggingConfigurable): """ KernelSessionManager is used to save and load kernel sessions from persistent storage. KernelSessionManager provides the basis for an HA solution. It loads the complete set of persisted kernel sessions during construction. Following construction, the parent object calls start_sessions to allow Enterprise Gateway to validate that all loaded sessions are still valid. Those that it cannot 'revive' are marked for deletion and the in-memory dictionary is updated - and the entire collection is written to store (file or database). As kernels are created and destroyed, the KernelSessionManager is called upon to keep kernel session state consistent. NOTE: This class is essentially an abstract base class that requires that its `load_sessions`, `load_session`, `delete_sessions` and `save_session` methods be implemented in subclasses. abc.ABCMeta is not used due to conflicts with the derivation of LoggingConfigurable - which seemed more important. """ # Session Persistence session_persistence_env = "EG_KERNEL_SESSION_PERSISTENCE" session_persistence_default_value = False enable_persistence = Bool( session_persistence_default_value, config=True, help="""Enable kernel session persistence (True or False). Default = False (EG_KERNEL_SESSION_PERSISTENCE env var)""", ) @default("enable_persistence") def _session_persistence_default(self) -> bool: return bool( os.getenv( self.session_persistence_env, str(self.session_persistence_default_value) ).lower() == "true" ) # Persistence root persistence_root_env = "EG_PERSISTENCE_ROOT" persistence_root = Unicode( config=True, help="""Identifies the root 'directory' under which the 'kernel_sessions' node will reside. This directory should exist. 
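For example (illustrative), with a persistence root of /var/lib/eg, file-based kernel sessions reside under /var/lib/eg/kernel_sessions.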
(EG_PERSISTENCE_ROOT env var)""", ) @default("persistence_root") def _persistence_root_default(self) -> str: return os.getenv(self.persistence_root_env, "/") def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs): # noqa: F821 """Initialize the manager.""" super().__init__(**kwargs) self.kernel_manager = kernel_manager self._sessions = {} self._sessionsByUser = {} def create_session(self, kernel_id: str, **kwargs) -> None: """ Creates a session associated with this kernel. All items associated with the active kernel's state are saved. Parameters ---------- kernel_id : str The uuid string associated with the active kernel **kwargs : optional Information used for the launch of the kernel """ km = self.kernel_manager.get_kernel(kernel_id) # Compose the kernel_session entry kernel_session = {} kernel_session["kernel_id"] = kernel_id kernel_session["username"] = KernelSessionManager.get_kernel_username(**kwargs) kernel_session["kernel_name"] = km.kernel_name # Build the inner dictionaries (connection_info, process_info) and add them to kernel_session kernel_session["connection_info"] = km.get_connection_info() kernel_session["launch_args"] = kwargs.copy() kernel_session["process_info"] = ( km.process_proxy.get_process_info() if km.process_proxy else {} ) self._save_session(kernel_id, kernel_session) def refresh_session(self, kernel_id: str) -> None: """ Refreshes the session from its persisted state. Called on kernel restarts. """ self.log.debug(f"Refreshing kernel session for id: {kernel_id}") km = self.kernel_manager.get_kernel(kernel_id) # Compose the kernel_session entry kernel_session = self._sessions[kernel_id] # Build the inner dictionaries (connection_info, process_info) and add them to kernel_session kernel_session["connection_info"] = km.get_connection_info() kernel_session["process_info"] = ( km.process_proxy.get_process_info() if km.process_proxy else {} ) self._save_session(kernel_id, kernel_session) def _save_session(self, kernel_id: str, kernel_session: dict) -> None: # Write/commit the addition, update dictionary kernels_lock.acquire() try: self._sessions[kernel_id] = kernel_session username = kernel_session["username"] if username not in self._sessionsByUser: self._sessionsByUser[username] = [] self._sessionsByUser[username].append(kernel_id) else: # Only append if not there yet (e.g. restarts will be there already) if kernel_id not in self._sessionsByUser[username]: self._sessionsByUser[username].append(kernel_id) self.save_session(kernel_id) # persist changes in file/DB etc. finally: kernels_lock.release() def start_session(self, kernel_id: str) -> bool | None: """Start a session for a given kernel.""" kernel_session = self._sessions.get(kernel_id, None) if kernel_session is not None: return self._start_session(kernel_session) return None def start_sessions(self) -> None: """ Attempt to start persisted sessions. Determines if session startup was successful. If unsuccessful, the session is removed from persistent storage. """ if self.enable_persistence: self.load_sessions() sessions_to_remove = [] for kernel_id, kernel_session in self._sessions.items(): self.log.info( "Attempting startup of persisted kernel session for id: %s..." % kernel_id ) if self._start_session(kernel_session): self.log.info( "Startup of persisted kernel session for id '{}' was successful. Client should " "reconnect kernel.".format(kernel_id) ) else: sessions_to_remove.append(kernel_id) self.log.warning( "Startup of persisted kernel session for id '{}' was not successful. 
Check if " "client is still active and restart kernel.".format(kernel_id) ) self._delete_sessions(sessions_to_remove) def _start_session(self, kernel_session: dict) -> bool: # Attempt to start kernel from persisted state. if started, record kernel_session in dictionary # else delete session kernel_id = kernel_session["kernel_id"] kernel_started = self.kernel_manager.start_kernel_from_session( kernel_id=kernel_id, kernel_name=kernel_session["kernel_name"], connection_info=kernel_session["connection_info"], process_info=kernel_session["process_info"], launch_args=kernel_session["launch_args"], ) if not kernel_started: return False return True def delete_session(self, kernel_id: str) -> None: """ Removes saved session associated with kernel_id from dictionary and persisted storage. """ self._delete_sessions([kernel_id]) if self.enable_persistence: self.log.info("Deleted persisted kernel session for id: %s" % kernel_id) def _delete_sessions(self, kernel_ids: list[str]) -> None: # Remove unstarted sessions and rewrite kernels_lock.acquire() try: for kernel_id in kernel_ids: # Prior to removing session, update the per User list kernel_session = self._sessions.get(kernel_id, None) if kernel_session is not None: username = kernel_session["username"] if ( username in self._sessionsByUser and kernel_id in self._sessionsByUser[username] ): self._sessionsByUser[username].remove(kernel_id) self._sessions.pop(kernel_id, None) self.delete_sessions(kernel_ids) finally: kernels_lock.release() @staticmethod def pre_save_transformation(session: dict) -> dict: """Handle a pre_save for a session.""" kernel_id = next(iter(session.keys())) session_info = session[kernel_id] if session_info.get("connection_info"): info = session_info["connection_info"] key = info.get("key") if key: info["key"] = key.decode("utf8") return session @staticmethod def post_load_transformation(session: dict) -> dict: """Handle a post_load for a session.""" kernel_id = next(iter(session.keys())) session_info = session[kernel_id] if session_info.get("connection_info"): info = session_info["connection_info"] key = info.get("key") if key: info["key"] = key.encode("utf8") return session # abstractmethod def load_sessions(self) -> None: """ Load and initialize _sessions member from persistent storage. This method is called from start_sessions(). """ msg = "KernelSessionManager.load_sessions() requires an implementation!" raise NotImplementedError(msg) # abstractmethod def load_session(self, kernel_id: str) -> None: """ Load and initialize _sessions member from persistent storage for a single kernel. This method is called from refresh_sessions(). """ msg = "KernelSessionManager.load_session() requires an implementation!" raise NotImplementedError(msg) # abstractmethod def delete_sessions(self, kernel_ids: list[str]) -> None: """ Delete the sessions in persistent storage. Caller is responsible for synchronizing call. """ msg = "KernelSessionManager.delete_sessions(kernel_ids) requires an implementation!" raise NotImplementedError(msg) def save_session(self, kernel_id: str) -> None: """ Saves the sessions dictionary to persistent store. Caller is responsible for synchronizing call. """ msg = "KernelSessionManager.save_session(kernel_id) requires an implementation!" raise NotImplementedError(msg) def active_sessions(self, username: str) -> int: """ Returns the number of active sessions for the given username. 
Parameters ---------- username : str The username associated with the active session Returns ------- int corresponding to the number of active sessions associated with given user """ if username in self._sessionsByUser: return len(self._sessionsByUser[username]) return 0 @staticmethod def get_kernel_username(**kwargs) -> str: """ Returns the kernel's logical username from the env dict. Checks the request env (from kwargs) for KERNEL_USERNAME. If set, that value is returned; otherwise KERNEL_USERNAME is initialized to the current user and that value is returned. Parameters ---------- kwargs : dict from which request env is accessed. Returns ------- str indicating kernel username """ # Get the env env_dict = kwargs.get("env", {}) # Ensure KERNEL_USERNAME is set kernel_username = env_dict.get("KERNEL_USERNAME") if kernel_username is None: kernel_username = getpass.getuser() env_dict["KERNEL_USERNAME"] = kernel_username return kernel_username class FileKernelSessionManager(KernelSessionManager): """ Performs kernel session persistence operations against per-kernel files (`<kernel_id>.json`) located in the kernel_sessions directory within the directory pointed to by the persistence_root parameter (default JUPYTER_DATA_DIR). """ # Change the default to Jupyter Data Dir. @default("persistence_root") def _persistence_root_default(self) -> str: return os.getenv(self.persistence_root_env, jupyter_data_dir()) def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs): # noqa: F821 """Initialize the manager.""" super().__init__(kernel_manager, **kwargs) if self.enable_persistence: self.log.info(f"Kernel session persistence location: {self._get_sessions_loc()}") def delete_sessions(self, kernel_ids: list[str]) -> None: """Delete the sessions for a list of kernels.""" if self.enable_persistence: for kernel_id in kernel_ids: kernel_file_name = "".join([kernel_id, ".json"]) kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name) if os.path.exists(kernel_session_file_path): os.remove(kernel_session_file_path) def save_session(self, kernel_id: str) -> None: """Save the session for a kernel.""" if self.enable_persistence and kernel_id is not None: kernel_file_name = "".join([kernel_id, ".json"]) kernel_session_file_path = os.path.join(self._get_sessions_loc(), kernel_file_name) temp_session = {} temp_session[kernel_id] = self._sessions[kernel_id] with open(kernel_session_file_path, "w") as fp: json.dump(KernelSessionManager.pre_save_transformation(temp_session), fp) def load_sessions(self) -> None: """Load the sessions.""" if self.enable_persistence: kernel_session_files = [ json_files for json_files in os.listdir(self._get_sessions_loc()) if json_files.endswith(".json") ] for kernel_session_file in kernel_session_files: self._load_session_from_file(kernel_session_file) def load_session(self, kernel_id: str) -> None: """Load the session for a kernel.""" if self.enable_persistence and kernel_id is not None: kernel_session_file = "".join([kernel_id, ".json"]) self._load_session_from_file(kernel_session_file) def _load_session_from_file(self, file_name: str) -> None: kernel_session_file_path = os.path.join(self._get_sessions_loc(), file_name) if os.path.exists(kernel_session_file_path): self.log.debug(f"Loading saved session(s) from {kernel_session_file_path}") try: with open(kernel_session_file_path) as fp: self._sessions.update( KernelSessionManager.post_load_transformation(json.load(fp)) ) except json.JSONDecodeError as e: self.log.error( f"Failed to load session from {kernel_session_file_path}: Invalid 
JSON - {e}" ) except Exception as e: self.log.error( f"Failed to load session from {kernel_session_file_path}: {type(e).__name__} - {e}" ) def _get_sessions_loc(self) -> str: path = os.path.join(self.persistence_root, KERNEL_SESSIONS_DIR_NAME) if not os.path.exists(path): os.makedirs(path, 0o755) return path class WebhookKernelSessionManager(KernelSessionManager): """ Performs kernel session persistence operations against the URL provided (EG_WEBHOOK_URL). The URL must support four endpoints: a DELETE endpoint that takes a list of kernel ids in the body, a POST endpoint that takes the kernel id as a URL param and the kernel session as the body, a GET endpoint that returns all kernel sessions, and a GET endpoint that returns a specific kernel session based on the kernel id URL param. """ # Webhook URL webhook_url_env = "EG_WEBHOOK_URL" webhook_url = Unicode( config=True, allow_none=True, help="""URL endpoint for webhook kernel session manager""", ) @default("webhook_url") def _webhook_url_default(self) -> str | None: return os.getenv(self.webhook_url_env, None) # Webhook Username webhook_username_env = "EG_WEBHOOK_USERNAME" webhook_username = Unicode( config=True, allow_none=True, help="""Username for webhook kernel session manager API auth""", ) @default("webhook_username") def _webhook_username_default(self) -> str | None: return os.getenv(self.webhook_username_env, None) # Webhook Password webhook_password_env = "EG_WEBHOOK_PASSWORD" # noqa webhook_password = Unicode( config=True, allow_none=True, help="""Password for webhook kernel session manager API auth""", ) @default("webhook_password") def _webhook_password_default(self) -> str | None: return os.getenv(self.webhook_password_env, None) # Auth Type auth_type_env = "EG_AUTH_TYPE" auth_type = CaselessStrEnum( config=True, allow_none=True, values=["basic", "digest"], help="""Authentication type for webhook kernel session manager API. 
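Set via the EG_AUTH_TYPE env var when not configured directly.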
Either basic, digest or None""", ) @default("auth_type") def _auth_type_default(self) -> str | None: return os.getenv(self.auth_type_env, None) def __init__(self, kernel_manager: RemoteMappingKernelManager, **kwargs): # noqa: F821 """Initialize the manager.""" super().__init__(kernel_manager, **kwargs) if self.enable_persistence: self.log.info("Webhook kernel session persistence activated") self.auth = "" if self.auth_type: if self.webhook_username and self.webhook_password: if self.auth_type == "basic": self.auth = HTTPBasicAuth(self.webhook_username, self.webhook_password) elif self.auth_type == "digest": self.auth = HTTPDigestAuth(self.webhook_username, self.webhook_password) elif self.auth_type is None: self.auth = "" else: self.log.error("No such option for auth_type/EG_AUTH_TYPE") else: self.log.error("Username and/or password aren't set") def delete_sessions(self, kernel_ids: list[str]) -> None: """ Deletes kernel sessions from database :param list of strings kernel_ids: A list of kernel ids """ if self.enable_persistence: response = requests.delete( self.webhook_url, auth=self.auth, json=kernel_ids, timeout=60 ) self.log.debug(f"Webhook kernel session deleting: {kernel_ids}") if response.status_code != 204: self.log.error(response.raise_for_status()) def save_session(self, kernel_id: str) -> None: """ Saves kernel session to database :param string kernel_id: A kernel id """ if self.enable_persistence and kernel_id is not None: temp_session = {} temp_session[kernel_id] = self._sessions[kernel_id] body = KernelSessionManager.pre_save_transformation(temp_session) response = requests.post( f"{self.webhook_url}/{kernel_id}", auth=self.auth, json=body, timeout=60 ) self.log.debug(f"Webhook kernel session saving: {kernel_id}") if response.status_code != 204: self.log.error(response.raise_for_status()) def load_sessions(self) -> None: """ Loads kernel sessions from database """ if self.enable_persistence: response = requests.get(self.webhook_url, auth=self.auth, timeout=60) if response.status_code == 200: kernel_sessions = response.json() for kernel_session in kernel_sessions: self._load_session_from_response(kernel_session) else: self.log.error(response.raise_for_status()) def load_session(self, kernel_id: str) -> None: """ Loads a kernel session from database :param string kernel_id: A kernel id """ if self.enable_persistence and kernel_id is not None: response = requests.get(f"{self.webhook_url}/{kernel_id}", auth=self.auth, timeout=60) if response.status_code == 200: kernel_session = response.json() self._load_session_from_response(kernel_session) else: self.log.error(response.raise_for_status()) def _load_session_from_response(self, kernel_session: dict) -> None: """ Loads kernel session to current session :param dictionary kernel_session: Kernel session information """ self.log.debug("Loading saved session(s)") self._sessions.update( KernelSessionManager.post_load_transformation(kernel_session["kernel_session"]) ) ================================================ FILE: enterprise_gateway/services/sessions/sessionmanager.py ================================================ """Session manager that keeps all its metadata in memory.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
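# NOTE: Unlike the SQLite-backed session manager in jupyter_server, this implementation keeps # session metadata ("session_id", "path", "kernel_id") in a plain in-memory list; see the # SessionManager class below.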
import uuid from typing import Any, Hashable, List, Optional from tornado import web from traitlets.config.configurable import LoggingConfigurable from enterprise_gateway.services.kernels.remotemanager import RemoteMappingKernelManager class SessionManager(LoggingConfigurable): """Simple implementation of the SessionManager interface that allows clients to associate basic metadata with a kernel. Parameters ---------- kernel_manager : RemoteMappingKernelManager Used to start a kernel when creating a session Attributes ---------- kernel_manager : RemoteMappingKernelManager Used to start a kernel when creating a session _sessions : list Sessions _columns : list Session metadata key names """ def __init__(self, kernel_manager: RemoteMappingKernelManager, *args, **kwargs): """Initialize the session manager.""" super().__init__(*args, **kwargs) self.kernel_manager = kernel_manager self._sessions = [] self._columns = ["session_id", "path", "kernel_id"] def session_exists(self, path: str, *args, **kwargs) -> bool: """Checks to see if the session with the given path value exists. Parameters ---------- path : str Session path value to search on Returns ------- bool """ return bool([item for item in self._sessions if item["path"] == path]) def new_session_id(self) -> str: """Creates a uuid for a new session.""" return str(uuid.uuid4()) async def create_session( self, path: Optional[str] = None, kernel_name: Optional[str] = None, kernel_id: Optional[str] = None, *args, **kwargs, ) -> dict: """Creates a session and returns its model. Launches a kernel and stores the session metadata for later lookup. Parameters ---------- path : str Path value to store in the session metadata kernel_name : str Kernel spec name kernel_id : str Existing kernel ID to bind to the session (unsupported) Returns ------- dict Session model """ session_id = self.new_session_id() # allow the notebook manager to specify the kernel's cwd kernel_id = await self.kernel_manager.start_kernel(path=path, kernel_name=kernel_name) return self.save_session(session_id, path=path, kernel_id=kernel_id) def save_session( self, session_id: str, path: Optional[str] = None, kernel_id: Optional[str] = None, *args, **kwargs, ) -> dict: """Saves the metadata for the session with the given `session_id`. Given a `session_id` (and any other of the arguments), this method appends a dictionary to the in-memory list of sessions. Parameters ---------- session_id : str UUID for the session; this method must be given a session_id path : str Path for the given notebook kernel_id : str ID for the kernel associated with this session Returns ------- dict Session model with `session_id`, `path`, and `kernel_id` keys """ self._sessions.append({"session_id": session_id, "path": path, "kernel_id": kernel_id}) return self.get_session(session_id=session_id) def get_session_by_key(self, key: Hashable, val: Any, *args, **kwargs) -> Optional[dict]: """Gets the first session with the given key/value pair. Parameters ---------- key : hashable Session metadata key to match val : any Session metadata value to match Returns ------- dict Matching session model or None if not found """ s = [item for item in self._sessions if item[key] == val] return None if not s else s[0] def get_session(self, **kwargs) -> dict: """Returns the model for a particular session. Takes a keyword argument and searches for the value in the in-memory session store. Returns the entire session model. 
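For example (illustrative), get_session(kernel_id="abc-123") returns the model of the session bound to that kernel.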
Parameters ---------- **kwargs : keyword argument One of the key/value pairs from `_columns` Raises ------ TypeError If there are no kwargs or none of them match a key/column used in the metadata tornado.web.HTTPError 404 Not Found if no session matches the provided metadata Returns ------- model : dict All the information from the session described by the kwarg """ if not kwargs: msg = "Must specify a column to query" raise TypeError(msg) for param in kwargs: if param not in self._columns: msg = f"No such column: {param}" raise TypeError(msg) # multiple columns are never passed into kwargs so just using the # first and only one. column = next(iter(kwargs.keys())) row = self.get_session_by_key(column, kwargs[column]) if not row: raise web.HTTPError(404, "Session not found: %s" % kwargs[column]) return self.row_to_model(row) def update_session(self, session_id: str, *args, **kwargs) -> None: """Updates the values in the session store. Update the values of the session model with the given `session_id` with the values from the keyword arguments. Parameters ---------- session_id : str UUID that identifies a session in the in-memory store **kwargs : str Key/value pairs to store Raises ------ KeyError If no session matches the given `session_id` """ if not kwargs: # no changes return row = self.get_session_by_key("session_id", session_id) if not row: raise KeyError self._sessions.remove(row) if "path" in kwargs: row["path"] = kwargs["path"] if "kernel_id" in kwargs: row["kernel_id"] = kwargs["kernel_id"] self._sessions.append(row) def row_to_model(self, row: dict, *args, **kwargs) -> dict: """Turns a "row" in the in-memory session store into a model dictionary. Parameters ---------- row : dict Maps `id` to `session_id`, `notebook` to a dict containing the `path`, and `kernel` to the kernel model looked up using the `kernel_id` """ if row["kernel_id"] not in self.kernel_manager: # The kernel was killed or died without deleting the session. # We can't use delete_session here because that tries to find # and shut down the kernel. self._sessions.remove(row) raise KeyError model = { "id": row["session_id"], "notebook": {"path": row["path"]}, "kernel": self.kernel_manager.kernel_model(row["kernel_id"]), } return model def list_sessions(self, *args, **kwargs) -> List[dict]: """Returns a list of dictionaries containing all the information from the session store. Returns ------- list Dictionaries from `row_to_model` """ return [self.row_to_model(r) for r in self._sessions] async def delete_session(self, session_id: str, *args, **kwargs) -> None: """Deletes the session in the session store with given `session_id`. Raises ------ KeyError If the `session_id` is not in the store """ # Check that session exists before deleting s = self.get_session_by_key("session_id", session_id) if not s: raise KeyError await self.kernel_manager.shutdown_kernel(s["kernel_id"]) self._sessions.remove(s) ================================================ FILE: enterprise_gateway/tests/__init__.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from tornado import ioloop def teardown(): """The test fixture appears to leak something on certain platforms that endlessly tries an async socket connect and fails after the tests end. As a stopgap, force a cleanup here. """ ioloop.IOLoop.current().stop() # Close is not necessary since process termination closes the loop. This was causing intermittent # `Event loop is closed` exceptions. 
These didn't affect the test results, but produced output that # was otherwise misleading noise. # ioloop.IOLoop.current().close(True) ================================================ FILE: enterprise_gateway/tests/resources/failing_code2.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import not-a-real-module" ] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "2.7.10" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/failing_code3.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import not-a-real-module" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.4.3" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/kernel_api2.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# API Creation \n", "This notebook is a sample of how to author a REST API in the notebook environment."
] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import os, json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "hello_message = 'hello {}'\n", "people = ['Corey', 'Nitin', 'Pete']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Hello" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello\n", "print(hello_message.format('world'))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'args': { \n", " 'person' : people\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/person\n", "req = json.loads(REQUEST)\n", "hello_person = req['args']['person'][0]\n", "print(hello_message.format(hello_person))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'args': { \n", " 'person' : people\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/persons\n", "req = json.loads(REQUEST)\n", "hello_persons = req['args']['person']\n", "print(hello_message.format(', '.join(hello_persons)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/people\n", "hello_people = people\n", "print(hello_message.format(', '.join(hello_people)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'person' : 'test_person'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/:person\n", "req = json.loads(REQUEST)\n", "print(hello_message.format(req['path']['person']))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Messages" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /message\n", "print(hello_message)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'body' : 'test value'\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# PUT /message\n", "req = json.loads(REQUEST)\n", "hello_message = req['body']\n", "print(hello_message)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## People" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /people\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'body' : ['Rick', 'Maggie', 'Glenn', 'Carol', 'Daryl']\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# POST /people\n", "req = json.loads(REQUEST)\n", "people = req['body']\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ 
"REQUEST = json.dumps({\n", " 'body' : 'Michonne'\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# PUT /people\n", "req = json.loads(REQUEST)\n", "people.append(req['body'])\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'index' : 1\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# DELETE /people/:index\n", "req = json.loads(REQUEST)\n", "people.remove(people[int(req['path']['index'])])\n", "print(json.dumps(people))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Error" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /error\n", "this cell should print an error in the reponse" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import sys" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /stderr\n", "print 'I am text on stdout'\n", "print >> sys.stderr, 'I am text on stderr'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Misc" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'time': 1\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /sleep/:time\n", "req = json.loads(REQUEST)\n", "from time import sleep\n", "sleep_time = int(req['path']['time'])\n", "sleep(sleep_time)\n", "print(\"Slept for {} seconds\".format(sleep_time))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /execute_result\n", "1+1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'headers' : {\n", " 'Content-Type': 'application/json'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /content-type\n", "req = json.loads(REQUEST)\n", "print(req['headers']['Content-Type'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# GET /multi\n", "x = 1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /multi\n", "print('x is {}'.format(x))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /env_kernel_gateway\n", "print('KERNEL_GATEWAY is {}'.format(os.getenv('KERNEL_GATEWAY')))" ] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "2.7.10" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/kernel_api3.ipynb ================================================ { "cells": [ { 
"cell_type": "markdown", "metadata": {}, "source": [ "# API Creation \n", "This notebook is a sample of how to author a REST API in the notebook environment." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import os, json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "hello_message = 'hello {}'\n", "people = ['Corey', 'Nitin', 'Pete']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Hello" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello\n", "print(hello_message.format('world'))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'args': { \n", " 'person' : people\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/person\n", "req = json.loads(REQUEST)\n", "hello_person = req['args']['person'][0]\n", "print(hello_message.format(hello_person))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'args': { \n", " 'person' : people\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/persons\n", "req = json.loads(REQUEST)\n", "hello_persons = req['args']['person']\n", "print(hello_message.format(', '.join(hello_persons)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/people\n", "hello_people = people\n", "print(hello_message.format(', '.join(hello_people)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'person' : 'test_person'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /hello/:person\n", "req = json.loads(REQUEST)\n", "print(hello_message.format(req['path']['person']))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Messages" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /message\n", "print(hello_message)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'body' : 'test value'\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# PUT /message\n", "req = json.loads(REQUEST)\n", "hello_message = req['body']\n", "print(hello_message)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## People" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /people\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'body' : ['Rick', 'Maggie', 'Glenn', 'Carol', 'Daryl']\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# POST /people\n", "req = json.loads(REQUEST)\n", "people = 
req['body']\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'body' : 'Michonne'\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# PUT /people\n", "req = json.loads(REQUEST)\n", "people.append(req['body'])\n", "print(json.dumps(people))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'index' : 1\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# DELETE /people/:index\n", "req = json.loads(REQUEST)\n", "people.remove(people[int(req['path']['index'])])\n", "print(json.dumps(people))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Error" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /error\n", "this cell should print an error in the reponse" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import sys" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /stderr\n", "print('I am text on stdout')\n", "print('I am text on stderr', file=sys.stderr)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Misc" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'path' : {\n", " 'time': 1\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /sleep/:time\n", "req = json.loads(REQUEST)\n", "from time import sleep\n", "sleep_time = int(req['path']['time'])\n", "sleep(sleep_time)\n", "print(\"Slept for {} seconds\".format(sleep_time))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /execute_result\n", "1+1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "REQUEST = json.dumps({\n", " 'headers' : {\n", " 'Content-Type': 'application/json'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /content-type\n", "req = json.loads(REQUEST)\n", "print(req['headers']['Content-Type'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# GET /multi\n", "x = 1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /multi\n", "print('x is {}'.format(x))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /env_kernel_gateway\n", "print('KERNEL_GATEWAY is {}'.format(os.getenv('KERNEL_GATEWAY')))" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 0 } 
================================================ FILE: enterprise_gateway/tests/resources/kernels/kernel_defaults_test/kernel.json ================================================ { "display_name": "Kernel Defaults Testing", "language": "python", "env": { "KERNEL_VAR1": "kernel_var1_default", "KERNEL_VAR2": "kernel_var2_default", "OTHER_VAR1": "other_var1_default", "OTHER_VAR2": "other_var2_default", "PROCESS_VAR1": "process_var1_default", "PROCESS_VAR2": "process_var2_default" }, "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"] } ================================================ FILE: enterprise_gateway/tests/resources/public/index.html ================================================ Hello world!


================================================ FILE: enterprise_gateway/tests/resources/responses_2.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /json\n", "print '''{ \"hello\" : \"world\"}'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /json\n", "print json.dumps({\n", " 'headers' : {\n", " 'Content-Type' : 'application/json'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /nocontent\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /nocontent\n", "print json.dumps({\n", " 'status' : 204\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /etag\n", "print '''{ \"hello\" : \"world\"}'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /etag\n", "print json.dumps({\n", " 'headers' : {\n", " 'Content-Type' : 'application/json',\n", " 'Etag' : '1234567890'\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.10" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/responses_3.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /json\n", "print('''{ \"hello\" : \"world\"}''')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /json\n", "print(json.dumps({\n", " 'headers' : {\n", " 'Content-Type' : 'application/json'\n", " }\n", " })\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /nocontent\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /nocontent\n", "print(json.dumps({\n", " 'status' : 204\n", " })\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /etag\n", "print('''{ \"hello\" : \"world\"}''')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ResponseInfo GET /etag\n", "print(json.dumps({\n", " 'headers' : {\n", " 'Content-Type' : 'application/json',\n", " 'Etag' : '1234567890'\n", " }\n", " })\n", ")" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.4.3" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/simple_api2.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "name = 'Test Name'" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /name\n", "print name " ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# POST /name\n", "req = json.loads(REQUEST)\n", "name = req['body']\n", "print(name)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3.0 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "2.7.10" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/simple_api3.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import json" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "name = 'Test Name'" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /name\n", "print(name)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# POST /name\n", "req = json.loads(REQUEST)\n", "name = req['body']\n", "print(name)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.4.3" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/unknown_kernel.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# GET /fake", "print 'I am not a real lang!'" ] } ], "metadata": { "kernelspec": { "display_name": "Fake Language 2000", "language": "fakelang", "name": "fakelang2000" }, "language_info": { "codemirror_mode": { "name": "fakelang", "version": 2000 }, "file_extension": ".fl", "mimetype": "text/x-fake-lang", "name": "fakelang", "nbconvert_exporter": "fakelang", "pygments_lexer": "fakelang", "version": "2000" } }, "nbformat": 4, "nbformat_minor": 0 } 
================================================ FILE: enterprise_gateway/tests/resources/zen2.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import this" ] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "2.7.10" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/resources/zen3.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import this" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.4.3" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: enterprise_gateway/tests/test_enterprise_gateway.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Tests for jupyter-enterprise-gateway.""" import os import time import uuid from tempfile import TemporaryDirectory from tornado.escape import json_decode, url_escape from tornado.testing import gen_test from .test_handlers import TestHandlers pjoin = os.path.join class TestEnterpriseGateway(TestHandlers): def setUp(self): super().setUp() # Enable debug logging if necessary # app = self.get_app() # app.settings['kernel_manager'].log.level = logging.DEBUG @gen_test def test_max_kernels_per_user(self): """ Number of kernels should be limited per user. 
""" self.get_app() self.app.max_kernels_per_user = 1 # Request a kernel for bob bob_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "bob"} }' ) self.assertEqual(bob_response.code, 201) # Request a kernel for alice alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # Request another for alice - 403 expected failed_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', raise_error=False, ) self.assertEqual(failed_response.code, 403) # Shut down the kernel for alice kernel = json_decode(alice_response.body) response = yield self.http_client.fetch( self.get_url("/api/kernels/" + url_escape(kernel["id"])), method="DELETE" ) self.assertEqual(response.code, 204) # Try again for alice - expect success alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) @gen_test def test_authorization(self): """ Verify authorized users can start a kernel, unauthorized users cannot """ self.get_app() self.app.authorized_users = {"bob", "alice", "bad_guy"} self.app.unauthorized_users = {"bad_guy"} # Request a kernel for alice alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # Request a kernel for bad_guy - 403 expected failed_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "bad_guy"} }', raise_error=False, ) self.assertEqual(failed_response.code, 403) @gen_test def test_port_range(self): """ Verify port-range behaviors are correct """ app = self.get_app() self.app.port_range = "10000..10999" # range too small # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "100..11099" # invalid lower port # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "10000..65537" # invalid upper port # Request a kernel for alice - 500 expected alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', raise_error=False, ) self.assertEqual(alice_response.code, 500) self.app.port_range = "30000..31000" # valid range # Request a kernel for alice - 201 expected alice_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_USERNAME": "alice"} }', ) self.assertEqual(alice_response.code, 201) # validate ports are in range body = json_decode(alice_response.body) kernel_id = body["id"] port_list = app.settings["kernel_manager"]._kernels.get(kernel_id).ports for port in port_list: self.assertTrue(30000 <= port <= 31000) @gen_test def test_dynamic_updates(self): app = self.app # Get the actual EnterpriseGatewayApp instance s1 = time.time() name = app.config_file_name + ".py" with 
TemporaryDirectory("_1") as td1: os.environ["JUPYTER_CONFIG_DIR"] = td1 config_file = pjoin(td1, name) with open(config_file, "w") as f: f.writelines( [ "c.EnterpriseGatewayApp.impersonation_enabled = False\n", "c.AsyncMappingKernelManager.cull_connected = False\n", ] ) # app.jupyter_path.append(td1) app.load_config_file() app.add_dynamic_configurable("EnterpriseGatewayApp", app) app.add_dynamic_configurable("RemoteMappingKernelManager", app.kernel_manager) with self.assertRaises(RuntimeError): app.add_dynamic_configurable("Bogus", app.log) self.assertEqual(app.impersonation_enabled, False) self.assertEqual(app.kernel_manager.cull_connected, False) # Ensure file update doesn't happen during same second as initial value. # This is necessary on test systems that don't have finer-grained # timestamps (of less than a second). s2 = time.time() if s2 - s1 < 1.0: time.sleep(1.0 - (s2 - s1)) # update config file with open(config_file, "w") as f: f.writelines( [ "c.EnterpriseGatewayApp.impersonation_enabled = True\n", "c.AsyncMappingKernelManager.cull_connected = True\n", ] ) # trigger reload and verify updates app.update_dynamic_configurables() self.assertEqual(app.impersonation_enabled, True) self.assertEqual(app.kernel_manager.cull_connected, True) # repeat to ensure no unexpected changes occurred app.update_dynamic_configurables() self.assertEqual(app.impersonation_enabled, True) self.assertEqual(app.kernel_manager.cull_connected, True) @gen_test def test_kernel_id_env_var(self): """ Verify kernel is created with the given kernel id """ expected_kernel_id = str(uuid.uuid4()) kernel_response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body='{"env": {"KERNEL_ID": "%s"}}' % expected_kernel_id, raise_error=False, ) self.assertEqual(kernel_response.code, 201) kernel = json_decode(kernel_response.body) self.assertEqual(expected_kernel_id, kernel["id"]) ================================================ FILE: enterprise_gateway/tests/test_gatewayapp.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
"""Tests for basic gateway app behavior.""" import logging import os import unittest from tornado.testing import AsyncHTTPTestCase, ExpectLog from enterprise_gateway.enterprisegatewayapp import EnterpriseGatewayApp from enterprise_gateway.mixins import EnterpriseGatewayConfigMixin RESOURCES = os.path.join(os.path.dirname(__file__), "resources") class TestGatewayAppConfig(unittest.TestCase): """Tests configuration of the gateway app.""" def setUp(self): """Saves a copy of the environment.""" self.environ = dict(os.environ) def tearDown(self): """Resets the environment.""" os.environ.clear() os.environ.update(self.environ) def _assert_envs_to_traitlets(self, env_prefix: str): app = EnterpriseGatewayApp() app.init_configurables() self.assertEqual(app.port, 1234) self.assertEqual(app.port_retries, 4321) self.assertEqual(app.ip, "1.1.1.1") self.assertEqual(app.auth_token, "fake-token") self.assertEqual(app.allow_credentials, "true") self.assertEqual(app.allow_headers, "Authorization") self.assertEqual(app.allow_methods, "GET") self.assertEqual(app.allow_origin, "*") self.assertEqual(app.expose_headers, "X-Fake-Header") self.assertEqual(app.max_age, "5") self.assertEqual(app.base_url, "/fake/path") self.assertEqual(app.max_kernels, 1) self.assertEqual(app.default_kernel_name, "fake_kernel") self.assertEqual(app.keyfile, "/test/fake.key") self.assertEqual(app.certfile, "/test/fake.crt") self.assertEqual(app.client_ca, "/test/fake_ca.crt") self.assertEqual(app.ssl_version, 3) if env_prefix == "EG_": # These options did not exist in JKG self.assertEqual(app.kernel_session_manager.enable_persistence, True) self.assertEqual( app.availability_mode, EnterpriseGatewayConfigMixin.AVAILABILITY_REPLICATION ) def test_config_env_vars_bc(self): """B/C env vars should be honored for traitlets.""" # Environment vars are always strings os.environ["KG_PORT"] = "1234" os.environ["KG_PORT_RETRIES"] = "4321" os.environ["KG_IP"] = "1.1.1.1" os.environ["KG_AUTH_TOKEN"] = "fake-token" os.environ["KG_ALLOW_CREDENTIALS"] = "true" os.environ["KG_ALLOW_HEADERS"] = "Authorization" os.environ["KG_ALLOW_METHODS"] = "GET" os.environ["KG_ALLOW_ORIGIN"] = "*" os.environ["KG_EXPOSE_HEADERS"] = "X-Fake-Header" os.environ["KG_MAX_AGE"] = "5" os.environ["KG_BASE_URL"] = "/fake/path" os.environ["KG_MAX_KERNELS"] = "1" os.environ["KG_DEFAULT_KERNEL_NAME"] = "fake_kernel" os.environ["KG_KEYFILE"] = "/test/fake.key" os.environ["KG_CERTFILE"] = "/test/fake.crt" os.environ["KG_CLIENT_CA"] = "/test/fake_ca.crt" os.environ["KG_SSL_VERSION"] = "3" self._assert_envs_to_traitlets("KG_") def test_config_env_vars(self): """Env vars should be honored for traitlets.""" # Environment vars are always strings os.environ["EG_PORT"] = "1234" os.environ["EG_PORT_RETRIES"] = "4321" os.environ["EG_IP"] = "1.1.1.1" os.environ["EG_AUTH_TOKEN"] = "fake-token" os.environ["EG_ALLOW_CREDENTIALS"] = "true" os.environ["EG_ALLOW_HEADERS"] = "Authorization" os.environ["EG_ALLOW_METHODS"] = "GET" os.environ["EG_ALLOW_ORIGIN"] = "*" os.environ["EG_EXPOSE_HEADERS"] = "X-Fake-Header" os.environ["EG_MAX_AGE"] = "5" os.environ["EG_BASE_URL"] = "/fake/path" os.environ["EG_MAX_KERNELS"] = "1" os.environ["EG_DEFAULT_KERNEL_NAME"] = "fake_kernel" os.environ["EG_KEYFILE"] = "/test/fake.key" os.environ["EG_CERTFILE"] = "/test/fake.crt" os.environ["EG_CLIENT_CA"] = "/test/fake_ca.crt" os.environ["EG_SSL_VERSION"] = "3" os.environ["EG_KERNEL_SESSION_PERSISTENCE"] = ( "True" # availability mode will be defaulted to replication ) self._assert_envs_to_traitlets("EG_") def 
test_ssl_options_no_config(self): app = EnterpriseGatewayApp() ssl_options = app._build_ssl_options() self.assertIsNone(ssl_options) def test_authorizer_class_default(self): """Test that authorizer_class defaults to None when not configured.""" app = EnterpriseGatewayApp() app.init_configurables() app.init_webapp() # By default, should use AllowAllAuthorizer from jupyter_server.auth.authorizer import AllowAllAuthorizer authorizer = app.web_app.settings.get("authorizer") self.assertIsNotNone(authorizer) self.assertIsInstance(authorizer, AllowAllAuthorizer) def test_authorizer_class_env_var(self): """Test that authorizer_class can be configured via environment variable.""" # Create a custom authorizer for testing from jupyter_server.auth.authorizer import Authorizer class CustomTestAuthorizer(Authorizer): """Test authorizer for validation""" def is_authorized(self, handler, user, action, resource): return True # Set the environment variable to point to our custom authorizer # We need to make it importable first import sys from types import ModuleType # Create a test module test_module = ModuleType("test_auth_module") test_module.CustomTestAuthorizer = CustomTestAuthorizer sys.modules["test_auth_module"] = test_module try: os.environ["EG_AUTHORIZER_CLASS"] = "test_auth_module.CustomTestAuthorizer" app = EnterpriseGatewayApp() app.init_configurables() app.init_webapp() # Should use our custom authorizer authorizer = app.web_app.settings.get("authorizer") self.assertIsNotNone(authorizer) self.assertIsInstance(authorizer, CustomTestAuthorizer) finally: # Clean up if "test_auth_module" in sys.modules: del sys.modules["test_auth_module"] class TestGatewayAppBase(AsyncHTTPTestCase, ExpectLog): """Base class for integration style tests using HTTP/Websockets against an instance of the gateway app. Attributes ---------- app : EnterpriseGatewayApp Instance of the app """ def tearDown(self): """Shuts down the app after test run.""" if self.app: self.app.shutdown() super().tearDown() def get_app(self): """Returns a tornado.web.Application for the Tornado test runner.""" if hasattr(self, "_app"): return self._app self.app = EnterpriseGatewayApp(log_level=logging.CRITICAL) self.setup_app() self.app.init_configurables() self.setup_configurables() self.app.init_webapp() return self.app.web_app def setup_app(self): """Override to configure the EnterpriseGatewayApp instance before initializing configurables and the web app. """ pass def setup_configurables(self): """Override to configure further settings, such as the personality.""" pass ================================================ FILE: enterprise_gateway/tests/test_handlers.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Tests for jupyter-websocket mode.""" import json import os from tornado.escape import json_decode, json_encode, url_escape from tornado.gen import Return, coroutine from tornado.httpclient import HTTPRequest from tornado.testing import gen_test from tornado.websocket import websocket_connect from .test_gatewayapp import RESOURCES, TestGatewayAppBase class TestHandlers(TestGatewayAppBase): """ Base class for jupyter-websocket mode tests that spawn kernels.
""" def setup_app(self): """Configure JUPYTER_PATH so that we can use local kernelspec files for testing.""" os.environ["JUPYTER_PATH"] = RESOURCES # These are required for setup of test_kernel_defaults # Note: We still reference the DEPRECATED config parameter and environment variable so that # we can test client_envs and inherited_envs, respectively. self.app.env_whitelist = ["TEST_VAR", "OTHER_VAR1", "OTHER_VAR2"] os.environ["EG_ENV_PROCESS_WHITELIST"] = "PROCESS_VAR1,PROCESS_VAR2" os.environ["PROCESS_VAR1"] = "process_var1_override" def tearDown(self): """Shuts down the app after test run.""" # Clean out items added to env if "JUPYTER_PATH" in os.environ: os.environ.pop("JUPYTER_PATH") if "EG_ENV_PROCESS_WHITELIST" in os.environ: os.environ.pop("EG_ENV_PROCESS_WHITELIST") if "PROCESS_VAR1" in os.environ: os.environ.pop("PROCESS_VAR1") super().tearDown() @coroutine def spawn_kernel(self, kernel_body="{}"): """Spawns a kernel using the gateway API and connects a websocket client to it. Parameters ---------- kernel_body : str POST /api/kernels body Returns ------- Future Promise of a WebSocketClientConnection """ # Request a kernel response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body=kernel_body ) self.assertEqual(response.code, 201) # Connect to the kernel via websocket kernel = json_decode(response.body) ws_url = "ws://localhost:{}/api/kernels/{}/channels".format( self.get_http_port(), url_escape(kernel["id"]) ) ws = yield websocket_connect(ws_url) raise Return(ws) def execute_request(self, code): """Creates an execute_request message. Parameters ---------- code : str Code to execute Returns ------- dict The message """ return { "header": { "username": "", "version": "5.0", "session": "", "msg_id": "fake-msg-id", "msg_type": "execute_request", }, "parent_header": {}, "channel": "shell", "content": { "code": code, "silent": False, "store_history": False, "user_expressions": {}, }, "metadata": {}, "buffers": {}, } @coroutine def await_stream(self, ws): """Returns stream output associated with an execute_request.""" while 1: msg = yield ws.read_message() msg = json_decode(msg) msg_type = msg["msg_type"] parent_msg_id = msg["parent_header"]["msg_id"] if msg_type == "stream" and parent_msg_id == "fake-msg-id": raise Return(msg["content"]) class TestDefaults(TestHandlers): """Tests gateway behavior.""" @gen_test def test_startup(self): """Root of kernels resource should be OK.""" self.app.web_app.settings["eg_list_kernels"] = True response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) @gen_test def test_headless(self): """Other notebook resources should not exist.""" response = yield self.http_client.fetch(self.get_url("/api/contents"), raise_error=False) self.assertEqual(response.code, 404) response = yield self.http_client.fetch(self.get_url("/"), raise_error=False) self.assertEqual(response.code, 404) response = yield self.http_client.fetch(self.get_url("/tree"), raise_error=False) self.assertEqual(response.code, 404) @gen_test def test_check_origin(self): """Allow origin setting should pass through to base handlers.""" response = yield self.http_client.fetch( self.get_url("/api/kernelspecs"), method="GET", headers={"Origin": "fake.com:8888"}, raise_error=False, ) self.assertEqual(response.code, 404) app = self.get_app() app.settings["allow_origin"] = "*" response = yield self.http_client.fetch( self.get_url("/api/kernelspecs"), method="GET", headers={"Origin": "fake.com:8888"}, raise_error=False, ) 
self.assertEqual(response.code, 200) @gen_test def test_auth_token(self): """All server endpoints should check the configured auth token.""" # Set token requirement app = self.get_app() app.settings["eg_auth_token"] = "fake-token" # Request API without the token response = yield self.http_client.fetch( self.get_url("/api"), method="GET", raise_error=False ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( self.get_url("/api"), method="GET", headers={"Authorization": "token fake-token"}, raise_error=False, ) self.assertEqual(response.code, 200) # Request kernelspecs without the token response = yield self.http_client.fetch( self.get_url("/api/kernelspecs"), method="GET", raise_error=False ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( self.get_url("/api/kernelspecs"), method="GET", headers={"Authorization": "token fake-token"}, raise_error=False, ) self.assertEqual(response.code, 200) # Request a kernel without the token response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}", raise_error=False ) self.assertEqual(response.code, 401) # Request with the token now response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}", headers={"Authorization": "token fake-token"}, raise_error=False, ) self.assertEqual(response.code, 201) kernel = json_decode(response.body) # Request kernel info without the token response = yield self.http_client.fetch( self.get_url("/api/kernels/" + url_escape(kernel["id"])), method="GET", raise_error=False, ) self.assertEqual(response.code, 401) # Now with it response = yield self.http_client.fetch( self.get_url("/api/kernels/" + url_escape(kernel["id"])), method="GET", headers={"Authorization": "token fake-token"}, raise_error=False, ) self.assertEqual(response.code, 200) # Request websocket connection without the token ws_url = "ws://localhost:{}/api/kernels/{}/channels".format( self.get_http_port(), url_escape(kernel["id"]) ) # No option to ignore errors so try/except try: ws = yield websocket_connect(ws_url) except Exception as ex: self.assertEqual(ex.code, 401) else: self.assertTrue(False, "no exception raised") # Now request the websocket with the token ws_req = HTTPRequest(ws_url, headers={"Authorization": "token fake-token"}) ws = yield websocket_connect(ws_req) ws.close() @gen_test def test_cors_headers(self): """All kernel endpoints should respond with configured CORS headers.""" app = self.get_app() app.settings["eg_allow_credentials"] = "false" app.settings["eg_allow_headers"] = "Authorization,Content-Type" app.settings["eg_allow_methods"] = "GET,POST" app.settings["eg_allow_origin"] = "https://jupyter.org" app.settings["eg_expose_headers"] = "X-My-Fake-Header" app.settings["eg_max_age"] = "600" app.settings["eg_list_kernels"] = True # Get kernels to check headers response = yield self.http_client.fetch(self.get_url("/api/kernels"), method="GET") self.assertEqual(response.code, 200) self.assertEqual(response.headers["Access-Control-Allow-Credentials"], "false") self.assertEqual( response.headers["Access-Control-Allow-Headers"], "Authorization,Content-Type" ) self.assertEqual(response.headers["Access-Control-Allow-Methods"], "GET,POST") self.assertEqual(response.headers["Access-Control-Allow-Origin"], "https://jupyter.org") self.assertEqual(response.headers["Access-Control-Expose-Headers"], "X-My-Fake-Header") self.assertEqual(response.headers["Access-Control-Max-Age"], "600")
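# No Content-Security-Policy header should be present on kernel API responses.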
self.assertEqual(response.headers.get("Content-Security-Policy"), None) @gen_test def test_max_kernels(self): """Number of kernels should be limited.""" app = self.get_app() app.settings["eg_max_kernels"] = 1 # Request a kernel response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) # Request another response2 = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}", raise_error=False ) self.assertEqual(response2.code, 403) # Shut down the kernel kernel = json_decode(response.body) response = yield self.http_client.fetch( self.get_url("/api/kernels/" + url_escape(kernel["id"])), method="DELETE" ) self.assertEqual(response.code, 204) # Try again response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) @gen_test def test_get_api(self): """Server should respond with the API version metadata.""" response = yield self.http_client.fetch(self.get_url("/api")) self.assertEqual(response.code, 200) info = json_decode(response.body) self.assertIn("version", info) self.assertIn("gateway_version", info) @gen_test def test_get_kernelspecs(self): """Server should respond with kernel spec metadata.""" response = yield self.http_client.fetch(self.get_url("/api/kernelspecs")) self.assertEqual(response.code, 200) specs = json_decode(response.body) self.assertIn("kernelspecs", specs) self.assertIn("default", specs) @gen_test def test_get_kernels(self): """Server should respond with running kernel information.""" self.app.web_app.settings["eg_list_kernels"] = True response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) kernels = json_decode(response.body) self.assertEqual(len(kernels), 0) # Launch a kernel response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="{}" ) self.assertEqual(response.code, 201) kernel = json_decode(response.body) # Check the list again response = yield self.http_client.fetch(self.get_url("/api/kernels")) self.assertEqual(response.code, 200) kernels = json_decode(response.body) self.assertEqual(len(kernels), 1) self.assertEqual(kernels[0]["id"], kernel["id"]) @gen_test def test_kernel_comm(self): """Default kernel should launch and accept commands.""" ws = yield self.spawn_kernel() # Send a request for kernel info ws.write_message( json_encode( { "header": { "username": "", "version": "5.0", "session": "", "msg_id": "fake-msg-id", "msg_type": "kernel_info_request", }, "parent_header": {}, "channel": "shell", "content": {}, "metadata": {}, "buffers": {}, } ) ) # Assert the reply comes back. Test will timeout if this hangs. # Note that the number of messages preceding the reply may vary with upstream changes, # so the loop below reads up to 8 messages before giving up.
for _ in range(8): msg = yield ws.read_message() msg = json_decode(msg) if msg["msg_type"] == "kernel_info_reply": break else: self.assertTrue(False, "never received kernel_info_reply") ws.close() @gen_test def test_no_discovery(self): """The list of kernels / sessions should be forbidden by default.""" response = yield self.http_client.fetch(self.get_url("/api/kernels"), raise_error=False) self.assertEqual(response.code, 403) response = yield self.http_client.fetch(self.get_url("/api/sessions"), raise_error=False) self.assertEqual(response.code, 403) @gen_test def test_crud_sessions(self): """Server should create, list, and delete sessions.""" app = self.get_app() app.settings["eg_list_kernels"] = True # Ensure no sessions by default response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 0) # Launch a session response = yield self.http_client.fetch( self.get_url("/api/sessions"), method="POST", body='{"id":"any","notebook":{"path":"anywhere"},"kernel":{"name":"python"}}', ) self.assertEqual(response.code, 201) session = json_decode(response.body) # Check the list again response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 1) self.assertEqual(sessions[0]["id"], session["id"]) # Delete the session response = yield self.http_client.fetch( self.get_url("/api/sessions/" + session["id"]), method="DELETE" ) self.assertEqual(response.code, 204) # Make sure the list is empty response = yield self.http_client.fetch(self.get_url("/api/sessions")) self.assertEqual(response.code, 200) sessions = json_decode(response.body) self.assertEqual(len(sessions), 0) @gen_test def test_json_errors(self): """Handlers should always return JSON errors.""" # A handler that we override response = yield self.http_client.fetch(self.get_url("/api/kernels"), raise_error=False) body = json_decode(response.body) self.assertEqual(response.code, 403) self.assertEqual(body["reason"], "Forbidden") # A handler from the notebook base response = yield self.http_client.fetch( self.get_url("/api/kernels/1-2-3-4-5"), raise_error=False ) body = json_decode(response.body) self.assertEqual(response.code, 404) # Base handler json_errors decorator does not capture reason properly # self.assertEqual(body['reason'], 'Not Found') self.assertIn("1-2-3-4-5", body["message"]) # The last resort not found handler response = yield self.http_client.fetch(self.get_url("/fake-endpoint"), raise_error=False) body = json_decode(response.body) self.assertEqual(response.code, 404) self.assertEqual(body["reason"], "Not Found") @gen_test def test_kernel_env(self): """Kernel should start with environment vars defined in the request.""" # Note: Only envs in request prefixed with KERNEL_ or in env_whitelist (TEST_VAR) # with the exception of KERNEL_GATEWAY - which is "system owned". 
kernel_body = json.dumps( { "name": "python", "env": { "KERNEL_FOO": "kernel-foo-value", "NOT_KERNEL": "ignored", "KERNEL_GATEWAY": "overridden", "TEST_VAR": "allowed", }, } ) ws = yield self.spawn_kernel(kernel_body) req = self.execute_request( "import os; " 'print(os.getenv("KERNEL_FOO"), ' 'os.getenv("NOT_KERNEL"), ' 'os.getenv("KERNEL_GATEWAY"), ' 'os.getenv("TEST_VAR"))' ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) self.assertEqual(content["name"], "stdout") self.assertIn("kernel-foo-value", content["text"]) self.assertNotIn("ignored", content["text"]) self.assertNotIn("overridden", content["text"]) self.assertIn("allowed", content["text"]) ws.close() @gen_test def test_kernel_defaults(self): """Kernel should start with env vars defined in request overriding env vars defined in kernelspec.""" # Note: Only envs in request prefixed with KERNEL_ or in env_whitelist (OTHER_VAR1, OTHER_VAR2) # with the exception of KERNEL_GATEWAY - which is "system owned" - will be set in kernel env. # Since OTHER_VAR1 is not in the request, its existing value in kernel.json will be used. # NOTE: This test requires use of the kernels/kernel_defaults_test/kernel.json file. kernel_body = json.dumps( { "name": "kernel_defaults_test", "env": { "KERNEL_VAR1": "kernel_var1_override", # Ensure this value overrides that in kernel.json "KERNEL_VAR3": "kernel_var3_value", # Any KERNEL_ flows to kernel "OTHER_VAR2": "other_var2_override", # Ensure this value overrides that in kernel.json "KERNEL_GATEWAY": "kernel_gateway_override", # Ensure KERNEL_GATEWAY is not overridden }, } ) ws = yield self.spawn_kernel(kernel_body) req = self.execute_request( 'import os; print(os.getenv("KERNEL_VAR1"), os.getenv("KERNEL_VAR2"), ' 'os.getenv("KERNEL_VAR3"), os.getenv("KERNEL_GATEWAY"), os.getenv("OTHER_VAR1"), ' 'os.getenv("OTHER_VAR2"), os.getenv("PROCESS_VAR1"), os.getenv("PROCESS_VAR2"))' ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) self.assertEqual(content["name"], "stdout") self.assertIn("kernel_var1_override", content["text"]) self.assertIn("kernel_var2_default", content["text"]) self.assertIn("kernel_var3_value", content["text"]) self.assertNotIn("kernel_gateway_override", content["text"]) self.assertIn("other_var1_default", content["text"]) self.assertIn("other_var2_override", content["text"]) self.assertIn("process_var1_override", content["text"]) self.assertIn("process_var2_default", content["text"]) ws.close() @gen_test def test_get_swagger_yaml_spec(self): """Getting the swagger.yaml spec should be ok""" response = yield self.http_client.fetch(self.get_url("/api/swagger.yaml")) self.assertEqual(response.code, 200) @gen_test def test_get_swagger_json_spec(self): """Getting the swagger.json spec should be ok""" response = yield self.http_client.fetch(self.get_url("/api/swagger.json")) self.assertEqual(response.code, 200) @gen_test def test_kernel_env_auth_token(self): """Kernel should not have EG_AUTH_TOKEN in its environment.""" os.environ["EG_AUTH_TOKEN"] = "fake-secret" ws = None try: ws = yield self.spawn_kernel() req = self.execute_request('import os; print(os.getenv("EG_AUTH_TOKEN"))') ws.write_message(json_encode(req)) content = yield self.await_stream(ws) self.assertNotIn("fake-secret", content["text"]) finally: del os.environ["EG_AUTH_TOKEN"] if ws: ws.close() class TestCustomDefaultKernel(TestHandlers): """Tests gateway behavior when setting a custom default kernelspec.""" def setup_app(self): self.app.default_kernel_name = "fake-kernel" @gen_test 
def test_default_kernel_name(self): """The default kernel name should be used on empty requests.""" # Request without an explicit kernel name response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="POST", body="", raise_error=False ) self.assertEqual(response.code, 500) self.assertTrue("raise NoSuchKernel" in str(response.body)) class TestEnableDiscovery(TestHandlers): """Tests gateway behavior with kernel listing enabled.""" def setup_configurables(self): """Enables kernel listing for all tests.""" self.app.list_kernels = True @gen_test def test_enable_kernel_list(self): """The list of kernels, sessions, and activities should be available.""" response = yield self.http_client.fetch( self.get_url("/api/kernels"), ) self.assertEqual(response.code, 200) self.assertTrue("[]" in str(response.body)) response = yield self.http_client.fetch( self.get_url("/api/sessions"), ) self.assertEqual(response.code, 200) self.assertTrue("[]" in str(response.body)) class TestBaseURL(TestHandlers): """Tests gateway behavior when a custom base URL is configured.""" def setup_app(self): """Sets the custom base URL and enables kernel listing.""" self.app.base_url = "/fake/path" def setup_configurables(self): """Enables kernel listing for all tests.""" self.app.list_kernels = True @gen_test def test_base_url(self): """Server should mount resources under configured base.""" # Should not exist at root response = yield self.http_client.fetch( self.get_url("/api/kernels"), method="GET", raise_error=False ) self.assertEqual(response.code, 404) # Should exist under path response = yield self.http_client.fetch( self.get_url("/fake/path/api/kernels"), method="GET" ) self.assertEqual(response.code, 200) class TestRelativeBaseURL(TestHandlers): """Tests gateway behavior when a relative base URL is configured.""" def setup_app(self): """Sets the custom base URL as a relative path.""" self.app.base_url = "fake/path" @gen_test def test_base_url(self): """Server should mount resources under fixed base.""" self.app.web_app.settings["eg_list_kernels"] = True # Should exist under path response = yield self.http_client.fetch( self.get_url("/fake/path/api/kernels"), method="GET" ) self.assertEqual(response.code, 200) class TestWildcardEnvs(TestHandlers): """Base class for jupyter-websocket mode tests that spawn kernels.""" def setup_app(self): """Configure JUPYTER_PATH so that we can use local kernelspec files for testing.""" super().setup_app() # overwrite env_whitelist self.app.env_whitelist = ["*"] @gen_test def test_kernel_wildcard_env(self): """Kernel should start with environment vars defined in the request.""" # Note: Since env_whitelist == '*', all values should be present. 
kernel_body = json.dumps( { "name": "python", "env": { "KERNEL_FOO": "kernel-foo-value", "OTHER_VAR1": "other-var1-value", "OTHER_VAR2": "other-var2-value", "TEST_VAR": "test-var-value", }, } ) ws = yield self.spawn_kernel(kernel_body) req = self.execute_request( "import os; " 'print(os.getenv("KERNEL_FOO"), ' 'os.getenv("OTHER_VAR1"), ' 'os.getenv("OTHER_VAR2"), ' 'os.getenv("TEST_VAR"))' ) ws.write_message(json_encode(req)) content = yield self.await_stream(ws) self.assertEqual(content["name"], "stdout") self.assertIn("kernel-foo-value", content["text"]) self.assertIn("other-var1-value", content["text"]) self.assertIn("other-var2-value", content["text"]) self.assertIn("test-var-value", content["text"]) ws.close() ================================================ FILE: enterprise_gateway/tests/test_kernelspec_cache.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Tests for KernelSpecCache.""" import asyncio import json import os import shutil import sys import jupyter_core.paths import pytest from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel from enterprise_gateway.services.kernelspecs import KernelSpecCache # BEGIN - Remove once transition to jupyter_server occurs def mkdir(tmp_path, *parts): path = tmp_path.joinpath(*parts) if not path.exists(): path.mkdir(parents=True) return path home_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "home")) data_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "data")) config_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "config")) runtime_dir = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "runtime")) system_jupyter_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "share", "jupyter")) env_jupyter_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "env", "share", "jupyter")) system_config_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "etc", "jupyter")) env_config_path = pytest.fixture(lambda tmp_path: mkdir(tmp_path, "env", "etc", "jupyter")) @pytest.fixture def environ( monkeypatch, tmp_path, home_dir, data_dir, config_dir, runtime_dir, system_jupyter_path, system_config_path, env_jupyter_path, env_config_path, ): monkeypatch.setenv("HOME", str(home_dir)) monkeypatch.setenv("PYTHONPATH", os.pathsep.join(sys.path)) monkeypatch.setenv("JUPYTER_NO_CONFIG", "1") monkeypatch.setenv("JUPYTER_CONFIG_DIR", str(config_dir)) monkeypatch.setenv("JUPYTER_DATA_DIR", str(data_dir)) monkeypatch.setenv("JUPYTER_RUNTIME_DIR", str(runtime_dir)) monkeypatch.setattr(jupyter_core.paths, "SYSTEM_JUPYTER_PATH", [str(system_jupyter_path)]) monkeypatch.setattr(jupyter_core.paths, "ENV_JUPYTER_PATH", [str(env_jupyter_path)]) monkeypatch.setattr(jupyter_core.paths, "SYSTEM_CONFIG_PATH", [str(system_config_path)]) monkeypatch.setattr(jupyter_core.paths, "ENV_CONFIG_PATH", [str(env_config_path)]) # END - Remove once transition to jupyter_server occurs kernelspec_json = { "argv": ["cat", "{connection_file}"], "display_name": "Test kernel: {kernel_name}", } def _install_kernelspec(kernels_dir, kernel_name): """install a sample kernel in a kernels directory""" kernelspec_dir = os.path.join(kernels_dir, kernel_name) os.makedirs(kernelspec_dir) json_file = os.path.join(kernelspec_dir, "kernel.json") named_json = kernelspec_json.copy() named_json["display_name"] = named_json["display_name"].format(kernel_name=kernel_name) with open(json_file, "w") as f: json.dump(named_json, f) return kernelspec_dir def 
_modify_kernelspec(kernelspec_dir, kernel_name): json_file = os.path.join(kernelspec_dir, "kernel.json") kernel_json = kernelspec_json.copy() kernel_json["display_name"] = f"{kernel_name} modified!" with open(json_file, "w") as f: json.dump(kernel_json, f) kernelspec_location = pytest.fixture(lambda data_dir: mkdir(data_dir, "kernels")) other_kernelspec_location = pytest.fixture( lambda env_jupyter_path: mkdir(env_jupyter_path, "kernels") ) @pytest.fixture def setup_kernelspecs(environ, kernelspec_location): # Only populate factory info _install_kernelspec(str(kernelspec_location), "test1") _install_kernelspec(str(kernelspec_location), "test2") _install_kernelspec(str(kernelspec_location), "test3") @pytest.fixture def kernel_spec_manager(environ, setup_kernelspecs): yield KernelSpecManager(ensure_native_kernel=False) @pytest.fixture def kernel_spec_cache(is_enabled, kernel_spec_manager): kspec_cache = KernelSpecCache.instance( kernel_spec_manager=kernel_spec_manager, cache_enabled=is_enabled ) yield kspec_cache kspec_cache = None KernelSpecCache.clear_instance() @pytest.fixture(params=[False, True]) # Add types as needed def is_enabled(request): return request.param async def tests_get_all_specs(kernel_spec_cache): kspecs = await kernel_spec_cache.get_all_specs() assert len(kspecs) == 3 async def tests_get_named_spec(kernel_spec_cache): kspec = await kernel_spec_cache.get_kernel_spec("test2") assert kspec.display_name == "Test kernel: test2" async def tests_get_modified_spec(kernel_spec_cache): kspec = await kernel_spec_cache.get_kernel_spec("test2") assert kspec.display_name == "Test kernel: test2" # Modify entry _modify_kernelspec(kspec.resource_dir, "test2") await asyncio.sleep(0.5) # sleep for a half-second to allow cache to update item kspec = await kernel_spec_cache.get_kernel_spec("test2") assert kspec.display_name == "test2 modified!" 
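# Installing a kernelspec into a previously unseen location should add that location to the cache's observed directories and record exactly one cache miss.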
async def tests_add_spec(kernel_spec_cache, kernelspec_location, other_kernelspec_location): assert len(kernel_spec_cache.observed_dirs) == (1 if kernel_spec_cache.cache_enabled else 0) assert ( str(kernelspec_location) in kernel_spec_cache.observed_dirs if kernel_spec_cache.cache_enabled else True ) _install_kernelspec(str(other_kernelspec_location), "added") kspec = await kernel_spec_cache.get_kernel_spec("added") # Ensure new location has been added to observed_dirs assert len(kernel_spec_cache.observed_dirs) == (2 if kernel_spec_cache.cache_enabled else 0) assert ( str(other_kernelspec_location) in kernel_spec_cache.observed_dirs if kernel_spec_cache.cache_enabled else True ) assert kspec.display_name == "Test kernel: added" assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) # Add another to an existing observed directory, no cache miss here _install_kernelspec(str(kernelspec_location), "added2") await asyncio.sleep( 0.5 ) # sleep for a half-second to allow cache to add item (no cache miss in this case) kspec = await kernel_spec_cache.get_kernel_spec("added2") assert kspec.display_name == "Test kernel: added2" assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) async def tests_remove_spec(kernel_spec_cache): kspec = await kernel_spec_cache.get_kernel_spec("test2") assert kspec.display_name == "Test kernel: test2" assert kernel_spec_cache.cache_misses == 0 shutil.rmtree(kspec.resource_dir) await asyncio.sleep(0.5) # sleep for a half-second to allow cache to remove item with pytest.raises(NoSuchKernel): await kernel_spec_cache.get_kernel_spec("test2") assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) async def tests_get_missing(kernel_spec_cache): with pytest.raises(NoSuchKernel): await kernel_spec_cache.get_kernel_spec("missing") assert kernel_spec_cache.cache_misses == (1 if kernel_spec_cache.cache_enabled else 0) ================================================ FILE: enterprise_gateway/tests/test_mixins.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
"""Tests for handler mixins.""" import json import unittest try: from unittest.mock import Mock except ImportError: # Python 2.7: use backport from unittest.mock import Mock from tornado import web from enterprise_gateway.mixins import JSONErrorsMixin, TokenAuthorizationMixin class SuperTokenAuthHandler: """Super class for the handler using TokenAuthorizationMixin.""" is_prepared = False def prepare(self): # called by the mixin when authentication succeeds self.is_prepared = True class TestableTokenAuthHandler(TokenAuthorizationMixin, SuperTokenAuthHandler): """Implementation that uses the TokenAuthorizationMixin for testing.""" __test__ = False def __init__(self, token=""): self.settings = {"eg_auth_token": token} self.arguments = {} self.response = None self.status_code = None def send_error(self, status_code): self.status_code = status_code def get_argument(self, name, default=""): return self.arguments.get(name, default) class TestTokenAuthMixin(unittest.TestCase): """Unit tests the Token authorization mixin.""" def setUp(self): """Creates a handler that uses the mixin.""" self.mixin = TestableTokenAuthHandler("YouKnowMe") def test_no_token_required(self): """No token required - status should be None.""" self.mixin.settings["eg_auth_token"] = "" self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) self.assertEqual(self.mixin.status_code, None) def test_missing_token(self): """Missing token - tatus should be 'unauthorized'.""" attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) self.assertEqual(self.mixin.status_code, 401) def test_valid_header_token(self): """Valid header token - status should be None.""" attrs = {"headers": {"Authorization": "token YouKnowMe"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) self.assertEqual(self.mixin.status_code, None) def test_wrong_header_token(self): """Wrong header token - status should be 'unauthorized'.""" attrs = {"headers": {"Authorization": "token NeverHeardOf"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) self.assertEqual(self.mixin.status_code, 401) def test_valid_url_token(self): """Valid url token - status should be None.""" self.mixin.arguments["token"] = "YouKnowMe" attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) self.assertEqual(self.mixin.status_code, None) def test_wrong_url_token(self): """Wrong url token - tatus should be 'unauthorized'.""" self.mixin.arguments["token"] = "NeverHeardOf" attrs = {"headers": {}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, False) self.assertEqual(self.mixin.status_code, 401) def test_differing_tokens_valid_url(self): """Differing tokens - status should be None, URL token takes precedence""" self.mixin.arguments["token"] = "YouKnowMe" attrs = {"headers": {"Authorization": "token NeverHeardOf"}} self.mixin.request = Mock(**attrs) self.mixin.prepare() self.assertEqual(self.mixin.is_prepared, True) self.assertEqual(self.mixin.status_code, None) def test_differing_tokens_wrong_url(self): """Differing token w/ wrong url - status should be 'unauthorized', URL token takes precedence""" attrs = {"headers": {"Authorization": "token YouKnowMe"}} self.mixin.request = Mock(**attrs) self.mixin.arguments["token"] = "NeverHeardOf" self.mixin.prepare() 
self.assertEqual(self.mixin.is_prepared, False) self.assertEqual(self.mixin.status_code, 401) class TestableJSONErrorsHandler(JSONErrorsMixin): """Implementation that uses the JSONErrorsMixin for testing.""" __test__ = False def __init__(self): self.headers = {} self.response = None self.status_code = None self.reason = None def finish(self, response): self.response = response def set_status(self, status_code, reason=None): self.status_code = status_code self.reason = reason def set_header(self, name, value): self.headers[name] = value class TestJSONErrorsMixin(unittest.TestCase): """Unit tests the JSON errors mixin.""" def setUp(self): """Creates a handler that uses the mixin.""" self.mixin = TestableJSONErrorsHandler() def test_status(self): """Status should be set on the response.""" self.mixin.write_error(404) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 404) self.assertEqual(response["reason"], "Not Found") self.assertEqual(response["message"], "") def test_custom_status(self): """Custom reason from exception should be set in the response.""" exc = web.HTTPError(500, reason="fake-reason") self.mixin.write_error(500, exc_info=[None, exc]) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 500) self.assertEqual(response["reason"], "fake-reason") self.assertEqual(response["message"], "") def test_log_message(self): """Custom message from exception should be set in the response.""" exc = web.HTTPError(410, log_message="fake-message") self.mixin.write_error(410, exc_info=[None, exc]) response = json.loads(self.mixin.response) self.assertEqual(self.mixin.status_code, 410) self.assertEqual(response["reason"], "Gone") self.assertEqual(response["message"], "fake-message") ================================================ FILE: enterprise_gateway/tests/test_process_proxy.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License.
"""Tests for process proxy functionality.""" import os import unittest from unittest.mock import Mock, patch from tornado import web from enterprise_gateway.services.processproxies.container import _parse_prohibited_ids # Mock Kubernetes configuration before importing the module with patch('kubernetes.config.load_incluster_config'), patch('kubernetes.config.load_kube_config'): from enterprise_gateway.services.processproxies.k8s import KubernetesProcessProxy class TestParseProhibitedIds(unittest.TestCase): """Test parsing of prohibited UID/GID environment variables.""" def test_default_value(self): with patch.dict(os.environ, {}, clear=False): os.environ.pop("TEST_IDS", None) result = _parse_prohibited_ids("TEST_IDS", "0") self.assertEqual(result, [0]) def test_multiple_values(self): with patch.dict(os.environ, {"TEST_IDS": "0,1000"}): result = _parse_prohibited_ids("TEST_IDS", "0") self.assertEqual(result, [0, 1000]) def test_values_with_spaces(self): with patch.dict(os.environ, {"TEST_IDS": "0, 1000, 65534"}): result = _parse_prohibited_ids("TEST_IDS", "0") self.assertEqual(result, [0, 1000, 65534]) def test_invalid_entries_raise_value_error(self): with patch.dict(os.environ, {"TEST_IDS": "0,abc,1000"}): with self.assertRaises(ValueError) as ctx: _parse_prohibited_ids("TEST_IDS", "0") self.assertIn("abc", str(ctx.exception)) self.assertIn("TEST_IDS", str(ctx.exception)) def test_username_instead_of_uid_raises_value_error(self): with patch.dict(os.environ, {"TEST_IDS": "root"}): with self.assertRaises(ValueError) as ctx: _parse_prohibited_ids("TEST_IDS", "0") self.assertIn("root", str(ctx.exception)) def test_empty_entries_ignored(self): with patch.dict(os.environ, {"TEST_IDS": "0,,1000"}): result = _parse_prohibited_ids("TEST_IDS", "0") self.assertEqual(result, [0, 1000]) class TestContainerProxyProhibitedIds(unittest.TestCase): """Test UID/GID validation in ContainerProcessProxy.""" def setUp(self): self.mock_kernel_manager = Mock() self.mock_kernel_manager.get_kernel_username.return_value = "testuser" self.mock_kernel_manager.port_range = "0..0" self.proxy_config = {"kernel_id": "test-kernel-id", "kernel_name": "python3"} with patch( 'enterprise_gateway.services.processproxies.k8s.KernelSessionManager' ) as mock_session_manager, patch( 'enterprise_gateway.services.processproxies.processproxy.ResponseManager' ): mock_session_manager.get_kernel_username.return_value = "testuser" self.proxy = KubernetesProcessProxy(self.mock_kernel_manager, self.proxy_config) def _make_kwargs(self, uid=None, gid=None): env = {} if uid is not None: env["KERNEL_UID"] = uid if gid is not None: env["KERNEL_GID"] = gid return {"env": env} def test_valid_uid_gid_passes(self): kwargs = self._make_kwargs(uid="1000", gid="100") self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(kwargs["env"]["KERNEL_UID"], "1000") self.assertEqual(kwargs["env"]["KERNEL_GID"], "100") def test_defaults_used_when_not_provided(self): kwargs = self._make_kwargs() self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(kwargs["env"]["KERNEL_UID"], "1000") self.assertEqual(kwargs["env"]["KERNEL_GID"], "100") def test_prohibited_uid_exact_match(self): kwargs = self._make_kwargs(uid="0", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_prohibited_gid_exact_match(self): kwargs = self._make_kwargs(uid="1000", gid="0") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) 
self.assertEqual(ctx.exception.status_code, 403) def test_trailing_whitespace_uid_denied(self): kwargs = self._make_kwargs(uid="0 ", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_leading_whitespace_uid_denied(self): kwargs = self._make_kwargs(uid=" 0", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_leading_zeros_uid_denied(self): kwargs = self._make_kwargs(uid="00", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_plus_sign_uid_denied(self): kwargs = self._make_kwargs(uid="+0", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_non_numeric_uid_rejected(self): kwargs = self._make_kwargs(uid="abc", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_empty_uid_rejected(self): kwargs = self._make_kwargs(uid="", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) def test_negative_uid_rejected(self): kwargs = self._make_kwargs(uid="-1", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) self.assertIn("must be in range", ctx.exception.reason) def test_negative_gid_rejected(self): kwargs = self._make_kwargs(uid="1000", gid="-1") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) self.assertIn("must be in range", ctx.exception.reason) def test_uid_exceeding_uint32_max_rejected(self): kwargs = self._make_kwargs(uid="4294967296", gid="100") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) self.assertIn("must be in range", ctx.exception.reason) def test_gid_exceeding_uint32_max_rejected(self): kwargs = self._make_kwargs(uid="1000", gid="4294967296") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) self.assertIn("must be in range", ctx.exception.reason) def test_uid_at_uint32_max_allowed(self): kwargs = self._make_kwargs(uid="4294967295", gid="100") self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(kwargs["env"]["KERNEL_UID"], "4294967295") def test_normalized_values_stored(self): kwargs = self._make_kwargs(uid=" 1000 ", gid=" 100 ") self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(kwargs["env"]["KERNEL_UID"], "1000") self.assertEqual(kwargs["env"]["KERNEL_GID"], "100") def test_both_uid_and_gid_checked_independently(self): kwargs = self._make_kwargs(uid="1000", gid="0") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) self.assertEqual(ctx.exception.status_code, 403) self.assertIn("GID", ctx.exception.reason) def test_trailing_whitespace_gid_denied(self): kwargs = self._make_kwargs(uid="1000", gid="0 ") with self.assertRaises(web.HTTPError) as ctx: self.proxy._enforce_prohibited_ids(**kwargs) 
self.assertEqual(ctx.exception.status_code, 403) class TestKubernetesProcessProxy(unittest.TestCase): """Test secure template substitution in Kubernetes process proxy.""" def setUp(self): """Set up test fixtures.""" self.mock_kernel_manager = Mock() self.mock_kernel_manager.get_kernel_username.return_value = "testuser" self.mock_kernel_manager.port_range = "0..0" # Mock port range # Mock proxy config self.proxy_config = {"kernel_id": "test-kernel-id", "kernel_name": "python3"} with patch( 'enterprise_gateway.services.processproxies.k8s.KernelSessionManager' ) as mock_session_manager, patch( 'enterprise_gateway.services.processproxies.processproxy.ResponseManager' ): mock_session_manager.get_kernel_username.return_value = "testuser" self.proxy = KubernetesProcessProxy(self.mock_kernel_manager, self.proxy_config) self.proxy.kernel_id = "test-kernel-id" def test_valid_template_substitution(self): """Test valid template variable substitution.""" test_cases = [ # Basic variable substitution ("{{ kernel_id }}", {"kernel_id": "test-123"}, "test-123"), # Multiple variables ( "{{ kernel_namespace }}-{{ kernel_id }}", {"kernel_namespace": "default", "kernel_id": "test-123"}, "default-test-123", ), # Variables with underscores ("{{ kernel_image_pull_policy }}", {"kernel_image_pull_policy": "Always"}, "Always"), # Whitespace handling ("{{ kernel_id }}", {"kernel_id": "test-123"}, "test-123"), ] for template, variables, expected in test_cases: with self.subTest(template=template): result = self.proxy._safe_template_substitute(template, variables) self.assertEqual(result, expected) def test_missing_variables_fallback(self): # Test the full pod name determination process kwargs = { "env": { "KERNEL_POD_NAME": "{{ missing_var }}", "KERNEL_NAMESPACE": "production", } } with patch.object(self.proxy, 'log'), patch( 'enterprise_gateway.services.processproxies.k8s.KernelSessionManager' ) as mock_session_manager: mock_session_manager.get_kernel_username.return_value = "testuser" result = self.proxy._determine_kernel_pod_name(**kwargs) # Should fall back to default naming: kernel_username + "-" + kernel_id self.assertEqual(result, "testuser-test-kernel-id") def test_malicious_template_injection_prevention(self): """Test prevention of malicious template injection attacks.""" malicious_templates = [ # Python code execution attempts "{{ ''.__class__.__mro__[1].__subclasses__()[104].__init__.__globals__['sys'].exit() }}", "{{ __import__('os').system('rm -rf /') }}", "{{ exec('print(\"pwned\")') }}", "{{ eval('1+1') }}", # Attribute access attempts "{{ kernel_id.__class__ }}", "{{ kernel_id.__dict__ }}", "{{ kernel_id.__globals__ }}", # Function calls "{{ range(10) }}", "{{ len(kernel_id) }}", "{{ str.upper(kernel_id) }}", # Jinja2 filters and expressions "{{ kernel_id|upper }}", "{{ kernel_id + '_suffix' }}", "{{ 1 + 1 }}", # Complex expressions "{{ kernel_id if kernel_id else 'default' }}", "{{ kernel_id[:5] }}", ] variables = {"kernel_id": "test-123"} for malicious_template in malicious_templates: with self.subTest(template=malicious_template), patch.object( self.proxy, 'log' ) as mock_log: result = self.proxy._safe_template_substitute(malicious_template, variables) # All malicious templates should be treated as invalid and return None self.assertIsNone(result) mock_log.warning.assert_called_once() # Should warn about unsupported expressions self.assertIn("Invalid template syntax", mock_log.warning.call_args[0][0]) def test_pod_name_determination_with_templates(self): """Test complete pod name determination with 
template processing.""" kwargs = { "env": { "KERNEL_POD_NAME": "{{ kernel_namespace }}-{{ kernel_id }}", "KERNEL_NAMESPACE": "production", "KERNEL_IMAGE": "python:3.11", } } with patch.object(self.proxy, 'log'): result = self.proxy._determine_kernel_pod_name(**kwargs) # Should get processed and DNS-normalized self.assertEqual(result, "production-test-kernel-id") def test_pod_name_determination_with_malicious_template(self): """Test pod name determination with malicious template falls back to default.""" kwargs = { "env": { "KERNEL_POD_NAME": "{{ __import__('os').system('evil') }}", "KERNEL_NAMESPACE": "production", } } with patch.object(self.proxy, 'log'), patch( 'enterprise_gateway.services.processproxies.k8s.KernelSessionManager' ) as mock_session_manager: mock_session_manager.get_kernel_username.return_value = "testuser" result = self.proxy._determine_kernel_pod_name(**kwargs) # Should fall back to default naming self.assertEqual(result, "testuser-test-kernel-id") def test_pod_name_determination_with_missing_variables(self): """Test pod name determination with missing variables falls back to default.""" kwargs = { "env": { "KERNEL_POD_NAME": "{{ missing_var }}-{{ kernel_id }}", "KERNEL_NAMESPACE": "production", } } with patch.object(self.proxy, 'log'), patch( 'enterprise_gateway.services.processproxies.k8s.KernelSessionManager' ) as mock_session_manager: mock_session_manager.get_kernel_username.return_value = "testuser" result = self.proxy._determine_kernel_pod_name(**kwargs) # Should fall back to default naming self.assertEqual(result, "testuser-test-kernel-id") def test_pod_name_without_template(self): """Test pod name determination without template syntax.""" kwargs = {"env": {"KERNEL_POD_NAME": "static-pod-name", "KERNEL_NAMESPACE": "production"}} with patch.object(self.proxy, 'log'): result = self.proxy._determine_kernel_pod_name(**kwargs) # Should use as-is and DNS-normalize self.assertEqual(result, "static-pod-name") def test_pod_name_dns_normalization(self): """Test DNS name normalization of pod names.""" kwargs = { "env": { "KERNEL_POD_NAME": "{{ kernel_namespace }}_{{ kernel_id }}", "KERNEL_NAMESPACE": "Test-Namespace", "KERNEL_IMAGE": "python:3.11", } } with patch.object(self.proxy, 'log'): result = self.proxy._determine_kernel_pod_name(**kwargs) # Should be DNS-normalized (lowercase, dashes only) self.assertEqual(result, "test-namespace-test-kernel-id") def test_regex_pattern_validation(self): """Test that only valid variable names are matched by regex.""" valid_vars = [ "kernel_id", "kernel_namespace", "kernel_image_pull_policy", "a", "var123", "KERNEL_ID", ] # Variables that should be blocked by the regex pattern invalid_vars = [ "123invalid", # starts with number "invalid-var", # contains dash "invalid.var", # contains dot "invalid var", # contains space "invalid@var", # contains special char "_private_var", # starts with underscore (security risk) "__class__", # magic method (security risk) "__dict__", # magic method (security risk) "__globals__", # magic method (security risk) ] variables = {var: "value" for var in valid_vars} # Also add underscore variables to test they're not substituted even if present variables.update( {"_private_var": "private", "__class__": "dangerous", "__dict__": "dangerous"} ) # Valid variables should be substituted for var in valid_vars: template = f"{{{{ {var} }}}}" result = self.proxy._safe_template_substitute(template, variables) self.assertEqual(result, "value", f"Valid variable {var} should be substituted") # Invalid variables should be 
treated as having invalid syntax for var in invalid_vars: template = f"{{{{ {var} }}}}" with patch.object(self.proxy, 'log') as mock_log: result = self.proxy._safe_template_substitute(template, variables) self.assertIsNone(result, f"Invalid variable {var} should be rejected") mock_log.warning.assert_called_once() # Should warn about unsupported expressions since invalid var names don't match regex self.assertIn("Invalid template syntax", mock_log.warning.call_args[0][0]) if __name__ == '__main__': unittest.main() ================================================ FILE: enterprise_gateway/tests/test_yaml_injection.py ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Tests for YAML injection vulnerability fix (GHSA-cfw7-6c5v-2wjq).""" import os import unittest import yaml from jinja2 import Environment, FileSystemLoader, select_autoescape TEMPLATE_DIR = os.path.join( os.path.dirname(__file__), "..", "..", "etc", "kernel-launchers", "kubernetes", "scripts", ) OPERATOR_TEMPLATE_DIR = os.path.join( os.path.dirname(__file__), "..", "..", "etc", "kernel-launchers", "operators", "scripts", ) YAML_PARSED_KERNEL_VARS = {"KERNEL_VOLUME_MOUNTS", "KERNEL_VOLUMES"} ALLOWED_K8S_KINDS = { "Pod", "Secret", "PersistentVolumeClaim", "PersistentVolume", "Service", "ConfigMap", } def yaml_safe_str(value): """Escape a value for safe inclusion in a YAML template.""" if isinstance(value, str): return yaml.dump(value, default_style='"', width=10000).strip() if isinstance(value, (dict, list)): return yaml.dump(value, default_flow_style=True, width=10000).strip() # yaml.dump appends a document-end marker ("...\n") for scalars; strip it return yaml.dump(value, width=10000).replace("\n...", "").strip() def _build_keywords(env_overrides: dict) -> dict: """Build a keywords dict from env_overrides using the fixed parsing logic.""" keywords = {} for name, value in env_overrides.items(): if name.startswith("KERNEL_"): if name in YAML_PARSED_KERNEL_VARS: parsed = yaml.safe_load(value) if isinstance(parsed, list) and all(isinstance(item, dict) for item in parsed): keywords[name.lower()] = parsed else: keywords[name.lower()] = value return keywords def _render_pod_template(keywords: dict) -> str: """Render the kernel-pod.yaml.j2 template with the yaml_safe filter.""" j_env = Environment( loader=FileSystemLoader(os.path.normpath(TEMPLATE_DIR)), trim_blocks=True, lstrip_blocks=True, autoescape=select_autoescape( disabled_extensions=("j2", "yaml"), default_for_string=True, default=True, ), ) j_env.filters["yaml_safe"] = yaml_safe_str return j_env.get_template("/kernel-pod.yaml.j2").render(**keywords) def _base_env() -> dict: return { "KERNEL_POD_NAME": "test-pod", "KERNEL_NAMESPACE": "default", "KERNEL_ID": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", "KERNEL_IMAGE": "elyra/kernel-py:3.2.3", "KERNEL_SERVICE_ACCOUNT_NAME": "default", "KERNEL_UID": "1000", "KERNEL_GID": "100", } class TestYamlSafeStrFilter(unittest.TestCase): """Test the yaml_safe_str Jinja2 filter.""" def test_normal_string(self): result = yaml_safe_str("/home/jovyan") self.assertEqual(result, '"/home/jovyan"') def test_string_with_quotes(self): result = yaml_safe_str('hello "world"') self.assertIn("hello", result) parsed = yaml.safe_load(f"key: {result}") self.assertEqual(parsed["key"], 'hello "world"') def test_string_with_newlines_escaped(self): result = yaml_safe_str("line1\nline2\nline3") self.assertNotIn("\n", result.strip('"')) parsed = yaml.safe_load(f"key: 
{result}") self.assertEqual(parsed["key"], "line1\nline2\nline3") def test_document_boundary_escaped(self): result = yaml_safe_str("before\n---\nafter") parsed_docs = list(yaml.safe_load_all(f"key: {result}")) self.assertEqual(len(parsed_docs), 1) self.assertEqual(parsed_docs[0]["key"], "before\n---\nafter") def test_end_of_document_marker_escaped(self): result = yaml_safe_str("before\n...\nafter") parsed = yaml.safe_load(f"key: {result}") self.assertIn("...", parsed["key"]) def test_none_serialized_as_yaml_null(self): result = yaml_safe_str(None) self.assertEqual(result, "null") parsed = yaml.safe_load(f"key: {result}") self.assertIsNone(parsed["key"]) def test_bool_serialized_as_yaml_bool(self): self.assertEqual(yaml_safe_str(True), "true") self.assertEqual(yaml_safe_str(False), "false") parsed_true = yaml.safe_load(f"key: {yaml_safe_str(True)}") parsed_false = yaml.safe_load(f"key: {yaml_safe_str(False)}") self.assertIs(parsed_true["key"], True) self.assertIs(parsed_false["key"], False) def test_numeric_serialized_correctly(self): self.assertEqual(yaml_safe_str(1000), "1000") self.assertEqual(yaml_safe_str(3.14), "3.14") parsed_int = yaml.safe_load(f"key: {yaml_safe_str(1000)}") parsed_float = yaml.safe_load(f"key: {yaml_safe_str(3.14)}") self.assertEqual(parsed_int["key"], 1000) self.assertAlmostEqual(parsed_float["key"], 3.14) def test_dict_rendered_as_flow_mapping(self): result = yaml_safe_str({"name": "data", "mountPath": "/data"}) parsed = yaml.safe_load(f"- {result}") self.assertEqual(parsed[0]["name"], "data") self.assertEqual(parsed[0]["mountPath"], "/data") def test_empty_string(self): result = yaml_safe_str("") parsed = yaml.safe_load(f"key: {result}") self.assertEqual(parsed["key"], "") def test_image_name_with_tag(self): result = yaml_safe_str("registry.example.com/org/image:v1.2.3") parsed = yaml.safe_load(f"key: {result}") self.assertEqual(parsed["key"], "registry.example.com/org/image:v1.2.3") class TestEnvVarParsing(unittest.TestCase): """Test that env var parsing correctly distinguishes scalar vs structured vars.""" def test_scalar_vars_remain_strings(self): env = {"KERNEL_IMAGE": "nginx:latest", "KERNEL_UID": "1000"} keywords = _build_keywords(env) self.assertEqual(keywords["kernel_image"], "nginx:latest") self.assertIsInstance(keywords["kernel_image"], str) self.assertEqual(keywords["kernel_uid"], "1000") self.assertIsInstance(keywords["kernel_uid"], str) def test_volume_mounts_parsed_as_list(self): env = { "KERNEL_VOLUME_MOUNTS": '[{"name": "data", "mountPath": "/data"}]', } keywords = _build_keywords(env) self.assertIsInstance(keywords["kernel_volume_mounts"], list) self.assertEqual(keywords["kernel_volume_mounts"][0]["name"], "data") def test_volumes_parsed_as_list(self): env = { "KERNEL_VOLUMES": '[{"name": "data", "emptyDir": {}}]', } keywords = _build_keywords(env) self.assertIsInstance(keywords["kernel_volumes"], list) def test_non_list_volume_rejected(self): env = {"KERNEL_VOLUME_MOUNTS": "not-a-list"} keywords = _build_keywords(env) self.assertNotIn("kernel_volume_mounts", keywords) def test_list_of_strings_volume_rejected(self): """List of strings (not dicts) should be rejected to prevent injection via loop items.""" env = {"KERNEL_VOLUME_MOUNTS": '["name: data\\nmountPath: /data"]'} keywords = _build_keywords(env) self.assertNotIn("kernel_volume_mounts", keywords) def test_mixed_list_volume_rejected(self): """List containing both dicts and strings should be rejected.""" env = {"KERNEL_VOLUME_MOUNTS": '[{"name": "ok"}, "injected\\nstring"]'} keywords = 
_build_keywords(env) self.assertNotIn("kernel_volume_mounts", keywords) def test_yaml_safe_load_not_applied_to_scalars(self): env = {"KERNEL_WORKING_DIR": '"injected\\nvalue"'} keywords = _build_keywords(env) self.assertEqual(keywords["kernel_working_dir"], '"injected\\nvalue"') self.assertNotIn("\n", keywords["kernel_working_dir"]) class TestSecurityContextInjection(unittest.TestCase): """Test that securityContext injection via KERNEL_WORKING_DIR is blocked.""" def test_security_context_not_overridden(self): env = _base_env() env["KERNEL_WORKING_DIR"] = ( '"/tmp\\"\\n\\nsecurityContext:\\n runAsUser: 0\\n runAsGroup: 0\\n fsGroup: 100\\n"' ) keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = list(yaml.safe_load_all(rendered)) self.assertEqual(len(docs), 1) sc = docs[0]["spec"]["securityContext"] self.assertEqual(sc["runAsUser"], 1000) self.assertEqual(sc["runAsGroup"], 100) def test_injection_via_kernel_image(self): env = _base_env() env["KERNEL_IMAGE"] = 'nginx"\nsecurityContext:\n runAsUser: 0' keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = list(yaml.safe_load_all(rendered)) self.assertEqual(len(docs), 1) sc = docs[0]["spec"]["securityContext"] self.assertEqual(sc["runAsUser"], 1000) def test_injection_via_kernel_namespace(self): env = _base_env() env["KERNEL_NAMESPACE"] = 'default"\nsecurityContext:\n runAsUser: 0' keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = list(yaml.safe_load_all(rendered)) self.assertEqual(len(docs), 1) sc = docs[0]["spec"]["securityContext"] self.assertEqual(sc["runAsUser"], 1000) def test_injection_via_volume_mounts_string_list_blocked_at_l1(self): """L1: list-of-strings in KERNEL_VOLUME_MOUNTS is rejected during parsing.""" env = _base_env() env["KERNEL_VOLUME_MOUNTS"] = ( '["{name: data, mountPath: /data}\\n securityContext:\\n runAsUser: 0"]' ) keywords = _build_keywords(env) self.assertNotIn("kernel_volume_mounts", keywords) def test_injection_via_volume_mounts_blocked_at_l2(self): """L2: even if a string slips into volume_mounts, yaml_safe filter escapes it.""" env = _base_env() keywords = _build_keywords(env) keywords["kernel_volume_mounts"] = [ "{name: data, mountPath: /data}\n securityContext:\n runAsUser: 0" ] rendered = _render_pod_template(keywords) docs = list(yaml.safe_load_all(rendered)) self.assertEqual(len(docs), 1) sc = docs[0]["spec"]["securityContext"] self.assertEqual(sc["runAsUser"], 1000) def test_document_injection_via_working_dir_blocked(self): """Extra YAML documents injected via KERNEL_WORKING_DIR must not survive rendering.""" env = _base_env() env["KERNEL_WORKING_DIR"] = ( '/tmp\n...\n---\napiVersion: v1\nkind: Pod\nmetadata:\n' # noqa: S108 ' name: injected-pod\nspec:\n containers:\n' ' - name: evil\n image: nginx\n securityContext:\n' ' privileged: true\n...\n' ) keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = [d for d in yaml.safe_load_all(rendered) if d is not None] self.assertEqual(len(docs), 1, "Injected document should not create extra YAML documents") self.assertEqual(docs[0]["kind"], "Pod") self.assertEqual(docs[0]["metadata"]["name"], "test-pod") def test_all_rendered_kinds_are_allowed(self): env = _base_env() keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = [d for d in yaml.safe_load_all(rendered) if d is not None] for doc in docs: self.assertIn( doc.get("kind"), ALLOWED_K8S_KINDS, f"Unexpected kind: {doc.get('kind')}", ) def test_duplicate_pod_kind_detected(self): """L3: if an attacker somehow injected a second Pod, document count validation catches it.""" multi_pod_yaml = ( "apiVersion: v1\nkind: Pod\nmetadata:\n name: 
legit\n" "---\n" "apiVersion: v1\nkind: Pod\nmetadata:\n name: evil\n" ) docs = list(yaml.safe_load_all(multi_pod_yaml)) kind_counts: dict[str, int] = {} for doc in docs: if doc: kind = doc.get("kind") kind_counts[kind] = kind_counts.get(kind, 0) + 1 self.assertEqual(kind_counts.get("Pod"), 2) self.assertGreater(kind_counts["Pod"], 1, "Should detect duplicate Pod documents") class TestNormalOperation(unittest.TestCase): """Test that the fix preserves normal kernel launch functionality.""" def test_basic_pod_renders_correctly(self): env = _base_env() keywords = _build_keywords(env) rendered = _render_pod_template(keywords) docs = list(yaml.safe_load_all(rendered)) self.assertEqual(len(docs), 1) pod = docs[0] self.assertEqual(pod["kind"], "Pod") self.assertEqual(pod["metadata"]["name"], "test-pod") self.assertEqual(pod["metadata"]["namespace"], "default") self.assertEqual(pod["spec"]["containers"][0]["image"], "elyra/kernel-py:3.2.3") self.assertEqual(pod["spec"]["serviceAccountName"], "default") def test_working_dir_set_correctly(self): env = _base_env() env["KERNEL_WORKING_DIR"] = "/home/jovyan/work" keywords = _build_keywords(env) rendered = _render_pod_template(keywords) pod = yaml.safe_load(rendered) self.assertEqual(pod["spec"]["containers"][0]["workingDir"], "/home/jovyan/work") def test_resource_limits_rendered(self): env = _base_env() env["KERNEL_CPUS"] = "500m" env["KERNEL_MEMORY"] = "1Gi" env["KERNEL_CPUS_LIMIT"] = "1" env["KERNEL_MEMORY_LIMIT"] = "2Gi" keywords = _build_keywords(env) rendered = _render_pod_template(keywords) pod = yaml.safe_load(rendered) resources = pod["spec"]["containers"][0]["resources"] self.assertEqual(resources["requests"]["cpu"], "500m") self.assertEqual(resources["requests"]["memory"], "1Gi") self.assertEqual(resources["limits"]["cpu"], "1") self.assertEqual(resources["limits"]["memory"], "2Gi") def test_security_context_with_uid_gid(self): env = _base_env() keywords = _build_keywords(env) rendered = _render_pod_template(keywords) pod = yaml.safe_load(rendered) sc = pod["spec"]["securityContext"] self.assertEqual(sc["runAsUser"], 1000) self.assertEqual(sc["runAsGroup"], 100) self.assertEqual(sc["fsGroup"], 100) def test_volume_mounts_rendered(self): env = _base_env() env["KERNEL_VOLUME_MOUNTS"] = '[{"name": "data-vol", "mountPath": "/data"}]' env["KERNEL_VOLUMES"] = '[{"name": "data-vol", "emptyDir": {}}]' keywords = _build_keywords(env) rendered = _render_pod_template(keywords) pod = yaml.safe_load(rendered) mounts = pod["spec"]["containers"][0]["volumeMounts"] self.assertEqual(len(mounts), 1) self.assertEqual(mounts[0]["name"], "data-vol") volumes = pod["spec"]["volumes"] self.assertEqual(len(volumes), 1) self.assertEqual(volumes[0]["name"], "data-vol") class TestSparkOperatorTemplate(unittest.TestCase): """Test that the Spark operator template is also protected.""" def _render_operator_template(self, keywords: dict) -> str: j_env = Environment( loader=FileSystemLoader(os.path.normpath(OPERATOR_TEMPLATE_DIR)), trim_blocks=True, lstrip_blocks=True, autoescape=select_autoescape( disabled_extensions=("j2", "yaml"), default_for_string=True, default=True, ), ) j_env.filters["yaml_safe"] = yaml_safe_str return j_env.get_template("/sparkoperator.k8s.io-v1beta2.yaml.j2").render(**keywords) def test_injection_via_kernel_image_blocked(self): keywords = { "kernel_resource_name": "test-spark", "kernel_image": 'nginx\nmalicious:\n key: value', "kernel_id": "test-id", "spark_context_initialization_mode": "none", "eg_response_address": "1.2.3.4:8080", 
"eg_port_range": "0..0", "eg_public_key": "testkey", "kernel_service_account_name": "default", "kernel_executor_image": "elyra/kernel-py:3.2.3", } rendered = self._render_operator_template(keywords) doc = yaml.safe_load(rendered) self.assertEqual(doc["kind"], "SparkApplication") self.assertIn("\n", doc["spec"]["image"]) self.assertNotIn("malicious", doc) def test_normal_spark_app_renders(self): keywords = { "kernel_resource_name": "test-spark", "kernel_image": "elyra/kernel-spark-py:3.2.3", "kernel_id": "test-id-123", "spark_context_initialization_mode": "lazy", "eg_response_address": "10.0.0.1:8080", "eg_port_range": "10000..11000", "eg_public_key": "abc123", "kernel_service_account_name": "spark-sa", "kernel_executor_image": "elyra/kernel-spark-py:3.2.3", } rendered = self._render_operator_template(keywords) doc = yaml.safe_load(rendered) self.assertEqual(doc["kind"], "SparkApplication") self.assertEqual(doc["metadata"]["name"], "test-spark") self.assertEqual(doc["spec"]["image"], "elyra/kernel-spark-py:3.2.3") self.assertEqual(doc["spec"]["driver"]["serviceAccount"], "spark-sa") if __name__ == "__main__": unittest.main() ================================================ FILE: etc/Makefile ================================================ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. .PHONY: help clean clean-images clean-enterprise-gateway clean-enterprise-gateway-demo clean-demo-base \ clean-kernel-images clean-py clean-tf-py clean-tf-gpu-py clean-r clean-spark-r clean-scala toree-launcher \ kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker clean-kernel-image-puller SA?=source activate ENV:=enterprise-gateway-dev SHELL:=/bin/bash SUPPORTED_ARCHS=linux/arm64 linux/amd64 PLATFORM_ARCHS=`echo ${SUPPORTED_ARCHS} | sed "s/ /,/g"` # Docker attributes - hub organization and tag. Modify accordingly HUB_ORG:=elyra # Set NO_CACHE=--no-cache to force docker build to not use cached layers NO_CACHE?= help: # http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' clean: ## Make a clean source tree -rm -rf kernel-launchers/scala/lib -rm -rf kernel-launchers/scala/toree-launcher/project/project/ # # Kernelspec build section ************************************************* # KERNELSPECS := kernelspecs_all kernelspecs_yarn kernelspecs_conductor kernelspecs_kubernetes kernelspecs_docker kernelspecs: $(KERNELSPECS) kernel_image_files FILE_kernelspecs_all:=../dist/jupyter_enterprise_gateway_kernelspecs-$(VERSION).tar.gz FILE_kernelspecs_yarn:=../dist/jupyter_enterprise_gateway_kernelspecs_yarn-$(VERSION).tar.gz FILE_kernelspecs_conductor:=../dist/jupyter_enterprise_gateway_kernelspecs_conductor-$(VERSION).tar.gz FILE_kernelspecs_kubernetes:=../dist/jupyter_enterprise_gateway_kernelspecs_kubernetes-$(VERSION).tar.gz FILE_kernelspecs_docker:=../dist/jupyter_enterprise_gateway_kernelspecs_docker-$(VERSION).tar.gz FILES_kernelspecs_all:=$(shell find kernel-launchers kernelspecs -type f -name '*') TOREE_LAUNCHER_FILES:=$(shell find kernel-launchers/scala/toree-launcher/src -type f -name '*') ../build/kernelspecs: kernel-launchers/scala/lib $(FILES_kernelspecs_all) @rm -rf ../build/kernelspecs @mkdir -p ../build/kernelspecs # Seed the build tree with initial files cp -r kernelspecs ../build # Distribute language and config-sensitive files. 
# On-prem kernelspecs get launcher files in the kernelspec hierarchy @echo ../build/kernelspecs/python_distributed | xargs -t -n 1 cp -r kernel-launchers/python/scripts @echo ../build/kernelspecs/dask_python_* | xargs -t -n 1 cp -r kernel-launchers/python/scripts @echo ../build/kernelspecs/spark_python_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/python/scripts @echo ../build/kernelspecs/spark_R_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/R/scripts @echo ../build/kernelspecs/spark_scala_{conductor*,yarn*} | xargs -t -n 1 cp -r kernel-launchers/scala/lib # Container-based kernelspecs (and operators) just get the container launchers @echo ../build/kernelspecs/{python,R,scala,python_tf,python_tf_gpu}_kubernetes | xargs -t -n 1 cp -r kernel-launchers/kubernetes/* @echo ../build/kernelspecs/spark_{python,R,scala}_kubernetes | xargs -t -n 1 cp -r kernel-launchers/kubernetes/* @echo ../build/kernelspecs/{python,R,scala,python_tf,python_tf_gpu}_docker | xargs -t -n 1 cp -r kernel-launchers/docker/* @echo ../build/kernelspecs/spark_python_operator | xargs -t -n 1 cp -r kernel-launchers/operators/* # Populate kernel resources. Because tensorflow is also python, it should be last. @echo ../build/kernelspecs/*R* | xargs -t -n 1 cp -r kernel-resources/ir/* @echo ../build/kernelspecs/*scala* | xargs -t -n 1 cp -r kernel-resources/apache_toree/* @echo ../build/kernelspecs/*python* | xargs -t -n 1 cp -r kernel-resources/python/* @echo ../build/kernelspecs/*tf* | xargs -t -n 1 cp -r kernel-resources/tensorflow/* # Perform the copy again to enable local, per-kernel, overrides cp -r kernelspecs ../build @(cd ../build/kernelspecs; find . -name 'kernel.json' -print0 | xargs -0 sed -i.bak "s/VERSION/$(TAG)/g"; find . -name *.bak -print0 | xargs -0 rm -f) @mkdir -p ../dist PATTERN_kernelspecs_all := * PATTERN_kernelspecs_yarn := *_yarn_* PATTERN_kernelspecs_conductor := *_conductor_* PATTERN_kernelspecs_kubernetes := {*_kubernetes,*_operator} PATTERN_kernelspecs_docker := *_docker define BUILD_KERNELSPEC $1: $$(FILE_$1) $$(FILE_$1): ../build/kernelspecs rm -f $$(FILE_$1) @( cd ../build/kernelspecs; tar -pvczf "../$$(FILE_$1)" $$(PATTERN_$1) ) endef $(foreach kernelspec,$(KERNELSPECS),$(eval $(call BUILD_KERNELSPEC,$(kernelspec)))) kernel-launchers/scala/lib: $(TOREE_LAUNCHER_FILES) -rm -rf kernel-launchers/scala/lib mkdir -p kernel-launchers/scala/lib @(cd kernel-launchers/scala/toree-launcher; sbt -Dversion=$(VERSION) -Dspark_version=$(SPARK_VERSION) package; cp target/scala-2.12/*.jar ../lib) curl -L https://repository.apache.org/content/repositories/releases/org/apache/toree/toree-assembly/0.5.0-incubating/toree-assembly-0.5.0-incubating.jar -o kernel-launchers/scala/lib/toree-assembly-0.5.0-incubating.jar KERNEL_IMAGE_FILE:=../dist/jupyter_enterprise_gateway_kernel_image_files-$(VERSION).tar.gz kernel_image_files: ../build/kernel_image_files rm -f $(KERNEL_IMAGE_FILE) @( cd ../build/kernel_image_files; tar -pvczf "../$(KERNEL_IMAGE_FILE)" . 
) ../build/kernel_image_files: kernel-launchers/scala/lib kernel-launchers/bootstrap/bootstrap-kernel.sh @rm -rf ../build/kernel_image_files @mkdir -p ../build/kernel_image_files/kernel-launchers cp kernel-launchers/bootstrap/* ../build/kernel_image_files cp -r kernel-launchers/{python,R,scala} ../build/kernel_image_files/kernel-launchers rm -rf ../build/kernel_image_files/kernel-launchers/scala/{\.*DS*,toree-launcher} # leave only lib # # Docker image build section *********************************************** # KERNEL_IMAGES := kernel-py kernel-spark-py kernel-r kernel-spark-r kernel-scala kernel-tf-py kernel-tf-gpu-py DOCKER_IMAGES := demo-base enterprise-gateway-demo enterprise-gateway kernel-image-puller $(KERNEL_IMAGES) PUSHED_IMAGES := demo-base enterprise-gateway-demo enterprise-gateway kernel-image-puller $(KERNEL_IMAGES) docker-images: $(DOCKER_IMAGES) kernel-images: $(KERNEL_IMAGES) push-images: push-enterprise-gateway-demo push-enterprise-gateway push-kernel-py push-kernel-spark-py push-kernel-tf-py push-kernel-r push-kernel-spark-r push-kernel-scala push-kernel-image-puller clean-images: clean-enterprise-gateway-demo clean-demo-base clean-enterprise-gateway clean-kernel-image-puller clean-kernel-images clean-kernel-images: clean-kernel-py clean-kernel-spark-py clean-kernel-tf-py clean-kernel-tf-gpu-py clean-kernel-r clean-kernel-spark-r clean-kernel-scala # Extra dependencies for each docker image... DEPENDS_demo-base: DEPENDS_enterprise-gateway-demo: $(FILE_kernelspecs_all) DEPENDS_enterprise-gateway: $(FILE_kernelspecs_all) DEPENDS_kernel-image-puller: DEPENDS_kernel-py DEPENDS_kernel-spark-py DEPENDS_kernel-r DEPENDS_kernel-spark-r DEPENDS_kernel-scala DEPENDS_kernel-tf-py DEPENDS_kernel-tf-gpu-py: $(FILE_kernelspecs_kubernetes) $(FILE_kernelspecs_docker) # Extra targets for each docker image... TARGETS_demo-base: TARGETS_kernel-image-puller: TARGETS_enterprise-gateway TARGETS_enterprise-gateway-demo: kernelspecs @make -C .. bdist TARGETS_kernel-py TARGETS_kernel-spark-py TARGETS_kernel-r TARGETS_kernel-spark-r TARGETS_kernel-scala TARGETS_kernel-tf-py TARGETS_kernel-tf-gpu-py: kernelspecs # Extra files for each docker image... FILES_demo-base := FILES_kernel-image-puller := FILES_enterprise-gateway-demo := ../dist/jupyter_enterprise_gateway_kernelspecs-* ../dist/jupyter_enterprise_gateway*.whl FILES_enterprise-gateway := ../dist/jupyter_enterprise_gateway_kernel_image_files* ../dist/jupyter_enterprise_gateway_kernelspecs-* ../dist/jupyter_enterprise_gateway*.whl FILES_kernel-py := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-spark-py := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-tf-py := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-tf-gpu-py := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-r := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-spark-r := ../dist/jupyter_enterprise_gateway_kernel_image_files* FILES_kernel-scala := ../dist/jupyter_enterprise_gateway_kernel_image_files* # Generate image creation targets for each entry in $(DOCKER_IMAGES). Switch 'eval' to 'info' to see what is produced. 
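# For illustration only (editorial sketch; not consumed by make): with $1 = kernel-py,
# the BUILD_IMAGE macro below expands to roughly:
#   kernel-py: ../.image-kernel-py
#   ../.image-kernel-py: docker/kernel-py/* DEPENDS_kernel-py
#       @make clean-kernel-py TARGETS_kernel-py
#       @mkdir -p ../build/docker/kernel-py
#       @cp -r docker/kernel-py/* $(FILES_kernel-py) ../build/docker/kernel-py
#       (then docker build / buildx for the selected platform(s), and finally: touch ../.image-kernel-py)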
define BUILD_IMAGE $1: ../.image-$1 ../.image-$1: docker/$1/* DEPENDS_$1 @make clean-$1 TARGETS_$1 @mkdir -p ../build/docker/$1 @cp -r docker/$1/* $$(FILES_$1) ../build/docker/$1 ifdef MULTIARCH_BUILD @echo "starting buildx builder for $1" -@(docker buildx rm $1) (docker buildx create --use --name $1) (cd ../build/docker/$1; docker buildx build ${NO_CACHE} --platform $(PLATFORM_ARCHS) --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) . --push) @echo "remove builder instance $1" -(docker buildx rm $1) else ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), $(SUPPORTED_ARCHS))) @echo "Building docker image for $(TARGET_ARCH)" (cd ../build/docker/$1; docker build ${NO_CACHE} --platform ${TARGET_ARCH} --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) .) @-docker images $(HUB_ORG)/$1:$(TAG) else @echo "TARGET_ARCH not defined or not in supported platforms: $(PLATFORM_ARCHS). Building docker image for default platform" (cd ../build/docker/$1; docker build ${NO_CACHE} --build-arg HUB_ORG=${HUB_ORG} --build-arg TAG=${TAG} --build-arg SPARK_VERSION=${SPARK_VERSION} -t $(HUB_ORG)/$1:$(TAG) .) @-docker images $(HUB_ORG)/$1:$(TAG) endif @touch ../.image-$1 endef $(foreach image,$(DOCKER_IMAGES),$(eval $(call BUILD_IMAGE,$(image)))) # Generate clean-xxx targets for each entry in $(DOCKER_IMAGES). Switch 'eval' to 'info' to see what is produced. define CLEAN_IMAGE clean-$1: @rm -f ../.image-$1 @-docker rmi -f $(HUB_ORG)/$1:$(TAG) endef $(foreach image,$(DOCKER_IMAGES),$(eval $(call CLEAN_IMAGE,$(image)))) # Publish each publish image on $(PUSHED_IMAGES) to DockerHub. Switch 'eval' to 'info' to see what is produced. define PUSH_IMAGE push-$1: docker push $(HUB_ORG)/$1:$(TAG) endef $(foreach image,$(PUSHED_IMAGES),$(eval $(call PUSH_IMAGE,$(image)))) ================================================ FILE: etc/docker/demo-base/Dockerfile ================================================ ARG BASE_CONTAINER=continuumio/miniconda3:24.1.2-0 FROM $BASE_CONTAINER ARG SPARK_VERSION ARG SPARKR_VERSION=3.1.2 ARG NB_USER="jovyan" ARG NB_UID="1000" ARG NB_GID="100" USER root ENV HADOOP_HOME=/usr/hdp/current/hadoop \ ANACONDA_HOME=/opt/conda ENV SHELL=/bin/bash \ NB_USER=$NB_USER \ NB_UID=$NB_UID \ NB_GID=$NB_GID \ LC_ALL=en_US.UTF-8 \ LANG=en_US.UTF-8 \ LANGUAGE=en_US.UTF-8 \ JAVA_HOME=/usr/lib/jvm/java \ SPARK_HOME=/usr/hdp/current/spark2-client \ PYSPARK_PYTHON=$ANACONDA_HOME/bin/python \ HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop ENV HOME=/home/$NB_USER \ PATH=$JAVA_HOME/bin:$ANACONDA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH ENV SPARK_VER=$SPARK_VERSION ENV HADOOP_VER=3.3.1 # INSTALL / DOWNLOAD ALL NEEDED PACKAGES RUN dpkg --purge --force-depends ca-certificates-java \ && apt-get update && apt-get -yq dist-upgrade \ && apt-get install -yq --no-install-recommends \ wget \ bzip2 \ tar \ curl \ less \ nano \ ca-certificates \ libkrb5-dev \ sudo \ locales \ gcc \ fonts-liberation \ unzip \ libsm6 \ libxext-dev \ libxrender1 \ openssh-server \ openssh-client \ openjdk-11-jdk-headless \ ca-certificates-java \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* RUN ln -s $(readlink -f /usr/bin/javac | sed "s:/bin/javac::") ${JAVA_HOME} RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \ locale-gen ADD fix-permissions /usr/local/bin/fix-permissions # Create jovyan user with UID=1000 and in the 'users' group # and make sure these dirs are writable by the `users` group. 
RUN groupadd wheel -g 11 && \ echo "auth required pam_wheel.so use_uid" >> /etc/pam.d/su && \ useradd -m -s /bin/bash -N -u $NB_UID $NB_USER && \ mkdir -p /usr/hdp/current && \ mkdir -p /usr/local/share/jupyter && \ chown $NB_USER:$NB_GID $ANACONDA_HOME && \ chmod g+w /etc/passwd && \ chmod +x /usr/local/bin/fix-permissions && \ fix-permissions $HOME && \ fix-permissions $ANACONDA_HOME && \ fix-permissions /usr/hdp/current && \ fix-permissions /usr/local/share/jupyter # Create additional system users 'elyra', 'bob', and 'alice' with pinned uids for multi-user demos. RUN useradd -m -s /bin/bash -N -u 1111 elyra && \ useradd -m -s /bin/bash -N -u 1112 bob && \ useradd -m -s /bin/bash -N -u 1113 alice USER $NB_UID # Setup work directory for backward-compatibility RUN mkdir /home/$NB_USER/work && \ fix-permissions /home/$NB_USER # DOWNLOAD HADOOP AND SPARK RUN curl -sL https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VER/hadoop-$HADOOP_VER.tar.gz | tar -xz -C /usr/hdp/current RUN curl -sL https://archive.apache.org/dist/spark/spark-$SPARK_VER/spark-$SPARK_VER-bin-hadoop3.2.tgz | tar -xz -C /usr/hdp/current # SETUP SPARK AND HADOOP SYMLINKS RUN cd /usr/hdp/current && ln -s ./hadoop-$HADOOP_VER hadoop && ln -s ./spark-$SPARK_VER-bin-hadoop3.2 spark2-client USER root RUN conda install mamba -n base -c conda-forge && \ mamba install --yes --quiet -c conda-forge \ 'jupyter' \ 'r-devtools' \ 'r-stringr' \ 'r-argparse' && \ mamba clean -y --all && \ fix-permissions $ANACONDA_HOME && \ fix-permissions /home/$NB_USER USER $NB_UID # Package 'SparkR' currently supports 3.1.2, so we'll set its own ARG # https://cran.r-project.org/src/contrib/Archive/SparkR/ RUN Rscript -e 'install.packages("IRkernel", repos="https://mirror.las.iastate.edu/CRAN/", lib="/opt/conda/lib/R/library")' \ -e 'IRkernel::installspec(prefix = "/usr/local")' \ -e 'download.file(url = "https://cran.r-project.org/src/contrib/Archive/SparkR/SparkR_'"$SPARKR_VERSION"'.tar.gz", destfile = "SparkR_'"$SPARKR_VERSION"'.tar.gz")' \ -e 'install.packages(pkgs="SparkR_'"$SPARKR_VERSION"'.tar.gz", type="source", repos=NULL, lib="/opt/conda/lib/R/library")' \ -e 'unlink("SparkR_'"$SPARKR_VERSION"'.tar.gz")' # SETUP HADOOP CONFIGS RUN sed -i '/^export JAVA_HOME/ s:.*:export JAVA_HOME=/usr/lib/jvm/java\nexport HADOOP_HOME=/usr/hdp/current/hadoop\n:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh RUN sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=/usr/hdp/current/hadoop/etc/hadoop/:' $HADOOP_HOME/etc/hadoop/hadoop-env.sh # SETUP PSEUDO-DISTRIBUTED CONFIGS FOR HADOOP COPY ["core-site.xml.template", "hdfs-site.xml", "mapred-site.xml", "yarn-site.xml.template", \ "$HADOOP_HOME/etc/hadoop/"] # working around docker.io build error RUN ls -la /usr/hdp/current/hadoop/etc/hadoop/*-env.sh && \ chmod +x /usr/hdp/current/hadoop/etc/hadoop/*-env.sh && \ ls -la /usr/hdp/current/hadoop/etc/hadoop/*-env.sh # Install Toree RUN cd /tmp && \ curl -O https://archive.apache.org/dist/incubator/toree/0.5.0-incubating/toree-pip/toree-0.5.0.tar.gz && \ pip install --upgrade setuptools --user && \ pip install /tmp/toree-0.5.0.tar.gz && \ jupyter toree install --spark_home=$SPARK_HOME --kernel_name="Spark $SPARK_VER" --interpreters=Scala && \ rm -f /tmp/toree-0.5.0.tar.gz && \ fix-permissions $ANACONDA_HOME && \ fix-permissions /home/$NB_USER # SETUP PASSWORDLESS SSH FOR $NB_USER RUN ssh-keygen -q -N "" -t rsa -f /home/$NB_USER/.ssh/id_rsa && \ cp /home/$NB_USER/.ssh/id_rsa.pub /home/$NB_USER/.ssh/authorized_keys && \ chmod 0700 /home/$NB_USER USER root # SETUP PASSWORDLESS SSH 
RUN yes y | ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key && \ yes y | ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key && \ yes y | ssh-keygen -q -N "" -t rsa -f /root/.ssh/id_rsa && \ cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys RUN ssh-keygen -A COPY ssh_config /root/.ssh/config RUN chmod 600 /root/.ssh/config && \ chown root:root /root/.ssh/config && \ echo "Port 2122" >> /etc/ssh/sshd_config && \ echo "${NB_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers RUN service ssh restart COPY ssh_config /home/$NB_USER/.ssh/config RUN chmod 600 /home/$NB_USER/.ssh/config && \ chown $NB_USER: /home/$NB_USER/.ssh/config COPY bootstrap-yarn-spark.sh /usr/local/bin/ RUN chown $NB_USER: /usr/local/bin/bootstrap-yarn-spark.sh && \ chmod 0700 /usr/local/bin/bootstrap-yarn-spark.sh CMD ["/usr/local/bin/bootstrap-yarn-spark.sh"] LABEL Hadoop.version=$HADOOP_VER LABEL Spark.version=$SPARK_VER # Hdfs ports EXPOSE 50010 50020 50070 50075 50090 8020 9000 \ # Mapred ports 19888 \ # Yarn ports 8030 8031 8032 8033 8040 8042 8088 \ # Other ports 49707 2122 USER $NB_USER ================================================ FILE: etc/docker/demo-base/README.md ================================================ # What this image Gives You - Debian base image (continuumio/miniconda3) - Hadoop 3.3.1 - Apache Spark (version per the `SPARK_VERSION` build arg) - OpenJDK 11 runtime - Mini-conda latest (python 3.11) with R packages - Toree 0.5.0-incubating - `jovyan` service user, with system users `elyra`, `bob`, and `alice`. The jovyan uid is `1000` to match other jupyter images. - Password-less ssh for service user - Users have HDFS folder setup at startup # Basic Use As of the 0.9.0 release of [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway/releases) this image can be started as a separate YARN cluster to better demonstrate remote kernel capabilities. See section [Dual Mode](https://hub.docker.com/r/elyra/enterprise-gateway/#dual_mode) on the enterprise-gateway page for command usage.
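For example, a minimal standalone-YARN invocation might look like `docker run -itd --rm -h yarn-master -p 8088:8088 -p 8042:8042 elyra/demo-base:<tag> --yarn` (illustrative; the hostname `yarn-master` is arbitrary and `<tag>` is a placeholder for a published tag). The container's own `--help` output, shown in the bootstrap script below, describes the supported options.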
" exit 0 fi : ${HADOOP_HOME:=/usr/hdp/current/hadoop} : ${YARN_HOST:=$HOSTNAME} : ${SPARK_HOME:=/usr/hdp/current/spark2-client} : ${SPARK_VER:=3.2.1} : ${JAVA_HOME:=/usr/lib/jvm/java} echo "export JAVA_HOME=${JAVA_HOME}" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh # Set all the hadoop envs for this shell $HADOOP_HOME/etc/hadoop/hadoop-env.sh rm -f /tmp/*.pid # installing libraries if any - (resource urls added comma separated to the ACP system variable) cd $HADOOP_HOME/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - ## altering the hostname in core-site and enterprise-gateway startup configuration sed s/HOSTNAME/$YARN_HOST/ /usr/hdp/current/hadoop/etc/hadoop/core-site.xml.template > /usr/hdp/current/hadoop/etc/hadoop/core-site.xml sed s/HOSTNAME/$YARN_HOST/ /usr/hdp/current/hadoop/etc/hadoop/yarn-site.xml.template > /usr/hdp/current/hadoop/etc/hadoop/yarn-site.xml # # setting spark defaults cp $SPARK_HOME/conf/spark-defaults.conf.template $SPARK_HOME/conf/spark-defaults.conf # set spark.yarn.jars so spark will stop uploaded jars to hdfs everytime echo "spark.yarn.jars hdfs://$YARN_HOST:9000/spark/*.jar" >> $SPARK_HOME/conf/spark-defaults.conf # place metastore db and derby.log in /tmp echo "spark.driver.extraJavaOptions -Dderby.system.home=/tmp" >> $SPARK_HOME/conf/spark-defaults.conf ##/usr/sbin/rsyslog echo "********** STARTING SSH DAEMON ***********" sudo service ssh restart # If we're not running in standalone mode, don't run as jovyan. # If we're running in standalone mode, startup yarn, hdfs, etc. if [[ "$YARN_HOST" == "$HOSTNAME" || "$FROM" == "YARN" ]]; then echo "********** FORMATTING NAMENODE ***********" $HADOOP_HOME/bin/hdfs namenode -format $HADOOP_HOME/sbin/start-dfs.sh $HADOOP_HOME/sbin/start-yarn.sh echo "********** LEAVING HDFS SAFE MODE...... ***********" $HADOOP_HOME/bin/hadoop dfsadmin -safemode leave echo "********** UPLOADING SPARK JARS TO HDFS..... ***********" hdfs dfs -put $SPARK_HOME/jars /spark ## Add HDFS folders for our users (jovyan, bob, alice)... echo "Setting up HDFS folders for Enterprise Gateway users..." hdfs dfs -mkdir -p /user/{jovyan,bob,alice,root} /tmp/hive hdfs dfs -chown jovyan:jovyan /user/jovyan hdfs dfs -chown bob:bob /user/bob hdfs dfs -chown alice:alice /user/alice hdfs dfs -chmod 0777 /tmp/hive elif [[ "$CMD" == "--yarn" ]]; then echo "YARN_HOST cannot be different from HOSTNAME when using --yarn! YARN_HOST=$YARN_HOST != HOSTNAME=$HOSTNAME" exit 1 fi if [[ "$CMD" == "--yarn" ]]; then echo "YARN application logs can be found at '/usr/hdp/current/hadoop/logs/userlogs'" prev_count=0 while [ 1 ] do # Every minute list any new application directories that have been created since # last time. 
sleep 60 if ls -ld /usr/hdp/current/hadoop/logs/userlogs/application* > /dev/null 2>&1; then count=`ls -ld /usr/hdp/current/hadoop/logs/userlogs/application*|wc -l` if [[ $count -gt $prev_count ]]; then new_apps=`expr $count - $prev_count` ls -ldt /usr/hdp/current/hadoop/logs/userlogs/application*|head --lines=$new_apps fi # reset each time in case count < prev_count prev_count=$count fi done elif [[ "$FROM" == "YARN" ]]; then echo "" echo "Note: YARN application logs can be found at '/usr/hdp/current/hadoop/logs/userlogs'" "$*" fi exit 0 ================================================ FILE: etc/docker/demo-base/core-site.xml.template ================================================ <configuration> <property> <name>fs.defaultFS</name> <value>hdfs://HOSTNAME:9000</value> </property> </configuration> ================================================ FILE: etc/docker/demo-base/fix-permissions ================================================ #!/bin/bash # set permissions on a directory # after any installation, if a directory needs to be (human) user-writable, # run this script on it. # It will make everything in the directory owned by the group $NB_GID # and writable by that group. # Deployments that want to set a specific user id can preserve permissions # by adding the `--group-add users` line to `docker run`. # uses find to avoid touching files that already have the right permissions, # which would cause massive image explosion # right permissions are: # group=$NB_GID # AND permissions include group rwX (directory-execute) # AND directories have setuid,setgid bits set set -e for d in "$@"; do find "$d" \ ! \( \ -group $NB_GID \ -a -perm -g+rwX \ \) \ -exec chgrp $NB_GID {} \; \ -exec chmod g+rwX {} \; # setuid,setgid *on directories only* find "$d" \ \( \ -type d \ -a ! -perm -6000 \ \) \ -exec chmod +6000 {} \; done ================================================ FILE: etc/docker/demo-base/hdfs-site.xml ================================================ <configuration> <property> <name>dfs.replication</name> <value>1</value> </property> </configuration> ================================================ FILE: etc/docker/demo-base/mapred-site.xml ================================================ <configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> </configuration> ================================================ FILE: etc/docker/demo-base/ssh_config ================================================ Host * UserKnownHostsFile /dev/null StrictHostKeyChecking no LogLevel quiet Port 2122 ================================================ FILE: etc/docker/demo-base/yarn-site.xml.template ================================================ <configuration> <property> <name>yarn.nodemanager.vmem-check-enabled</name> <value>false</value> </property> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.nodemanager.delete.debug-delay-sec</name> <value>600</value> <description>Number of seconds after an application finishes before the nodemanager's DeletionService will delete the application's localized file directory and log directory. To diagnose Yarn application problems, set this property's value large enough (for example, to 600 = 10 minutes) to permit examination of these directories. After changing the property's value, you must restart the nodemanager in order for it to have an effect. The roots of Yarn applications' work directories is configurable with the yarn.nodemanager.local-dirs property (see below), and the roots of the Yarn applications' log directories is configurable with the yarn.nodemanager.log-dirs property (see also below).</description> </property> <property> <name>yarn.resourcemanager.scheduler.address</name> <value>HOSTNAME:8030</value> </property> <property> <name>yarn.resourcemanager.address</name> <value>HOSTNAME:8032</value> </property> <property> <name>yarn.resourcemanager.webapp.address</name> <value>HOSTNAME:8088</value> </property> <property> <name>yarn.resourcemanager.resource-tracker.address</name> <value>HOSTNAME:8031</value> </property> <property> <name>yarn.resourcemanager.admin.address</name> <value>HOSTNAME:8033</value> </property> <property> <name>yarn.application.classpath</name> <value> /usr/hdp/current/hadoop/etc/hadoop, /usr/hdp/current/hadoop/share/hadoop/common/*, /usr/hdp/current/hadoop/share/hadoop/common/lib/*, /usr/hdp/current/hadoop/share/hadoop/hdfs/*, /usr/hdp/current/hadoop/share/hadoop/hdfs/lib/*, /usr/hdp/current/hadoop/share/hadoop/mapreduce/*, /usr/hdp/current/hadoop/share/hadoop/mapreduce/lib/*, /usr/hdp/current/hadoop/share/hadoop/yarn/*, /usr/hdp/current/hadoop/share/hadoop/yarn/lib/* </value> </property> </configuration>
================================================ FILE: etc/docker/docker-compose.yml ================================================ version: "3.5" # A docker user network is created and referenced by the service. This network # must also get conveyed to launched kernel containers and that occurs via the env variable: EG_DOCKER_NETWORK # Notes (FIXMEs): # 1. We need to address the need to run as UID 0 (root). This appears to be required in order to create containers/services from within. # 2. Using endpoint-mode dnsrr (which appears to be required in order for the kernel container to send the connection info response back) # also requires mode=host on any published ports. :-( # 3. We only use one replica since session affinity is another point of investigation in Swarm services: enterprise-gateway: image: elyra/enterprise-gateway:dev user: root volumes: - "/var/run/docker.sock:/var/run/docker.sock" # It's often helpful to mount the kernelspec files from the host into the container. # Since this could be deployed to a swarm cluster, it is recommended in this case that these be mounted on an # NFS volume available to all nodes of the cluster, or a volume plugin is used instead of a bind mount. # - /usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels environment: - "EG_DOCKER_NETWORK=${EG_DOCKER_NETWORK:-enterprise-gateway_enterprise-gateway}" - "EG_KERNEL_LAUNCH_TIMEOUT=${EG_KERNEL_LAUNCH_TIMEOUT:-60}" - "EG_KERNEL_INFO_TIMEOUT=${EG_KERNEL_INFO_TIMEOUT:-60}" - "EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-3600}" # Use double-defaulting for B/C. 
Support for EG_KERNEL_WHITELIST will be removed in a future release - "EG_ALLOWED_KERNELS=${EG_ALLOWED_KERNELS:-${EG_KERNEL_WHITELIST:-'r_docker','python_docker','python_tf_docker','python_tf_gpu_docker','scala_docker'}}" - "EG_MIRROR_WORKING_DIRS=${EG_MIRROR_WORKING_DIRS:-False}" - "EG_RESPONSE_PORT=${EG_RESPONSE_PORT:-8877}" - "KG_PORT=${KG_PORT:-8888}" networks: - "enterprise-gateway" labels: app: "enterprise-gateway" component: "enterprise-gateway" deploy: replicas: 1 endpoint_mode: dnsrr labels: app: "enterprise-gateway" component: "enterprise-gateway" enterprise-gateway-proxy: image: haproxy:alpine ports: - ${KG_PORT:-8888}:8888 - 9088:9088 networks: - "enterprise-gateway" entrypoint: "" command: - /bin/sh - -c - | cat <<EOF > /usr/local/etc/haproxy/haproxy.cfg global maxconn 4096 daemon log stdout format raw local0 defaults log global option httplog mode http option http-server-close option dontlognull option redispatch option contstats retries 3 backlog 10000 timeout client 25s timeout connect 5s timeout server 25s timeout tunnel 3600s timeout http-keep-alive 1s timeout http-request 15s timeout queue 30s timeout tarpit 60s default-server inter 3s rise 2 fall 3 option forwardfor listen stats bind :9088 mode http stats enable stats refresh 10s stats realm Haproxy\ Statistics stats show-node stats uri / resolvers docker nameserver dns 127.0.0.11:53 hold valid 1s frontend proxy bind 0.0.0.0:8888 maxconn 10000 option forwardfor default_backend enterprise-gateway backend enterprise-gateway dynamic-cookie-key ENTERPRISE_KEY cookie SRVID insert dynamic server-template enterprise-gateway 2 enterprise-gateway:8888 check resolvers docker EOF exec /docker-entrypoint.sh haproxy -f /usr/local/etc/haproxy/haproxy.cfg networks: enterprise-gateway: name: enterprise-gateway driver: overlay ================================================ FILE: etc/docker/enterprise-gateway/Dockerfile ================================================ ARG BASE_CONTAINER=jupyter/minimal-notebook:2023-03-13 FROM $BASE_CONTAINER ARG SPARK_VERSION ENV SPARK_VER=$SPARK_VERSION ENV SPARK_HOME=/opt/spark RUN mamba install --quiet --yes \ cffi \ send2trash \ requests \ future \ pycryptodomex && \ conda clean --all && \ fix-permissions $CONDA_DIR && \ fix-permissions /home/$NB_USER USER root RUN apt update && apt install -yq curl openjdk-8-jdk ENV JAVA_HOME=/usr/lib/jvm/java RUN ln -s $(readlink -f /usr/bin/javac | sed "s:/bin/javac::") ${JAVA_HOME} # Download and install Spark RUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \ tar -xz -C /opt && \ ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME && \ mkdir -p /usr/hdp/current && \ ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 /usr/hdp/current/spark2-client # Install Enterprise Gateway wheel and kernelspecs COPY jupyter_enterprise_gateway*.whl /tmp/ RUN pip install /tmp/jupyter_enterprise_gateway*.whl && \ rm -f /tmp/jupyter_enterprise_gateway*.whl ADD jupyter_enterprise_gateway_kernelspecs*.tar.gz /usr/local/share/jupyter/kernels/ ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/ COPY start-enterprise-gateway.sh /usr/local/bin/ RUN chown jovyan:users /usr/local/bin/start-enterprise-gateway.sh && \ chmod 0755 /usr/local/bin/start-enterprise-gateway.sh && \ touch /usr/local/share/jupyter/enterprise-gateway.log && \ chown -R jovyan:users /usr/local/share/jupyter /usr/local/bin/kernel-launchers && \ chmod 0666 /usr/local/share/jupyter/enterprise-gateway.log && \ rm -f /usr/local/bin/bootstrap-kernel.sh USER jovyan CMD ["/usr/local/bin/start-enterprise-gateway.sh"] EXPOSE 8888 WORKDIR /usr/local/bin
      /usr/local/bin/bootstrap-kernel.sh

USER jovyan

CMD ["/usr/local/bin/start-enterprise-gateway.sh"]
EXPOSE 8888
WORKDIR /usr/local/bin

================================================
FILE: etc/docker/enterprise-gateway/README.md
================================================
This image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on jupyter/minimal-notebook, with Apache Spark 2.4.6 installed on top.

**Note: If you're looking for the YARN-based image of this name, it has been moved to [elyra/enterprise-gateway-demo](https://hub.docker.com/r/elyra/enterprise-gateway-demo/).**

# What it Gives You

- [Jupyter Enterprise Gateway](https://github.com/jupyter-server/enterprise_gateway)
- Python/R/Toree kernels that can be launched and distributed across a managed cluster.

# Basic Use

Pull this image, along with all of the elyra/kernel-\* images, to each of your managed nodes. Although manual seeding of images across the cluster is not required, it is highly recommended, since kernel startup can time out and image downloads can seriously eat into that window.

## Kubernetes

Enterprise Gateway is deployed into Kubernetes using [Helm](https://helm.sh/). See the [Kubernetes section of our Operator's Guide](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/operators/deploy-kubernetes.html) for further details.

## Docker Swarm

Download the [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml) file and make any necessary changes for your configuration. The compose file consists of three pieces: the Enterprise Gateway container itself, a proxy layer container, and a Docker network. We recommend that a volume be used so that the kernelspec files can be accessed outside of the container, since we've found those to require post-deployment modifications from time to time.

## Docker (Traditional)

Same instructions as for Docker Swarm using [`docker-compose.yml`](https://github.com/jupyter-server/enterprise_gateway/blob/main/etc/docker/docker-compose.yml). Please note that you can still run Enterprise Gateway as a traditional docker container within a Docker Swarm cluster, yet have the kernel containers launched as Docker Swarm services, since how the kernels are launched is a function of their configured process proxy class.

For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).

================================================
FILE: etc/docker/enterprise-gateway/start-enterprise-gateway.sh
================================================
#!/bin/bash

#export ANACONDA_HOME=/opt/conda
#export JAVA_HOME=/usr/java/default
#export PYSPARK_PYTHON=${ANACONDA_HOME}/bin/python
#export PATH=${ANACONDA_HOME}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${JAVA_HOME}/bin

# Enterprise Gateway variables
export EG_SSH_PORT=${EG_SSH_PORT:-2122}

# Kernel Gateway looks for KG_ for the following. For the sake of consistency
# we want to use EG_. The following produces the default value in EG_ (unless
# set in the env), with the ultimate override of KG_ from the env.
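# For example (hypothetical values): starting the container with only EG_PORT=9999
# set yields KG_PORT=9999, whereas an explicit KG_PORT in the env always wins:
#   docker run -e EG_PORT=9999 ...                  -> KG_PORT=9999
#   docker run -e EG_PORT=9999 -e KG_PORT=8899 ...  -> KG_PORT=8899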
export EG_IP=${EG_IP:-0.0.0.0}
export KG_IP=${KG_IP:-${EG_IP}}
export EG_PORT=${EG_PORT:-8888}
export KG_PORT=${KG_PORT:-${EG_PORT}}
export EG_PORT_RETRIES=${EG_PORT_RETRIES:-0}
export KG_PORT_RETRIES=${KG_PORT_RETRIES:-${EG_PORT_RETRIES}}

# To use tunneling set this variable to 'True' (may need to run as root).
export EG_ENABLE_TUNNELING=${EG_ENABLE_TUNNELING:-False}
export EG_LIST_KERNELS=${EG_LIST_KERNELS:-True}
export EG_LOG_LEVEL=${EG_LOG_LEVEL:-DEBUG}
export EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-43200}  # default to 12 hours
export EG_CULL_INTERVAL=${EG_CULL_INTERVAL:-60}
export EG_CULL_CONNECTED=${EG_CULL_CONNECTED:-False}

EG_ALLOWED_KERNELS=${EG_ALLOWED_KERNELS:-${EG_KERNEL_WHITELIST:-"null"}}
# sed is used to strip off surrounding brackets as they should no longer be included.
export EG_ALLOWED_KERNELS=`echo ${EG_ALLOWED_KERNELS} | sed 's/[][]//g'`
export EG_DEFAULT_KERNEL_NAME=${EG_DEFAULT_KERNEL_NAME:-python_docker}
export EG_KERNEL_INFO_TIMEOUT=${EG_KERNEL_INFO_TIMEOUT:-60}

# Determine whether the kernels-allowed list should be added to the start command.
# This is conveyed via a 'null' value for the env - which indicates no kernel names
# were used in the helm chart or docker-compose yaml.
allowed_kernels_option=""
if [ "${EG_ALLOWED_KERNELS}" != "null" ]; then
  # Update to --KernelSpecManager.allowed_kernelspecs once jupyter_client >= 7 can be supported
  allowed_kernels_option="--KernelSpecManager.whitelist=[${EG_ALLOWED_KERNELS}]"
fi

echo "Starting Jupyter Enterprise Gateway..."

exec jupyter enterprisegateway \
  --log-level=${EG_LOG_LEVEL} ${allowed_kernels_option} \
  --RemoteMappingKernelManager.cull_idle_timeout=${EG_CULL_IDLE_TIMEOUT} \
  --RemoteMappingKernelManager.cull_interval=${EG_CULL_INTERVAL} \
  --RemoteMappingKernelManager.cull_connected=${EG_CULL_CONNECTED} \
  --RemoteMappingKernelManager.default_kernel_name=${EG_DEFAULT_KERNEL_NAME} \
  --RemoteMappingKernelManager.kernel_info_timeout=${EG_KERNEL_INFO_TIMEOUT}

================================================
FILE: etc/docker/enterprise-gateway-demo/Dockerfile
================================================
ARG HUB_ORG
ARG SPARK_VERSION
ARG BASE_CONTAINER=${HUB_ORG}/demo-base:${SPARK_VERSION}
FROM $BASE_CONTAINER

# An ARG declared before a FROM is outside of a build stage,
# so it can't be used in any instruction after a FROM.
# To use the default value of an ARG declared before the first FROM
# use an ARG instruction without a value inside of a build stage:
ARG SPARK_VERSION

ENV NB_USER="jovyan"
ENV SPARK_VER=${SPARK_VERSION}

USER $NB_USER

# Install Enterprise Gateway wheel and kernelspecs
COPY jupyter_enterprise_gateway*.whl /tmp/
RUN pip install /tmp/jupyter_enterprise_gateway*.whl
ADD jupyter_enterprise_gateway_kernelspecs*.tar.gz /usr/local/share/jupyter/kernels/

USER root
RUN fix-permissions /usr/local/share/jupyter/kernels/
COPY start-enterprise-gateway.sh.template /usr/local/bin/start-enterprise-gateway.sh
RUN chown $NB_USER: /usr/local/bin/start-enterprise-gateway.sh && \
    chmod +x /usr/local/bin/start-enterprise-gateway.sh

USER $NB_USER

# Massage kernelspecs to docker image env...
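# Note: the sed expressions below insert a '|' placeholder that the subsequent
# tr converts into a newline - a portable way to splice new lines into kernel.json.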
# Create symbolic link to preserve hdp-related directories
# Copy toree jar from install to scala kernelspec lib directory
# Add YARN_CONF_DIR to each env stanza, add alternate-sigint to vanilla toree
RUN mkdir -p /tmp/byok/kernels && \
    cp /usr/local/share/jupyter/kernels/spark_${SPARK_VER}_scala/lib/*.jar /usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/lib && \
    cp /usr/local/share/jupyter/kernels/spark_${SPARK_VER}_scala/lib/*.jar /usr/local/share/jupyter/kernels/spark_scala_yarn_client/lib && \
    cd /usr/local/share/jupyter/kernels && \
    for dir in spark_*; do cat $dir/kernel.json | sed s/'"env": {'/'"env": {| "YARN_CONF_DIR": "\/usr\/hdp\/current\/hadoop\/etc\/hadoop",'/ | tr '|' '\n' > xkernel.json; mv xkernel.json $dir/kernel.json; done && \
    cat spark_${SPARK_VER}_scala/kernel.json | sed s/'"__TOREE_OPTS__": "",'/'"__TOREE_OPTS__": "--alternate-sigint USR2",'/ | tr '|' '\n' > xkernel.json; mv xkernel.json spark_${SPARK_VER}_scala/kernel.json && \
    touch /usr/local/share/jupyter/enterprise-gateway.log && \
    chmod 0666 /usr/local/share/jupyter/enterprise-gateway.log

USER root

# install boot script
COPY bootstrap-enterprise-gateway.sh /usr/local/bin/bootstrap-enterprise-gateway.sh
RUN chown $NB_USER: /usr/local/bin/bootstrap-enterprise-gateway.sh && \
    chmod 0700 /usr/local/bin/bootstrap-enterprise-gateway.sh

ENTRYPOINT ["/usr/local/bin/bootstrap-enterprise-gateway.sh"]
CMD ["--help"]
EXPOSE 8888
USER $NB_USER

================================================
FILE: etc/docker/enterprise-gateway-demo/README.md
================================================
Built on [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/), this image adds support for [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) to better demonstrate running Python, R and Scala kernels in YARN-cluster mode.

# What it Gives You

- [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) base functionality
- [Jupyter Enterprise Gateway](https://github.com/jupyter-incubator/enterprise_gateway)
- Python/R/Toree kernels that target YARN-cluster mode

# Basic Use

**elyra/enterprise-gateway-demo** can be used as a combined YARN cluster in which the kernels run locally in YARN-cluster mode, or combined with a different instance of itself or an [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/) instance to more easily view that kernels are running remotely.

Prior to using either mode, we recommend you create a local docker network. This better isolates the container(s) and avoids port collisions that might come into play if you're using a gateway-enabled Notebook image on the same host. Here's a simple way to create a docker network:

`docker network create -d bridge jeg`

Once created, you just add `--net jeg` to the enterprise gateway run commands. Using `--net jeg` when creating instances of the gateway-enabled Notebook image is not necessary.

### Combined Mode

To run the image as a combined YARN/Enterprise Gateway instance, use the following command:

`docker run -itd --rm -p 8888:8888 -p 8088:8088 -p 8042:8042 --net=jeg elyra/enterprise-gateway-demo --elyra`

To produce a general usage statement, the following can be used:

`docker run --rm elyra/enterprise-gateway-demo --help`

To run the enterprise-gateway-demo container in an interactive mode, where enterprise gateway is manually started within the container, use the following:
`docker run -it --rm -p 8888:8888 -p 8088:8088 -p 8042:8042 --net=jeg elyra/enterprise-gateway-demo /bin/bash`

Once in the container, enterprise-gateway-demo can be started using `sudo -u jovyan /usr/local/bin/start-enterprise-gateway.sh`

### Dual Mode

To get a better idea that kernels are running remotely, you can invoke elyra/enterprise-gateway-demo to be the YARN master or use [elyra/demo-base](https://hub.docker.com/r/elyra/demo-base/).

To invoke the YARN master using elyra/demo-base:

`docker run -d --rm -h yarnmaster --name yarnmaster -p 8088:8088 -p 8042:8042 --net jeg elyra/demo-base --yarn`

or using elyra/enterprise-gateway-demo:

`docker run -d --rm -h yarnmaster --name yarnmaster -p 8088:8088 -p 8042:8042 --net jeg elyra/enterprise-gateway-demo --yarn`

Then, invoke elyra/enterprise-gateway-demo as purely an Enterprise Gateway host that indicates the name of its YARN master:

`docker run -it --rm -h elyra --name elyra -p 8888:8888 --net jeg -e YARN_HOST=yarnmaster elyra/enterprise-gateway-demo --elyra`

**Tip:** YARN logs can be accessed via the host system's public IP on port `8042` rather than using the container's `hostname:8042`, while the YARN Resource Manager can be accessed via the container's `hostname:8088` port.

#### Bring Your Own Kernels

elyra/enterprise-gateway-demo sets up `JUPYTER_PATH` to point to `/tmp/byok`. This enables the ability to use docker volumes to mount your own set of kernelspec files. The kernelspecs must reside in a `kernels` directory. You can mount to the appropriate point in one of two ways via the docker `-v` option: `-v <dir>:/tmp/byok` or `-v <kernels-dir>:/tmp/byok/kernels`

To confirm Enterprise Gateway is detecting the new kernelspecs, monitor the log (`docker logs -f <container-name>`) and issue a refresh from the gateway-enabled Notebook instance. Each refresh of the notebook's tree view triggers a refresh of the set of kernelspecs in Enterprise Gateway.

# Connecting a client notebook

You can use any gateway-enabled notebook server to hit the running docker container. Note: Given the size of enterprise-gateway-demo when combined with a YARN/Spark installation, it is recommended that you have at least 4GB of memory allocated for your docker image in order to run kernels (particularly the Toree/Scala kernel).

# Recognized Environment Variables

The following environment variables are recognized during startup of the container and can be specified via docker's `-e` option. These will rarely need to be modified.

`KG_IP`: specifies the IP address of enterprise gateway. This should be a public IP. Default = 0.0.0.0

`KG_PORT`: specifies the port that enterprise gateway is listening on. This port should be mapped to a host port via `-p`. Default = 8888

`KG_PORT_RETRIES`: specifies the number of retries due to port conflicts that will be attempted. Default = 0

`EG_REMOTE_HOSTS`: specifies a comma-separated list of hostnames which can be used to run YARN-client kernels. Default = the YARN host (the `HOSTNAME` placeholder is substituted at startup)

`EG_YARN_ENDPOINT`: specifies the HTTP endpoint of the YARN Resource Manager. Default = `http://<yarn-host>:8088/ws/v1/cluster`

`EG_SSH_PORT`: specifies the port of the SSH server. This container is set up to use port `2122`. This value should not be changed. Default = 2122

`EG_ENABLE_TUNNELING`: specifies whether port tunneling will be used. This value is currently `False` because ssh tunneling is not working unless Enterprise Gateway is run as the root user. This can be accomplished by starting the container with `bash` as the command and running `start-enterprise-gateway.sh` directly (sans `sudo`).
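For example, a hypothetical invocation that maps Enterprise Gateway to host port `9999` and allows a few port-conflict retries (the values here are illustrative, not defaults):

`docker run -itd --rm -p 9999:8888 -e KG_PORT_RETRIES=5 --net=jeg elyra/enterprise-gateway-demo --elyra`

A gateway-enabled client can then target the mapped port, e.g. `jupyter notebook --gateway-url=http://localhost:9999`.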
NOTE: Dual Mode functionality is only available in tags 0.9.0+

================================================
FILE: etc/docker/enterprise-gateway-demo/bootstrap-enterprise-gateway.sh
================================================
#!/bin/bash

# This file is a copy of /etc/bootstrap.sh but invokes Jupyter Enterprise Gateway in its "daemon" case.
# It also checks for --help or no options before starting anything...

CMD=${1:-"--help"}

if [[ "$CMD" == "--help" ]]; then
  echo ""
  echo "usage: docker run -it[d] --rm -h <hostname> -p 8888:8888 [-p 8088:8088 -p 8042:8042] <command>"
  echo ""
  echo "where <command> is:"
  echo "  --gateway ... Invokes Enterprise Gateway as user 'jovyan' directly. Useful for daemon behavior."
  echo "  --yarn ... Runs container as standalone YARN master - no Enterprise Gateway is started."
  echo "  --help ... Produces this message."
  echo "  <anything else> ... Invokes '<anything else>'. Use '/bin/bash' to explore within the container."
  echo ""
  echo "Tips:"
  echo "1) You can target a different YARN cluster by using '-e YARN_HOST=<hostname>'"
  echo "2) You can \"bring your own kernels\" by mounting to /tmp/byok/kernels (e.g., -v my-kernels-dir:/tmp/byok/kernels)"
  echo "3) It is advised that port '8888' be mapped to a host port, although the host port number is not"
  echo "   required to be '8888'. Mapping of ports '8088' and '8042' is also strongly recommended"
  echo "   for YARN application monitoring if running standalone."
  exit 0
elif [[ "$CMD" != "--gateway" && "$CMD" != "--yarn" ]]; then
  # invoke the given command w/o starting YARN
  "$*"
  exit 0
fi

: ${YARN_HOST:=$HOSTNAME}

export FROM="EG"
/usr/local/bin/bootstrap-yarn-spark.sh $*

# Note that '--yarn' functionality is a subset of '--gateway' functionality
if [[ "$CMD" == "--gateway" ]]; then
  sudo sed -i "s/HOSTNAME/$YARN_HOST/" /usr/local/bin/start-enterprise-gateway.sh
  /usr/local/bin/start-enterprise-gateway.sh
fi

exit 0

================================================
FILE: etc/docker/enterprise-gateway-demo/start-enterprise-gateway.sh.template
================================================
#!/bin/bash

# Allow for mounts of kernelspecs to /tmp/byok/kernels
export JUPYTER_PATH=${JUPYTER_PATH:-/tmp/byok}

# Enterprise Gateway variables
export EG_REMOTE_HOSTS=${EG_REMOTE_HOSTS:-HOSTNAME}
export EG_SSH_PORT=${EG_SSH_PORT:-2122}
export KG_IP=${KG_IP:-0.0.0.0}
export KG_PORT=${KG_PORT:-8888}
export KG_PORT_RETRIES=${KG_PORT_RETRIES:-0}

# To use tunneling set this variable to 'True' and run as root.
export EG_ENABLE_TUNNELING=${EG_ENABLE_TUNNELING:-False}
export EG_LOG_LEVEL=${EG_LOG_LEVEL:-DEBUG}
export EG_CULL_IDLE_TIMEOUT=${EG_CULL_IDLE_TIMEOUT:-43200}  # default to 12 hours
export EG_CULL_CONNECTED=${EG_CULL_CONNECTED:-True}

echo "Starting Jupyter Enterprise Gateway..."

jupyter enterprisegateway \
  --log-level=${EG_LOG_LEVEL} \
  --EnterpriseGatewayApp.inherited_envs=PYSPARK_PYTHON \
  --MappingKernelManager.cull_idle_timeout=${EG_CULL_IDLE_TIMEOUT} \
  --MappingKernelManager.cull_interval=30 \
  --MappingKernelManager.cull_connected=${EG_CULL_CONNECTED} 2>&1 | tee /usr/local/share/jupyter/enterprise-gateway.log

================================================
FILE: etc/docker/kernel-image-puller/Dockerfile
================================================
ARG BASE_CONTAINER=python:3.10-bookworm
FROM $BASE_CONTAINER

WORKDIR /usr/src/app

COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY kernel_image_puller.py ./
COPY image_fetcher.py ./

ARG OS=Debian_12

# Install crictl for use by KIP when non-docker installations are encountered.
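# (crictl talks to CRI-compatible runtimes such as containerd via the endpoint
#  configured in KIP_CRI_ENDPOINT - see kernel_image_puller.py.)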
RUN mkdir -p /etc/apt/keyrings && \
    curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubic.gpg && \
    echo "deb [signed-by=/etc/apt/keyrings/kubic.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${OS}/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list && \
    apt-get update && apt-get install -y cri-tools

RUN echo $PATH

# The following environment variables are supported - defaults provided. Override as needed.
ENV KIP_GATEWAY_HOST=http://localhost:8888
ENV KIP_INTERVAL=300
ENV KIP_LOG_LEVEL=INFO
ENV KIP_NUM_PULLERS=2
ENV KIP_NUM_RETRIES=3
ENV KIP_PULL_POLICY='IfNotPresent'

CMD [ "python", "./kernel_image_puller.py" ]

================================================
FILE: etc/docker/kernel-image-puller/README.md
================================================
This image is responsible for contacting the configured [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) instance within a Kubernetes or Docker Swarm cluster and pulling the set of kernel-based images to the node on which it is running.

# What it Gives You

- The ability to add new nodes and have kernel images on those nodes automatically populated.
- The ability to configure new kernelspecs that use different images and have those images pulled to all cluster nodes.

# Basic Use

Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configure it for the appropriate environment. As part of that deployment, the Kernel Image Puller (KIP) will be launched on each node. On Kubernetes, this is accomplished via a DaemonSet; on Docker Swarm, via a global service. KIP then contacts the configured Enterprise Gateway instance, fetches the set of in-use kernelspecs, parses out the image names, and pulls those images.

There are a few points of configuration listed below - all of which are environment variables (defaults in parentheses).

- `KIP_GATEWAY_HOST` (`http://localhost:8888`)
- `KIP_INTERVAL` (`300`)
- `KIP_LOG_LEVEL` (`INFO`)
- `KIP_NUM_PULLERS` (`2`)
- `KIP_NUM_RETRIES` (`3`)
- `KIP_PULL_POLICY` (`IfNotPresent`)
- `KIP_IMAGE_FETCHER` (`KernelSpecsFetcher`)

For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).

================================================
FILE: etc/docker/kernel-image-puller/image_fetcher.py
================================================
"""image name fetcher abstract class and concrete implementations"""
import abc
import importlib
import os

import requests
import yaml
from kubernetes import client, config
from kubernetes.client import ApiException


class ImageNameFetcher(metaclass=abc.ABCMeta):
    """Abstract class to extend for fetching image names."""

    @abc.abstractmethod
    def fetch_image_names(self) -> set[str]:
        """
        Abstract method to fetch image names.

        :return: A set of image names.
        """
        pass


class KernelSpecsFetcher(ImageNameFetcher):
    """Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.

    For process-proxy kernelspecs, the image names are contained in the config stanza
    - which resides in the process-proxy stanza located in the metadata.
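    For example, an abbreviated (hypothetical) kernelspec metadata stanza this
    fetcher understands:

        "metadata": {
            "process_proxy": {
                "config": {"image_name": "elyra/kernel-py:dev"}
            }
        }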
""" def __init__(self, logger): """ KIP_AUTH_TOKEN: enterprise-gateway auth token KIP_GATEWAY_HOST: enterprise-gateway host KIP_VALIDATE_CERT: validate cert or not """ self.logger = logger self.auth_token = os.getenv("KIP_AUTH_TOKEN", None) self.gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:18888") self.validate_cert = os.getenv("KIP_VALIDATE_CERT", "False").lower() == "true" def get_kernel_specs(self): """Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs""" end_point = f"{self.gateway_host}/api/kernelspecs" self.logger.info(f"Fetching kernelspecs from '{end_point}' ...") headers = {"Content-Type": "application/json"} if self.auth_token: end_point += f"?token={self.auth_token}" headers.update({"Authorization": f"token {self.auth_token}"}) resp = requests.get(end_point, headers=headers, verify=self.validate_cert, timeout=60) if not resp.ok: msg = f"Gateway server response: {resp.status_code}" raise requests.exceptions.HTTPError(msg) return resp.json() def fetch_image_names(self) -> set[str]: """ fetch image names from enterprise gateway kernelspecs """ k_specs = None try: k_specs_response = self.get_kernel_specs() k_specs = k_specs_response.get("kernelspecs") except Exception as ex: self.logger.error( f"Got exception attempting to retrieve kernelspecs - retrying. Exception was: {ex}" ) if k_specs is None: return None # Locate the configured images within the kernel_specs and add to set for duplicate management images = set() for key in k_specs: metadata = k_specs.get(key).get("spec").get("metadata") if metadata is not None: config_parent = metadata.get("process_proxy") if config_parent is None: # See if this is a provisioner config_parent = metadata.get("kernel_provisioner") if config_parent is not None: config = config_parent.get("config") if config is not None: image_name = config.get("image_name") if image_name is not None: images.add(image_name) executor_image_name = config.get("executor_image_name") if executor_image_name is not None: images.add(executor_image_name) return images class StaticListFetcher(ImageNameFetcher): """ A class for fetching image names from a static list of images provided by an environment variable. Inherits from `ImageNameFetcher`, which defines a `fetch_images()` method that must be implemented. This class reads the `KIP_IMAGES` environment variable, which should be a comma-separated list of image names. It then splits the list into individual image names and returns them as a set. Attributes: logger (logging.Logger): The logger to use for logging messages. Methods: fetch_images(): Fetches image names from the `KIP_IMAGES` environment variable and returns them as a set. """ def __init__(self, logger) -> None: """ init method """ self.logger = logger def fetch_image_names(self) -> set[str]: """ KIP_IMAGES: comma seperated list of image names """ images = os.getenv("KIP_IMAGES", "").split(",") return set(images) class ConfigMapImagesFetcher(ImageNameFetcher): """ A class for fetching image names from a Kubernetes ConfigMap. Inherits from `ImageNameFetcher`, which defines a `fetch_images()` method that must be implemented. This class reads the `KIP_CM_NAMESPACE`, `KIP_CM_NAME`, and `KIP_CM_KEY_NAME` environment variables to determine the namespace, name, and key name of the ConfigMap containing the image names. It then reads the specified ConfigMap and extracts the image names from the specified key, which should be a YAML list of image names. 
    Attributes:
        logger (logging.Logger): The logger to use for logging messages.
        namespace (str): The namespace containing the ConfigMap.
        name (str): The name of the ConfigMap.
        key_name (str): The name of the key containing the YAML list of image names.

    Methods:
        fetch_image_names(): Fetches image names from the specified ConfigMap and key and returns them as a set.
    """

    def __init__(self, logger) -> None:
        """
        Initializes a new instance of the class with the specified logger and environment variables.

        KIP_CM_NAMESPACE: namespace the configmap is in
        KIP_CM_NAME: the name of the config map
        KIP_CM_KEY_NAME: the key name
        """
        self.logger = logger
        self.namespace = os.getenv("KIP_CM_NAMESPACE", "enterprise-gateway")
        self.name = os.getenv("KIP_CM_NAME", "kernel-images")
        self.key_name = os.getenv("KIP_CM_KEY_NAME", "image-names")

    def fetch_image_names(self) -> set[str]:
        """
        Fetch image names by parsing the configmap.

        This will load the in-cluster context; the pod's service account
        should have access to get the configmap.
        """
        config.load_incluster_config()
        v1 = client.CoreV1Api()
        config_map = None
        try:
            config_map = v1.read_namespaced_config_map(name=self.name, namespace=self.namespace)
        except ApiException as e:
            if e.status == 404:
                self.logger.error(f"ConfigMap {self.name} not found in namespace {self.namespace}")
            else:
                # Handle other ApiException errors
                self.logger.error(f"Error retrieving ConfigMap: {e}")

        if config_map and self.key_name in config_map.data:
            images = config_map.data[self.key_name]
            image_list = []
            try:
                image_list = yaml.safe_load(images)
            except yaml.YAMLError as e:
                # was: self.logger("Error parsing YAML:", e) - the logger object is not callable
                self.logger.error(f"Error parsing YAML: {e}")
            return set(image_list)  # return a set to match the declared return type
        return set()


class CombinedImagesFetcher(ImageNameFetcher):
    """
    A class for fetching image names from multiple fetchers.

    Inherits from `ImageNameFetcher`, which defines a `fetch_image_names()` method that must be implemented.

    This class initializes a list of fetchers based on the `KIP_INTERNAL_FETCHERS` environment variable,
    which should be a comma-separated list of fetcher class names. It then calls the `fetch_image_names()`
    method on each fetcher and combines the results into a set of unique image names.

    Attributes:
        logger (logging.Logger): The logger to use for logging messages.
        fetchers (list): A list of fetchers to use for fetching image names.

    Methods:
        fetch_image_names(): Fetches image names from all fetchers and returns them as a set.
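    For example (hypothetical setting):

        KIP_INTERNAL_FETCHERS=KernelSpecsFetcher,ConfigMapImagesFetcher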
""" def __init__(self, logger): """ KIP_INTERNAL_FETCHERS: fetchers used internally to get image names """ self.logger = logger fetcher_names = os.getenv("KIP_INTERNAL_FETCHERS", "KernelSpecsFetcher").split(',') self.fetchers = [] module = importlib.import_module("image_fetcher") args = (logger,) for f in fetcher_names: fetcher = getattr(module, f)(*args) self.fetchers.append(fetcher) def fetch_image_names(self) -> set[str]: """ fetch image names from internal fetchers """ images = set() for f in self.fetchers: images.update(f.fetch_image_names()) return images ================================================ FILE: etc/docker/kernel-image-puller/kernel_image_puller.py ================================================ """A kernel image puller.""" import importlib import logging import os import queue import time from subprocess import CalledProcessError, run from threading import Thread from typing import List, Optional from docker.client import DockerClient from docker.errors import NotFound # initialize root logger logging.basicConfig(format="[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s") log_level = os.getenv("KIP_LOG_LEVEL", "INFO") class KernelImagePuller: """A kernel image puller.""" POLICY_IF_NOT_PRESENT = "IfNotPresent" POLICY_ALWAYS = "Always" policies = (POLICY_IF_NOT_PRESENT, POLICY_ALWAYS) DOCKER_CLIENT = "docker" CONTAINERD_CLIENT = "containerd" supported_container_runtimes = (DOCKER_CLIENT, CONTAINERD_CLIENT) def __init__(self, kip_logger, image_fetcher): """Initialize the puller.""" self.interval = None self.container_runtime = None self.runtime_endpoint = None self.default_container_registry = None self.log = kip_logger self.worker_queue = None self.threads = [] self.pulled_images = set() self.num_pullers = None self.num_retries = None self.policy = None self.image_fetcher = image_fetcher self.load_static_env_values() def load_static_env_values(self): """Load the static environment values.""" self.num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2")) self.num_retries = int(os.getenv("KIP_NUM_RETRIES", "3")) self.policy = os.getenv("KIP_PULL_POLICY", KernelImagePuller.POLICY_IF_NOT_PRESENT) self.default_container_registry = os.getenv("KIP_DEFAULT_CONTAINER_REGISTRY", "") self.runtime_endpoint = os.getenv( "KIP_CRI_ENDPOINT", "unix:///run/containerd/containerd.sock" ) self.container_runtime = self.get_container_runtime() # Add authentication token support to KIP self.interval = int(os.getenv("KIP_INTERVAL", "300")) if self.policy not in KernelImagePuller.policies: logger.warning( f"Invalid pull policy detected in KIP_PULL_POLICY: '{self.policy}'. " f"Using policy '{KernelImagePuller.POLICY_IF_NOT_PRESENT}'." 
            )
            self.policy = KernelImagePuller.POLICY_IF_NOT_PRESENT

        logger.info("Starting Kernel Image Puller with the following parameters:")
        logger.info(f"KIP_INTERVAL: {self.interval} secs")
        logger.info(f"KIP_NUM_PULLERS: {self.num_pullers}")
        logger.info(f"KIP_NUM_RETRIES: {self.num_retries}")
        logger.info(f"KIP_PULL_POLICY: {self.policy}")
        logger.info(f"KIP_LOG_LEVEL: {log_level}")
        # logger.info(f"KIP_AUTH_TOKEN: {self.auth_token}")  # Do not print
        logger.info(f"KIP_DEFAULT_CONTAINER_REGISTRY: '{self.default_container_registry}'")
        logger.info(f"KIP_CRI_ENDPOINT: {self.runtime_endpoint}")

        if self.is_runtime_endpoint_recognized():
            logger.info(f"Detected container runtime: {self.container_runtime}")
        else:
            logger.warning(
                f"This node's container runtime interface could not be detected from "
                f"endpoint: {self.runtime_endpoint}, proceeding with {self.container_runtime} client..."
            )

    def start(self):
        """Start the puller."""
        self.log.info("Starting Kernel Image Puller process.")
        self.initialize_workers()
        wait_interval = 5  # Start with 5 seconds to ensure EG service gets started...
        time.sleep(wait_interval)
        # Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues
        # fetching the image names, wait the interval number of seconds and perform the operation again.
        while True:
            fetched = self.fetch_image_names()
            if fetched:
                # Once we have fetched kernelspecs, update wait_interval
                wait_interval = self.interval
                self.worker_queue.join()
            elif not self.is_runtime_endpoint_recognized():
                # Increase the interval since we shouldn't pound the service for kernelspecs
                wait_interval = self.interval

            logger.info(f"Sleeping {wait_interval} seconds to fetch image names...\n")
            time.sleep(wait_interval)

    def initialize_workers(self):
        """Initialize the workers."""
        self.worker_queue = queue.Queue()
        for i in range(self.num_pullers):
            t = Thread(target=self.image_puller, name=f"t{(i + 1)}")
            t.start()
            self.threads.append(t)

    def get_container_runtime(self) -> Optional[str]:
        """Determine the container runtime from the KIP_CRI_ENDPOINT env."""
        if KernelImagePuller.DOCKER_CLIENT in self.runtime_endpoint:
            return KernelImagePuller.DOCKER_CLIENT
        # This will essentially be the default to use in case we don't recognize the endpoint.
        return KernelImagePuller.CONTAINERD_CLIENT

    def is_runtime_endpoint_recognized(self) -> bool:
        """Check if the runtime endpoint is recognized."""
        return (
            KernelImagePuller.DOCKER_CLIENT in self.runtime_endpoint
            or KernelImagePuller.CONTAINERD_CLIENT in self.runtime_endpoint
        )

    def fetch_image_names(self):
        """
        Fetches image names and adds them to a worker queue for processing.

        Returns:
            bool: True if at least one image name was found and added to the worker queue, False otherwise.
        """
        # Locate the configured image_names within the kernel_specs and add to set for duplicate management
        image_names = self.image_fetcher.fetch_image_names()
        if not image_names:
            return False

        # Add the image names to the name queue to be pulled
        for image_name in image_names:
            self.worker_queue.put_nowait(image_name)
        return True

    def image_puller(self):
        """Thread-based puller.

        Gets image name from the queue and attempts to pull the image. Any issues, except for
        NotFound, are retried up to num_retries times. Once the image has been pulled, it's not
        found, or the retries have been exceeded, the queue task is marked as done.
""" while True: logger.debug("Waiting for new image to pull") image_name = self.worker_queue.get() self.log.info(f"Task received to pull image: {image_name}") if image_name is None: break i = 0 while i < self.num_retries: try: self.pull_image(image_name) break except Exception as ex: i += 1 if i < self.num_retries: logger.warning( f"Attempt {i} to pull image '{image_name}' encountered exception - retrying. " f"Exception was: {ex}." ) else: logger.error( f"Attempt {i} to pull image '{image_name}' failed with exception: {ex}" ) self.worker_queue.task_done() def pull_image(self, image_name): """Pulls the image. If the policy is `IfNotPresent` the set of pulled image names is checked and, if present, the method returns. Otherwise, the pull attempt is made and the set of pulled images is updated, when successful. """ if self.policy == KernelImagePuller.POLICY_IF_NOT_PRESENT: if image_name in self.pulled_images: # Image has been pulled, but make sure it still exists. If it doesn't exist # let this drop through to actual pull logger.info( f"Image '{image_name}' already pulled and policy is '{self.policy}'. Checking existence." ) if self.image_exists(image_name): return self.pulled_images.remove(image_name) logger.warning( f"Previously pulled image '{image_name}' was not found - attempting pull..." ) elif self.image_exists(image_name): # Yet to be pulled, consider pulled if exists policy = self.policy logger.info( f"Image '{image_name}' has not been pulled but exists, and policy is '{policy}'. Skipping pull." ) self.pulled_images.add(image_name) return logger.info(f"Pulling image '{image_name}'...") if self.download_image(image_name): self.pulled_images.add(image_name) else: logger.warning(f"Image '{image_name}' was not downloaded!") def get_absolute_image_name(self, image_name: str) -> str: """Ensures the image name is prefixed with a "registry".""" # We will check for the form 'registry/repo/image:tag' if the 'registry/' prefix # is missing (based on the absence of two slashes), then we'll prefix the image # name with the KIP_DEFAULT_CONTAINER_REGISTRY env value. image_pieces = image_name.split("/") # we're missing a registry specifier, use default if present if len(image_pieces) < 3 and self.default_container_registry: return f"{self.default_container_registry}/{image_name}" return image_name # take our chances def image_exists(self, image_name: str) -> bool: """Checks for the existence of the named image using the configured container runtime.""" result = True absolute_image_name = self.get_absolute_image_name(image_name) t0 = time.time() if self.container_runtime == KernelImagePuller.DOCKER_CLIENT: try: DockerClient.from_env().images.get(absolute_image_name) except NotFound: result = False elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT: argv = ["crictl", "-r", self.runtime_endpoint, "inspecti", "-q", absolute_image_name] result = self.execute_cmd(argv) else: # invalid container runtime logger.error(f"Invalid container runtime detected: '{self.container_runtime}'!") result = False t1 = time.time() logger.debug( f"Checked existence of image '{image_name}' in {(t1 - t0):.3f} secs. 
exists = {result}" ) return result def download_image(self, image_name: str) -> bool: """Downloads (pulls) the named image using the configured container runtime.""" result = True absolute_image_name = self.get_absolute_image_name(image_name) t0 = time.time() if self.container_runtime == KernelImagePuller.DOCKER_CLIENT: try: DockerClient.from_env().images.pull(absolute_image_name) except NotFound: result = False elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT: argv = ["crictl", "-r", self.runtime_endpoint, "pull", absolute_image_name] result = self.execute_cmd(argv) else: # invalid container runtime logger.error(f"Invalid container runtime detected: '{self.container_runtime}'!") result = False t1 = time.time() if result is True: logger.info(f"Pulled image '{image_name}' in {(t1 - t0):.3f} secs.") return result def execute_cmd(self, argv: List[str]) -> bool: """Execute the given command expressed in 'argv'. If expected_output is provided it will be checked against the command's stdout after stripping off the '\n' character. """ result = True try: run(argv, capture_output=True, text=True, check=True) except CalledProcessError as cpe: error_msg = cpe.stderr[:-1] # strip off trailing newline logger.error(f"Error executing {' '.join(argv)}: {error_msg}") result = False except Exception as ex: logger.error(f"Error executing {' '.join(argv)}: {ex}") result = False return result if __name__ == "__main__": logger = logging.getLogger("kernel_image_puller") logger.setLevel(log_level) logger.info("Loading KernelImagePuller...") fetcher_class_name = os.getenv('KIP_IMAGE_FETCHER', 'KernelSpecsFetcher') args = (logger,) module = importlib.import_module("image_fetcher") fetcher = getattr(module, fetcher_class_name)(*args) kip = KernelImagePuller(logger, fetcher) kip.start() ================================================ FILE: etc/docker/kernel-image-puller/requirements.txt ================================================ docker>=3.7.2 kubernetes>=17.17.0 requests>=2.7,<3.0 ================================================ FILE: etc/docker/kernel-py/Dockerfile ================================================ # Ubuntu 18.04.1 LTS Bionic ARG BASE_CONTAINER=jupyter/scipy-notebook:2023-03-13 FROM $BASE_CONTAINER ENV PATH=$PATH:$CONDA_DIR/bin # Add debugger support RUN pip install --upgrade ipykernel RUN conda install --quiet --yes \ cffi \ future \ pycryptodomex && \ conda clean --all && \ fix-permissions $CONDA_DIR && \ fix-permissions /home/$NB_USER ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/ USER root RUN apt-get update && apt-get install -yq --no-install-recommends \ libkrb5-dev \ && rm -rf /var/lib/apt/lists/* RUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \ chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \ chown -R jovyan:users /usr/local/bin/kernel-launchers USER jovyan ENV KERNEL_LANGUAGE=python # Disble healthcheck inherited from notebook image HEALTHCHECK NONE CMD /usr/local/bin/bootstrap-kernel.sh ================================================ FILE: etc/docker/kernel-py/README.md ================================================ This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook/). 
# What it Gives You

- IPython kernel support (with debugger)
- [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook)

# Basic Use

Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configure it for the appropriate environment.

Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.

For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).

================================================
FILE: etc/docker/kernel-r/Dockerfile
================================================
# Ubuntu 18.04.1 LTS Bionic
ARG BASE_CONTAINER=quay.io/jupyter/r-notebook:r-4.5.2
FROM $BASE_CONTAINER

RUN conda install --quiet --yes \
    'r-argparse' \
    pycryptodomex && \
    conda clean --all && \
    fix-permissions $CONDA_DIR

ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/

# Switch back to root to modify ownerships
USER root

RUN apt-get update && apt-get install -y \
    less \
    curl \
    libkrb5-dev \
    && rm -rf /var/lib/apt/lists/*

RUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \
    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \
    chown -R jovyan:users /usr/local/bin/kernel-launchers

USER jovyan

ENV KERNEL_LANGUAGE=R

# Disable healthcheck inherited from notebook image
HEALTHCHECK NONE

CMD /usr/local/bin/bootstrap-kernel.sh

================================================
FILE: etc/docker/kernel-r/README.md
================================================
This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is currently built on [jupyter/r-notebook](https://hub.docker.com/r/jupyter/r-notebook/).

# What it Gives You

- IRKernel kernel support

# Basic Use

Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configure it for the appropriate environment.

Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.

For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).
================================================ FILE: etc/docker/kernel-scala/Dockerfile ================================================ ARG HUB_ORG ARG SPARK_VERSION # TODO: Restore usage of SPARK_VERSION ARG once https://github.com/jupyter/enterprise_gateway/pull/867 is merged ARG BASE_CONTAINER=$HUB_ORG/spark:v$SPARK_VERSION FROM $BASE_CONTAINER ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/ USER root # Create/setup the jovyan system user RUN adduser --system -uid 1000 jovyan --ingroup users && \ chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \ chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \ chmod 0777 /opt/spark/work-dir && \ chown -R jovyan:users /usr/local/bin/kernel-launchers USER jovyan ENV KERNEL_LANGUAGE=scala CMD /usr/local/bin/bootstrap-kernel.sh ================================================ FILE: etc/docker/kernel-scala/README.md ================================================ This image enables the use of a Scala ([Apache Toree](https://toree.apache.org/)) kernel launched from [Jupyter Enterprise Gateway](http://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster. It is built on [elyra/spark:v2.4.6](https://hub.docker.com/r/elyra/spark/) deriving from the [Apache Spark 2.4.6 release](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You - Scala (Toree) kernel support - Spark on kubernetes support from within a Jupyter Notebook # Basic Use Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
================================================ FILE: etc/docker/kernel-spark-py/Dockerfile ================================================ ARG HUB_ORG ARG TAG # Ubuntu 18.04.1 LTS Bionic ARG BASE_CONTAINER=$HUB_ORG/kernel-py:$TAG FROM $BASE_CONTAINER ARG SPARK_VERSION ENV SPARK_VER=$SPARK_VERSION ENV SPARK_HOME=/opt/spark ENV KERNEL_LANGUAGE=python ENV R_LIBS_USER=$R_LIBS_USER:${SPARK_HOME}/R/lib ENV PATH=$PATH:$SPARK_HOME/bin USER root RUN dpkg --purge --force-depends ca-certificates-java \ && apt-get update \ && apt-get install -yq --no-install-recommends \ ca-certificates \ ca-certificates-java \ openjdk-8-jdk \ less \ curl \ libssl-dev \ && rm -rf /var/lib/apt/lists/* ENV JAVA_HOME=/usr/lib/jvm/java RUN ln -s $(readlink -f /usr/bin/javac | sed "s:/bin/javac::") ${JAVA_HOME} # Download and install Spark RUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \ tar -xz -C /opt && \ ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME # Download entrypoint.sh from matching tag RUN cd /opt/ && \ wget https://raw.githubusercontent.com/apache/spark/v${SPARK_VER}/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh && \ chmod a+x /opt/entrypoint.sh && \ sed -i 's/tini -s/tini -g/g' /opt/entrypoint.sh WORKDIR $SPARK_HOME/work-dir # Ensure that work-dir is writable by everyone RUN chmod 0777 $SPARK_HOME/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] USER jovyan ================================================ FILE: etc/docker/kernel-spark-py/README.md ================================================ This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-py](https://hub.docker.com/r/elyra/kernel-py/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You - IPython kernel support (with debugger) - [Data science libraries](https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#jupyter-scipy-notebook) - Spark on kubernetes support from within a Jupyter Notebook # Basic Use Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
================================================ FILE: etc/docker/kernel-spark-r/Dockerfile ================================================ ARG HUB_ORG ARG TAG ARG BASE_CONTAINER=$HUB_ORG/kernel-r:$TAG FROM $BASE_CONTAINER ARG SPARK_VERSION USER root ENV SPARK_VER=$SPARK_VERSION ENV SPARK_HOME=/opt/spark ENV KERNEL_LANGUAGE=R ENV R_LIBS_USER=$R_LIBS_USER:${R_HOME}/library:${SPARK_HOME}/R/lib ENV PATH=$PATH:$SPARK_HOME/bin RUN dpkg --purge --force-depends ca-certificates-java \ && apt-get update \ && apt-get install -y \ ca-certificates \ ca-certificates-java \ openjdk-8-jdk \ libssl-dev \ && rm -rf /var/lib/apt/lists/* ENV JAVA_HOME=/usr/lib/jvm/java RUN ln -s $(readlink -f /usr/bin/javac | sed "s:/bin/javac::") ${JAVA_HOME} # Download and install Spark RUN curl -s https://archive.apache.org/dist/spark/spark-${SPARK_VER}/spark-${SPARK_VER}-bin-hadoop2.7.tgz | \ tar -xz -C /opt && \ ln -s ${SPARK_HOME}-${SPARK_VER}-bin-hadoop2.7 $SPARK_HOME # Download entrypoint.sh from matching tag RUN cd /opt/ && \ wget https://raw.githubusercontent.com/apache/spark/v${SPARK_VER}/resource-managers/kubernetes/docker/src/main/dockerfiles/spark/entrypoint.sh && \ chmod a+x /opt/entrypoint.sh && \ sed -i 's/tini -s/tini -g/g' /opt/entrypoint.sh WORKDIR $SPARK_HOME/work-dir # Ensure that work-dir is writable by everyone RUN chmod 0777 $SPARK_HOME/work-dir ENTRYPOINT [ "/opt/entrypoint.sh" ] USER jovyan ================================================ FILE: etc/docker/kernel-spark-r/README.md ================================================ This image enables the use of an IRKernel kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes cluster. It is built on the base image [elyra/kernel-r](https://hub.docker.com/r/elyra/kernel-r/), and adds [Apache Spark 2.4.6](https://spark.apache.org/docs/2.4.6/). Note: The ability to use the kernel within Spark within a Docker Swarm configuration probably won't yield the expected results. # What it Gives You - IRkernel kernel support - Spark on kubernetes support from within a Jupyter Notebook # Basic Use Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). 
================================================
FILE: etc/docker/kernel-tf-gpu-py/Dockerfile
================================================
# Ubuntu:xenial
ARG BASE_CONTAINER=tensorflow/tensorflow:2.9.1-gpu
FROM $BASE_CONTAINER

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -yq \
    build-essential \
    libsm6 \
    libxext-dev \
    libxrender1 \
    netcat \
    python3-dev \
    tzdata \
    unzip && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --upgrade future pycryptodomex ipykernel

ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/

USER root
RUN adduser --system --uid 1000 --gid 100 jovyan && \
    chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \
    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \
    chown -R jovyan:users /usr/local/bin/kernel-launchers

USER jovyan

ENV KERNEL_LANGUAGE=python

CMD /usr/local/bin/bootstrap-kernel.sh

================================================
FILE: etc/docker/kernel-tf-gpu-py/README.md
================================================
This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on [tensorflow/tensorflow:2.7.0-gpu-jupyter](https://hub.docker.com/r/tensorflow/tensorflow/) deriving from the [tensorflow](https://github.com/tensorflow/tensorflow) project.

# What it Gives You

- IPython kernel support supplemented with Tensorflow functionality (and debugger)

# Basic Use

Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configure it for the appropriate environment.

Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook.

For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/).

================================================
FILE: etc/docker/kernel-tf-py/Dockerfile
================================================
# Ubuntu:Bionic
# TensorFlow 2.4.0
ARG BASE_CONTAINER=jupyter/tensorflow-notebook:2023-10-20
FROM $BASE_CONTAINER

ENV KERNEL_LANGUAGE=python

ADD jupyter_enterprise_gateway_kernel_image_files*.tar.gz /usr/local/bin/

RUN conda install --quiet --yes \
    pillow \
    future \
    pycryptodomex && \
    fix-permissions $CONDA_DIR

USER root
RUN chown jovyan:users /usr/local/bin/bootstrap-kernel.sh && \
    chmod 0755 /usr/local/bin/bootstrap-kernel.sh && \
    chown -R jovyan:users /usr/local/bin/kernel-launchers

USER jovyan

# Disable healthcheck inherited from notebook image
HEALTHCHECK NONE

CMD [ "/usr/local/bin/bootstrap-kernel.sh" ]

================================================
FILE: etc/docker/kernel-tf-py/README.md
================================================
This image enables the use of an IPython kernel launched from [Jupyter Enterprise Gateway](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/) within a Kubernetes or Docker Swarm cluster that can perform Tensorflow operations. It is currently built on the [jupyter/tensorflow-notebook](https://hub.docker.com/r/jupyter/tensorflow-notebook) image deriving from the [jupyter/tensorflow-notebook](https://github.com/jupyter/docker-stacks/tree/main/images/tensorflow-notebook) project.
# What it Gives You - IPython kernel support supplemented with Tensorflow functionality (and debugger) # Basic Use Deploy [enterprise-gateway](https://hub.docker.com/r/elyra/enterprise-gateway/) per its instructions and configured to the appropriate environment. Launch a gateway-enabled Jupyter Notebook application against the Enterprise Gateway instance and pick the desired kernel to use in your notebook. For more information, check our [repo](https://github.com/jupyter-server/enterprise_gateway) and [docs](https://jupyter-enterprise-gateway.readthedocs.io/en/latest/). ================================================ FILE: etc/kernel-launchers/R/scripts/launch_IRkernel.R ================================================ library(argparse) library(jsonlite) require("SparkR") require("base64enc") require("digest") require("stringr") r_libs_user <- Sys.getenv("R_LIBS_USER") sparkConfigList <- list( spark.executorEnv.R_LIBS_USER=r_libs_user, spark.rdd.compress="true") min_port_range_size = Sys.getenv("MIN_PORT_RANGE_SIZE") if ( is.null(min_port_range_size) ) min_port_range_size = Sys.getenv("EG_MIN_PORT_RANGE_SIZE") if ( is.null(min_port_range_size) ) min_port_range_size = 1000 # Initializes the Spark session/context and SQL context initialize_spark_session <- function(mode) { # Make sure SparkR package is loaded last; this is necessary # to avoid the need to fully qualify package namespace (using ::) old <- getOption("defaultPackages") options(defaultPackages = c(old, "SparkR")) if (identical(mode, "eager")) { # Start the spark context immediately if set to eager spark <- SparkR::sparkR.session(enableHiveSupport = FALSE, sparkConfig=sparkConfigList) assign("spark", spark, envir = .GlobalEnv) sc <- SparkR:::callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", spark) sqlContext <<- SparkR::sparkRSQL.init(sc) assign("sc", sc, envir = .GlobalEnv) } else { # Keep lazy evaluation as default starting mode if initialization mode is lazy or not set at all makeActiveBinding(".sparkRsession", sparkSessionFn, SparkR:::.sparkREnv) makeActiveBinding(".sparkRjsc", sparkContextFn, SparkR:::.sparkREnv) delayedAssign("spark", {get(".sparkRsession", envir=SparkR:::.sparkREnv)}, assign.env=.GlobalEnv) # backward compatibility for Spark 1.6 and earlier notebooks delayedAssign("sc", {get(".sparkRjsc", envir=SparkR:::.sparkREnv)}, assign.env=.GlobalEnv) delayedAssign("sqlContext", {spark}, assign.env=.GlobalEnv) } } sparkSessionFn <- local({ function(v) { if (missing(v)) { # get SparkSession # create a new sparkSession rm(".sparkRsession", envir=SparkR:::.sparkREnv) # rm to ensure no infinite recursion get("sc", envir=.GlobalEnv) sparkSession <- SparkR::sparkR.session( sparkHome=Sys.getenv("SPARK_HOME"), sparkConfig=sparkConfigList); sparkSession } } }) sparkContextFn <- local({ function(v) { if (missing(v)) { # get SparkContext # create a new sparkContext rm(".sparkRjsc", envir=SparkR:::.sparkREnv) # rm to ensure no infinite recursion message ("Obtaining Spark session...") sparkContext <- SparkR:::sparkR.sparkContext( sparkHome=Sys.getenv("SPARK_HOME"), sparkEnvirMap=SparkR:::convertNamedListToEnv(sparkConfigList)) message ("Spark session obtained.") sparkContext } } }) # Figure out the connection_file to use determine_connection_file <- function(kernel_id){ base_file = paste("kernel-", kernel_id, sep="") temp_file = tempfile(pattern=paste(base_file,"_",sep=""), fileext=".json") cat(paste("Using connection file ",temp_file," \n",sep="'")) return(temp_file) } validate_port_range <- 
function(port_range) {
  port_ranges = strsplit(port_range, "..", fixed=TRUE)
  lower_port = as.integer(port_ranges[[1]][1])
  upper_port = as.integer(port_ranges[[1]][2])
  port_range_size = upper_port - lower_port
  if (port_range_size != 0) {
    if (port_range_size < min_port_range_size) {
      message(paste("Port range validation failed for range:", port_range, ". Range size must be at least",
                    min_port_range_size, "as specified by env EG_MIN_PORT_RANGE_SIZE"))
      return(NA)
    }
  }
  return(list("lower_port"=lower_port, "upper_port"=upper_port))
}

# Check arguments
parser <- argparse::ArgumentParser(description="Parse Arguments for R Launcher")
parser$add_argument("--kernel-id", nargs='?', help="the id associated with the launched kernel")
parser$add_argument("--port-range", nargs='?', metavar='<lowerPort>..<upperPort>', help="the range of ports imposed for kernel ports")
parser$add_argument("--response-address", nargs='?', metavar='<ip>:<port>', help="the IP:port address of the system hosting the server and expecting response")
parser$add_argument("--public-key", nargs='?', help="the public key used to encrypt connection information")
parser$add_argument("--spark-context-initialization-mode", nargs='?', help="the initialization mode of the spark context: lazy, eager or none")
parser$add_argument("--customAppName", nargs='?', help="the custom application name to be set")

# The following arguments are deprecated and will be used only if their mirroring arguments have no value.
# This means that the default value for --spark-context-initialization-mode (none) will need to come from
# the mirrored args' default until deprecated items have been removed.
parser$add_argument("connection_file", nargs='?', help='Connection file to write connection info')
parser$add_argument("--RemoteProcessProxy.kernel-id", nargs='?', help="the id associated with the launched kernel (deprecated)")
parser$add_argument("--RemoteProcessProxy.port-range", nargs='?', metavar='<lowerPort>..<upperPort>', help="the range of ports imposed for kernel ports (deprecated)")
parser$add_argument("--RemoteProcessProxy.response-address", nargs='?', metavar='<ip>:<port>', help="the IP:port address of the system hosting the server and expecting response (deprecated)")
parser$add_argument("--RemoteProcessProxy.public-key", nargs='?', help="the public key used to encrypt connection information (deprecated)")
parser$add_argument("--RemoteProcessProxy.spark-context-initialization-mode", nargs='?', default="none", help="the initialization mode of the spark context: lazy, eager or none (deprecated)")

argv <- parser$parse_args()

kernel_id <- argv$kernel_id
if (is.null(kernel_id)) {
  kernel_id <- argv$RemoteProcessProxy.kernel_id
}
port_range <- argv$port_range
if (is.null(port_range)) {
  port_range <- argv$RemoteProcessProxy.port_range
}
response_address <- argv$response_address
if (is.null(response_address)) {
  response_address <- argv$RemoteProcessProxy.response_address
}
public_key <- argv$public_key
if (is.null(public_key)) {
  public_key <- argv$RemoteProcessProxy.public_key
}
spark_context_initialization_mode <- argv$spark_context_initialization_mode
if (is.null(spark_context_initialization_mode)) {
  spark_context_initialization_mode <- argv$RemoteProcessProxy.spark_context_initialization_mode
}

if (is.null(argv$connection_file) && is.null(kernel_id)) {
  message("At least one of the parameters: 'connection_file' or '--kernel-id' must be provided!")
  return(NA)
}
if (is.null(kernel_id)) {
  message("Parameter '--kernel-id' must be provided!")
  return(NA)
}
if (is.null(public_key)) {
  message("Parameter '--public-key' must be provided!")
return(NA) } # if we have a response address, then deal with items relative to remote support (ports, comm-socket, etc.) if (!is.null(response_address) && str_length(response_address) > 0){ #If port range argument is passed from kernel json with no value if (is.null(port_range)){ port_range <- NA } # If there is a response address, use pull socket mode connection_file <- determine_connection_file(kernel_id) # if port-range was provided, validate the range and determine bounds lower_port = 0 upper_port = 0 if (!is.na(port_range)){ range <- validate_port_range(port_range) if (length(range) > 1){ lower_port = range$lower_port upper_port = range$upper_port } } # Get the pid of the launcher so the listener thread (process) can detect its # presence to know when to shutdown. pid <- Sys.getpid() # Hoop to jump through to get the directory this script resides in so that we can # load the co-located python server_listener.py file. This code will not work if # called directly from within RStudio. # https://stackoverflow.com/questions/1815606/rscript-determine-path-of-the-executing-script launch_args <- commandArgs(trailingOnly = FALSE) file_option <- "--file=" script_path <- sub(file_option, "", launch_args[grep(file_option, launch_args)]) listener_file <- paste(sep="/", dirname(script_path), "server_listener.py") # Launch the server listener logic in an async manner and poll for the existence of # the connection file before continuing. Should there be an issue, the server # will terminate the launcher, so there's no need for a timeout. python_cmd <- Sys.getenv("PYSPARK_PYTHON", "python") # If present, use the same python specified for Spark svr_listener_cmd <- stringr::str_interp(gsub("\n[:space:]*" , "", paste(python_cmd,"-c \"import os, sys, imp; gl = imp.load_source('setup_server_listener', '${listener_file}'); gl.setup_server_listener(conn_filename='${connection_file}', parent_pid='${pid}', lower_port=${lower_port}, upper_port=${upper_port}, response_addr='${response_address}', kernel_id='${kernel_id}', public_key='${public_key}')\""))) system(svr_listener_cmd, wait=FALSE) while (!file.exists(connection_file)) { Sys.sleep(0.5) } } else { # already provided connection_file = argv$connection_file } # If spark context creation is desired go ahead and initialize the session/context # Otherwise, skip spark context creation if set to none or not provided if (!is.na(spark_context_initialization_mode)){ if (!identical(spark_context_initialization_mode, "none")){ # Add custom application name (spark.app.name) spark config if available, else default to kernel_id if (!is.null(argv$customAppName) && str_length(argv$customAppName) > 0){ sparkConfigList[['spark.app.name']] <- argv$customAppName } else { sparkConfigList[['spark.app.name']] <- kernel_id } initialize_spark_session(spark_context_initialization_mode) } } # Start the kernel IRkernel::main(connection_file) # Only unlink the connection file if we're launched for remote behavior. 
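An aside before the launcher's final cleanup below: the interpolated command above does nothing more than import the co-located server_listener.py and call its entry point. A minimal, self-contained sketch of the equivalent direct call from Python (every value here is a placeholder, not one produced by Enterprise Gateway):

    # Illustrative only -- the path, pid, address, and key below are made up.
    from server_listener import setup_server_listener

    setup_server_listener(
        conn_filename="/tmp/kernel-abc123_x1y2.json",   # file the R loop above polls for
        parent_pid=12345,               # launcher pid; the listener exits when it dies
        lower_port=0,
        upper_port=0,                   # 0..0 means no port-range restriction
        response_addr="10.0.0.1:8877",  # the server's response socket (IP:port)
        kernel_id="abc123",
        public_key="<base64-encoded RSA public key>",
    )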
# (is.null() is used here because response_address is NULL, not NA, when absent)
if (!is.null(response_address) && str_length(response_address) > 0){
  unlink(connection_file)
}

# Stop the context and exit
if (!identical(spark_context_initialization_mode, "none")){
  sparkR.session.stop()
}



================================================
FILE: etc/kernel-launchers/R/scripts/server_listener.py
================================================
"""A server listener for R."""
import base64
import json
import logging
import os
import random
import socket
import uuid
from threading import Thread

from Cryptodome.Cipher import AES, PKCS1_v1_5
from Cryptodome.PublicKey import RSA
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad
from jupyter_client.connect import write_connection_file

LAUNCHER_VERSION = 1  # Indicate to server the version of this launcher (payloads may vary)

max_port_range_retries = int(
    os.getenv("MAX_PORT_RANGE_RETRIES", os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5"))
)

log_level = os.getenv("LOG_LEVEL", os.getenv("EG_LOG_LEVEL", "10"))
log_level = int(log_level) if log_level.isdigit() else log_level

logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s")

logger = logging.getLogger("server_listener for R launcher")
logger.setLevel(log_level)


def _encrypt(connection_info_str, public_key):
    """Encrypt the connection information using a generated AES key that is then encrypted using
    the public key passed from the server. Both are then returned in an encoded JSON payload.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    aes_key = get_random_bytes(16)
    cipher = AES.new(aes_key, mode=AES.MODE_ECB)

    # Encrypt the connection info using the aes_key
    encrypted_connection_info = cipher.encrypt(pad(connection_info_str, 16))
    b64_connection_info = base64.b64encode(encrypted_connection_info)

    # Encrypt the aes_key using the server's public key
    imported_public_key = RSA.importKey(base64.b64decode(public_key.encode()))
    cipher = PKCS1_v1_5.new(key=imported_public_key)
    encrypted_key = base64.b64encode(cipher.encrypt(aes_key))

    # Compose the payload and Base64 encode it
    payload = {
        "version": LAUNCHER_VERSION,
        "key": encrypted_key.decode(),
        "conn_info": b64_connection_info.decode(),
    }
    b64_payload = base64.b64encode(json.dumps(payload).encode(encoding="utf-8"))

    return b64_payload


def return_connection_info(
    connection_file, response_addr, lower_port, upper_port, kernel_id, public_key, parent_pid
):
    """Returns the connection information corresponding to this kernel.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    response_parts = response_addr.split(":")
    if len(response_parts) != 2:
        logger.error(
            f"Invalid format for response address '{response_addr}'. Assuming 'pull' mode..."
        )
        return

    response_ip = response_parts[0]
    try:
        response_port = int(response_parts[1])
    except ValueError:
        logger.error(
            f"Invalid port component found in response address '{response_addr}'. "
            "Assuming 'pull' mode..."
        )
        return

    with open(connection_file) as fp:
        cf_json = json.load(fp)

    # add process and process group ids into connection info
    cf_json["pid"] = parent_pid
    cf_json["pgid"] = os.getpgid(parent_pid)

    # prepare socket address for handling signals
    comm_sock = prepare_comm_socket(lower_port, upper_port)
    cf_json["comm_port"] = comm_sock.getsockname()[1]
    cf_json["kernel_id"] = kernel_id

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((response_ip, response_port))
        json_content = json.dumps(cf_json).encode(encoding="utf-8")
        logger.debug(f"JSON Payload '{json_content}'")
        payload = _encrypt(json_content, public_key)
        logger.debug(f"Encrypted Payload '{payload}'")
        s.send(payload)

    return comm_sock


def prepare_comm_socket(lower_port, upper_port):
    """Prepares the socket to which the server will send signal and shutdown requests.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    sock = _select_socket(lower_port, upper_port)
    logger.info(
        f"Signal socket bound to host: {sock.getsockname()[0]}, port: {sock.getsockname()[1]}"
    )
    sock.listen(1)
    sock.settimeout(5)
    return sock


def _select_ports(count, lower_port, upper_port):
    """Select and return n random ports that are available and adhere to the given port range,
    if applicable.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    ports = []
    sockets = []
    for _ in range(count):
        sock = _select_socket(lower_port, upper_port)
        ports.append(sock.getsockname()[1])
        sockets.append(sock)
    for sock in sockets:
        sock.close()
    return ports


def _select_socket(lower_port, upper_port):
    """Create and return a socket whose port is available and adheres to the given port range,
    if applicable.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    found_port = False
    retries = 0
    while not found_port:
        try:
            sock.bind(("0.0.0.0", _get_candidate_port(lower_port, upper_port)))  # noqa
            found_port = True
        except Exception:
            retries = retries + 1
            if retries > max_port_range_retries:
                msg = "Failed to locate port within range {}..{} after {} retries!".format(
                    lower_port, upper_port, max_port_range_retries
                )
                raise RuntimeError(msg) from None
    return sock


def _get_candidate_port(lower_port, upper_port):
    """Returns a port within the given range; if the range size is zero, zero is returned.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    range_size = upper_port - lower_port
    if range_size == 0:
        return 0
    return random.randint(lower_port, upper_port)


def get_server_request(sock):
    """Gets a request from the server and returns the corresponding dictionary.

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    conn = None
    data = ""
    request_info = None
    try:
        conn, addr = sock.accept()
        while True:
            buffer = conn.recv(1024).decode("utf-8")
            if not buffer:  # send is complete
                request_info = json.loads(data)
                break
            data = data + buffer  # append what we received until we get no more...
    except Exception as e:
        if type(e) is not socket.timeout:
            raise e
    finally:
        if conn:
            conn.close()

    return request_info


def server_listener(sock, parent_pid):
    """Waits for requests from the server and processes each when received.

    Currently, each request either sends a signal to the corresponding kernel process
    (signum) or stops the listener and exits the kernel (shutdown).

    This code also exists in the Python kernel-launcher's launch_ipykernel.py script.
    """
    shutdown = False
    while not shutdown:
        request = get_server_request(sock)
        if request:
            signum = -1  # prevent logging poll requests since that occurs every 3 seconds
            if request.get("signum") is not None:
                signum = int(request.get("signum"))
                os.kill(parent_pid, signum)
            if request.get("shutdown") is not None:
                shutdown = bool(request.get("shutdown"))
            if signum != 0:
                logger.info(f"server_listener got request: {request}")


def setup_server_listener(
    conn_filename, parent_pid, lower_port, upper_port, response_addr, kernel_id, public_key
):
    """Set up the server listener."""
    ip = "0.0.0.0"  # noqa
    key = str(uuid.uuid4()).encode()  # convert to bytes

    ports = _select_ports(5, lower_port, upper_port)

    write_connection_file(
        fname=conn_filename,
        ip=ip,
        key=key,
        shell_port=ports[0],
        iopub_port=ports[1],
        stdin_port=ports[2],
        hb_port=ports[3],
        control_port=ports[4],
    )

    if response_addr:
        comm_socket = return_connection_info(
            conn_filename,
            response_addr,
            int(lower_port),
            int(upper_port),
            kernel_id,
            public_key,
            int(parent_pid),
        )
        if comm_socket:  # socket in use, start server listener thread
            server_listener_thread = Thread(
                target=server_listener,
                args=(
                    comm_socket,
                    int(parent_pid),
                ),
            )
            server_listener_thread.start()

    return


__all__ = [
    "setup_server_listener",
]



================================================
FILE: etc/kernel-launchers/bootstrap/bootstrap-kernel.sh
================================================
#!/bin/bash

PORT_RANGE=${PORT_RANGE:-${EG_PORT_RANGE:-0..0}}
RESPONSE_ADDRESS=${RESPONSE_ADDRESS:-${EG_RESPONSE_ADDRESS}}
PUBLIC_KEY=${PUBLIC_KEY:-${EG_PUBLIC_KEY}}
KERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}
KERNEL_SPARK_CONTEXT_INIT_MODE=${KERNEL_SPARK_CONTEXT_INIT_MODE:-none}
KERNEL_CLASS_NAME=${KERNEL_CLASS_NAME}

echo $0 env: `env`

launch_python_kernel() {
    # Launch the python kernel launcher - which embeds the IPython kernel and listens for interrupts
    # and shutdown requests from Enterprise Gateway.

    export JPY_PARENT_PID=$$  # Force reset of parent pid since we're detached

    if [ -z "${KERNEL_CLASS_NAME}" ]
    then
        kernel_class_option=""
    else
        kernel_class_option="--kernel-class-name ${KERNEL_CLASS_NAME}"
    fi

    set -x
    python ${KERNEL_LAUNCHERS_DIR}/python/scripts/launch_ipykernel.py --kernel-id ${KERNEL_ID} \
        --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} \
        --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE} ${kernel_class_option}
    { set +x; } 2>/dev/null
}

launch_R_kernel() {
    # Launch the R kernel launcher - which embeds the IRkernel kernel and listens for interrupts
    # and shutdown requests from Enterprise Gateway.

    set -x
    Rscript ${KERNEL_LAUNCHERS_DIR}/R/scripts/launch_IRkernel.R --kernel-id ${KERNEL_ID} --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE}
    { set +x; } 2>/dev/null
}

launch_scala_kernel() {
    # Launch the scala kernel launcher - which embeds the Apache Toree kernel and listens for interrupts
    # and shutdown requests from Enterprise Gateway. This kernel is currently always launched using
    # spark-submit, so additional setup is required.

    PROG_HOME=${KERNEL_LAUNCHERS_DIR}/scala
    KERNEL_ASSEMBLY=`(cd "${PROG_HOME}/lib"; ls -1 toree-assembly-*.jar;)`
    TOREE_ASSEMBLY="${PROG_HOME}/lib/${KERNEL_ASSEMBLY}"
    if [ ! -f ${TOREE_ASSEMBLY} ]; then
        echo "Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing. Exiting..."
exit 1 fi # Toree launcher jar path, plus required lib jars (toree-assembly) JARS="${TOREE_ASSEMBLY}" # Toree launcher app path LAUNCHER_JAR=`(cd "${PROG_HOME}/lib"; ls -1 toree-launcher*.jar;)` LAUNCHER_APP="${PROG_HOME}/lib/${LAUNCHER_JAR}" if [ ! -f ${LAUNCHER_APP} ]; then echo "Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing. Exiting..." exit 1 fi SPARK_OPTS="--name ${KERNEL_USERNAME}-${KERNEL_ID}" TOREE_OPTS="--alternate-sigint USR2" set -x eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ --jars "${JARS}" \ --class launcher.ToreeLauncher \ "${LAUNCHER_APP}" \ "${TOREE_OPTS}" \ "--kernel-id ${KERNEL_ID} --port-range ${PORT_RANGE} --response-address ${RESPONSE_ADDRESS} --public-key ${PUBLIC_KEY} --spark-context-initialization-mode ${KERNEL_SPARK_CONTEXT_INIT_MODE}" { set +x; } 2>/dev/null } # Ensure that required envs are present, check language before the dynamic values if [ -z "${KERNEL_LANGUAGE+x}" ] then echo "KERNEL_LANGUAGE is required. Set this value in the image or when starting container." exit 1 fi if [ -z "${KERNEL_ID+x}" ] || [ -z "${RESPONSE_ADDRESS+x}" ] || [ -z "${PUBLIC_KEY+x}" ] then echo "Environment variables, KERNEL_ID, RESPONSE_ADDRESS, and PUBLIC_KEY are required." exit 1 fi # Invoke appropriate launcher based on KERNEL_LANGUAGE (case-insensitive) if [[ "${KERNEL_LANGUAGE,,}" == "python" ]] then launch_python_kernel elif [[ "${KERNEL_LANGUAGE,,}" == "scala" ]] then launch_scala_kernel elif [[ "${KERNEL_LANGUAGE,,}" == "r" ]] then launch_R_kernel else echo "Unrecognized value for KERNEL_LANGUAGE: '${KERNEL_LANGUAGE}'!" exit 1 fi exit 0 ================================================ FILE: etc/kernel-launchers/docker/scripts/launch_docker.py ================================================ """Launches a containerized kernel.""" import argparse import os import re import sys import urllib3 from docker.client import DockerClient from docker.types import EndpointSpec, RestartPolicy urllib3.disable_warnings() # Set env to False if the container should be left around for debug purposes, etc. remove_container = bool( os.getenv("REMOVE_CONTAINER", os.getenv("EG_REMOVE_CONTAINER", "True")).lower() == "true" ) swarm_mode = bool(os.getenv("DOCKER_MODE", os.getenv("EG_DOCKER_MODE", "swarm")).lower() == "swarm") def launch_docker_kernel( kernel_id, port_range, response_addr, public_key, spark_context_init_mode, kernel_class_name ): """Launches a containerized kernel.""" # Can't proceed if no image was specified. image_name = os.environ.get("KERNEL_IMAGE", None) if image_name is None: sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!") if not re.match( r'^[a-zA-Z0-9][a-zA-Z0-9._\-/]*(:[a-zA-Z0-9._\-]+)?(@sha256:[a-f0-9]+)?$', image_name ): sys.exit(f"ERROR - KERNEL_IMAGE contains invalid characters: {image_name}") # Container name is composed of KERNEL_USERNAME and KERNEL_ID container_name = os.environ.get("KERNEL_USERNAME", "") + "-" + kernel_id # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'... docker_network = os.environ.get("DOCKER_NETWORK", os.environ.get("EG_DOCKER_NETWORK", "bridge")) # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ... labels = {} labels["kernel_id"] = kernel_id labels["component"] = "kernel" labels["app"] = "enterprise-gateway" # Capture env parameters... 
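# (Editor's note, illustrative) A kernelspec typically drives this script through
# its argv stanza; a hand-run equivalent, with placeholder values, would look like:
#
#   KERNEL_IMAGE=elyra/kernel-py:dev KERNEL_USERNAME=alice \
#   python launch_docker.py --kernel-id abc123 --port-range 0..0 \
#       --response-address 10.0.0.1:8877 --public-key <b64-key> \
#       --spark-context-initialization-mode none
#
# With DOCKER_MODE unset or set to "swarm" (the default), a swarm service is
# created via client.services.create(); any other value creates a plain
# container via client.containers.run().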
param_env = {} param_env["PORT_RANGE"] = port_range param_env["PUBLIC_KEY"] = public_key param_env["RESPONSE_ADDRESS"] = response_addr param_env["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode if kernel_class_name: param_env["KERNEL_CLASS_NAME"] = kernel_class_name # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and EG_CLIENT_ENVS) # just add the env here. param_env.update(os.environ) param_env.pop( "PATH" ) # Let the image PATH be used. Since this is relative to images, we're probably safe. user = param_env.get("KERNEL_UID") group = param_env.get("KERNEL_GID") # setup common args kwargs = {} kwargs["name"] = container_name kwargs["hostname"] = container_name kwargs["user"] = user kwargs["labels"] = labels client = DockerClient.from_env() if swarm_mode: networks = [] networks.append(docker_network) # mounts = list() # Enable if necessary # mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro") endpoint_spec = EndpointSpec(mode="dnsrr") restart_policy = RestartPolicy(condition="none") # finish args setup kwargs["env"] = param_env kwargs["endpoint_spec"] = endpoint_spec kwargs["restart_policy"] = restart_policy kwargs["container_labels"] = labels kwargs["networks"] = networks kwargs["groups"] = [group, "100"] if param_env.get("KERNEL_WORKING_DIR"): kwargs["workdir"] = param_env.get("KERNEL_WORKING_DIR") # kwargs['mounts'] = mounts # Enable if necessary # print("service args: {}".format(kwargs)) # useful for debug client.services.create(image_name, **kwargs) else: # volumes = { # Enable if necessary # "/usr/local/share/jupyter/kernels": { # "bind": "/usr/local/share/jupyter/kernels", # "mode": "ro", # } # } # finish args setup kwargs["environment"] = param_env kwargs["remove"] = remove_container kwargs["network"] = docker_network kwargs["group_add"] = [group, "100"] kwargs["detach"] = True if param_env.get("KERNEL_WORKING_DIR"): kwargs["working_dir"] = param_env.get("KERNEL_WORKING_DIR") # kwargs['volumes'] = volumes # Enable if necessary # print("container args: {}".format(kwargs)) # useful for debug client.containers.run(image_name, **kwargs) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--kernel-id", dest="kernel_id", nargs="?", help="Indicates the id associated with the launched kernel.", ) parser.add_argument( "--port-range", dest="port_range", nargs="?", metavar="..", help="Port range to impose for kernel ports", ) parser.add_argument( "--response-address", dest="response_address", nargs="?", metavar=":", help="Connection address (:) for returning connection file", ) parser.add_argument( "--public-key", dest="public_key", nargs="?", help="Public key used to encrypt connection information", ) parser.add_argument( "--spark-context-initialization-mode", dest="spark_context_init_mode", nargs="?", help="Indicates whether or how a spark context should be created", ) parser.add_argument( "--kernel-class-name", dest="kernel_class_name", nargs="?", help="Indicates the name of the kernel class to use. Must be a subclass of 'ipykernel.kernelbase.Kernel'.", ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. # This means that the default value for --spark-context-initialization-mode (none) will need to come from # the mirrored args' default until deprecated item has been removed. 
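# (Editor's note) Precedence between the modern and deprecated flags is resolved
# further below via simple or-chaining, e.g.:
#
#   kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"]
#
# so a provided modern flag always wins and the deprecated RemoteProcessProxy.*
# form is only consulted when the modern one is absent.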
parser.add_argument(
    "--RemoteProcessProxy.kernel-id",
    dest="rpp_kernel_id",
    nargs="?",
    help="Indicates the id associated with the launched kernel. (deprecated)",
)
parser.add_argument(
    "--RemoteProcessProxy.port-range",
    dest="rpp_port_range",
    nargs="?",
    metavar="..",
    help="Port range to impose for kernel ports (deprecated)",
)
parser.add_argument(
    "--RemoteProcessProxy.response-address",
    dest="rpp_response_address",
    nargs="?",
    metavar=":",
    help="Connection address (:) for returning connection file (deprecated)",
)
parser.add_argument(
    "--RemoteProcessProxy.public-key",
    dest="rpp_public_key",
    nargs="?",
    help="Public key used to encrypt connection information (deprecated)",
)
parser.add_argument(
    "--RemoteProcessProxy.spark-context-initialization-mode",
    dest="rpp_spark_context_init_mode",
    nargs="?",
    help="Indicates whether or how a spark context should be created (deprecated)",
    default="none",
)

arguments = vars(parser.parse_args())
kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"]
port_range = arguments["port_range"] or arguments["rpp_port_range"]
response_addr = arguments["response_address"] or arguments["rpp_response_address"]
public_key = arguments["public_key"] or arguments["rpp_public_key"]
spark_context_init_mode = (
    arguments["spark_context_init_mode"] or arguments["rpp_spark_context_init_mode"]
)
kernel_class_name = arguments["kernel_class_name"]

launch_docker_kernel(
    kernel_id, port_range, response_addr, public_key, spark_context_init_mode, kernel_class_name
)



================================================
FILE: etc/kernel-launchers/kubernetes/scripts/kernel-pod.yaml.j2
================================================
# This file defines the Kubernetes objects necessary for kernels to run within Kubernetes.
# Substitution parameters are processed by the launch_kubernetes.py code located in the
# same directory. Some values are factory values, while others (typically prefixed with 'kernel_') can be
# provided by the client.
#
# This file can be customized as needed. No changes are required to launch_kubernetes.py provided kernel_
# values are used - which will be automatically set from corresponding KERNEL_ env values. Updates will be
# required to launch_kubernetes.py if new document sections (i.e., new k8s 'kind' objects) are introduced.
#
apiVersion: v1
kind: Pod
metadata:
  name: {{ kernel_pod_name | yaml_safe }}
  namespace: {{ kernel_namespace | yaml_safe }}
  labels:
    kernel_id: {{ kernel_id | yaml_safe }}
    app: enterprise-gateway
    component: kernel
    source: kernel-pod.yaml
  annotations:
    cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
spec:
  restartPolicy: Never
  serviceAccountName: {{ kernel_service_account_name | yaml_safe }}
  # NOTE: using runAsGroup requires that the RunAsGroup feature-gate be enabled.
  # WARNING: Using only runAsUser without runAsGroup, or without enabling the RunAsGroup
  # feature-gate, will result in the new kernel pod having an effective group of 0 (root),
  # although the user will correspond to the runAsUser value. As a result, BOTH should be
  # uncommented AND the feature-gate should be enabled to ensure expected behavior. In
  # addition, 'fsGroup: 100' is recommended so that /home/jovyan can be written to via the
  # 'users' group (gid: 100) irrespective of the "kernel_uid" and "kernel_gid" values.
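To preview what the conditional securityContext block that follows renders to, the template can be exercised directly with jinja2. A minimal sketch using made-up values; the yaml_safe filter here is a simplified stand-in for the yaml_safe_str filter defined in launch_kubernetes.py:

    # Illustrative only -- run from the directory containing kernel-pod.yaml.j2.
    import yaml
    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("."), trim_blocks=True, lstrip_blocks=True)
    env.filters["yaml_safe"] = lambda v: yaml.dump(v, width=10000).replace("\n...", "").strip()
    rendered = env.get_template("kernel-pod.yaml.j2").render(
        kernel_pod_name="alice-abc123",
        kernel_namespace="default",
        kernel_id="abc123",
        kernel_service_account_name="default",
        kernel_image="elyra/kernel-py:dev",  # placeholder image
        kernel_uid=1000,
        kernel_gid=100,
    )
    print(yaml.safe_load(rendered)["spec"]["securityContext"])
    # -> {'runAsUser': 1000, 'runAsGroup': 100, 'fsGroup': 100}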
{% if kernel_uid is defined or kernel_gid is defined %} securityContext: {% if kernel_uid is defined %} runAsUser: {{ kernel_uid | int }} {% endif %} {% if kernel_gid is defined %} runAsGroup: {{ kernel_gid | int }} {% endif %} fsGroup: 100 {% endif %} containers: - image: {{ kernel_image | yaml_safe }} name: {{ kernel_pod_name | yaml_safe }} env: # Add any custom envs here that aren't already configured for the kernel's environment # - name: MY_CUSTOM_ENV # value: "my_custom_value" {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined or kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %} resources: {% if kernel_cpus is defined or kernel_memory is defined or kernel_gpus is defined %} requests: {% if kernel_cpus is defined %} cpu: {{ kernel_cpus | yaml_safe }} {% endif %} {% if kernel_memory is defined %} memory: {{ kernel_memory | yaml_safe }} {% endif %} {% if kernel_gpus is defined %} nvidia.com/gpu: {{ kernel_gpus | yaml_safe }} {% endif %} {% endif %} {% if kernel_cpus_limit is defined or kernel_memory_limit is defined or kernel_gpus_limit is defined %} limits: {% if kernel_cpus_limit is defined %} cpu: {{ kernel_cpus_limit | yaml_safe }} {% endif %} {% if kernel_memory_limit is defined %} memory: {{ kernel_memory_limit | yaml_safe }} {% endif %} {% if kernel_gpus_limit is defined %} nvidia.com/gpu: {{ kernel_gpus_limit | yaml_safe }} {% endif %} {% endif %} {% endif %} {% if kernel_working_dir %} workingDir: {{ kernel_working_dir | yaml_safe }} {% endif %} volumeMounts: # Define any "unconditional" mounts here, followed by "conditional" mounts that vary per client {% if kernel_volume_mounts %} {% for volume_mount in kernel_volume_mounts %} - {{ volume_mount | yaml_safe }} {% endfor %} {% endif %} volumes: # Define any "unconditional" volumes here, followed by "conditional" volumes that vary per client {% if kernel_volumes %} {% for volume in kernel_volumes %} - {{ volume | yaml_safe }} {% endfor %} {% endif %} ================================================ FILE: etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py ================================================ #!/opt/conda/bin/python """Launch on kubernetes.""" import argparse import os import sys from typing import Dict, List import urllib3 import yaml from jinja2 import Environment, FileSystemLoader, select_autoescape from kubernetes import client, config from kubernetes.client.rest import ApiException urllib3.disable_warnings() KERNEL_POD_TEMPLATE_PATH = "/kernel-pod.yaml.j2" ALLOWED_K8S_KINDS = { "Pod", "Secret", "PersistentVolumeClaim", "PersistentVolume", "Service", "ConfigMap", } MAX_DOCUMENTS_PER_KIND = 1 YAML_PARSED_KERNEL_VARS = {"KERNEL_VOLUME_MOUNTS", "KERNEL_VOLUMES"} def yaml_safe_str(value): """Escape a value for safe inclusion in a YAML template. Uses PyYAML's own serializer to produce properly escaped output: - Strings are double-quoted with special characters escaped. - Dicts/lists are serialized as YAML flow mappings/sequences. - None, bools, and numbers are serialized to their YAML-canonical form. """ if isinstance(value, str): return yaml.dump(value, default_style='"', width=10000).strip() if isinstance(value, (dict, list)): return yaml.dump(value, default_flow_style=True, width=10000).strip() # yaml.dump appends a document-end marker ("...\n") for scalars; strip it return yaml.dump(value, width=10000).replace("\n...", "").strip() def generate_kernel_pod_yaml(keywords): """Return the kubernetes pod spec as a yaml string. 
    - load jinja2 template from this file directory.
    - substitute template variables with keywords items.
    """
    j_env = Environment(
        loader=FileSystemLoader(os.path.dirname(__file__)),
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape=select_autoescape(
            disabled_extensions=(
                "j2",
                "yaml",
            ),
            default_for_string=True,
            default=True,
        ),
    )
    j_env.filters["yaml_safe"] = yaml_safe_str
    k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
    return k8s_yaml


def extend_pod_env(pod_def: dict) -> dict:
    """Extends the pod_def.spec.containers[0].env stanza with the current environment."""
    env_stanza = pod_def["spec"]["containers"][0].get("env") or []

    # Walk the current set of template env entries and replace those found in the current
    # env with their values (and record those items). Then add all others from the env
    # that are not already present.
    processed_entries: List[str] = []
    for item in env_stanza:
        item_name = item.get("name")
        if item_name in os.environ:
            item["value"] = os.environ[item_name]
            processed_entries.append(item_name)

    for name, value in os.environ.items():
        if name not in processed_entries:
            env_stanza.append({"name": name, "value": value})

    pod_def["spec"]["containers"][0]["env"] = env_stanza
    return pod_def


# a common failure reason that spans many APIs but is not defined as a constant in the client lib
K8S_ALREADY_EXIST_REASON = "AlreadyExists"


def _parse_k8s_exception(exc: ApiException) -> str:
    """Parse the exception and return the error message from the kubernetes api

    Args:
        exc (Exception): Exception object

    Returns:
        str: Error message from the kubernetes api
    """
    # more exceptions could be parsed, but at the time of implementation we only need this one
    msg = f'"reason":{K8S_ALREADY_EXIST_REASON}'
    if exc.status == 409 and exc.reason == "Conflict" and msg in exc.body:
        return K8S_ALREADY_EXIST_REASON
    return ""


def launch_kubernetes_kernel(
    kernel_id,
    port_range,
    response_addr,
    public_key,
    spark_context_init_mode,
    pod_template_file,
    spark_opts_out,
    kernel_class_name,
):
    """Launches a containerized kernel as a kubernetes pod."""
    if os.getenv("KUBERNETES_SERVICE_HOST"):
        config.load_incluster_config()
    else:
        config.load_kube_config()

    # Capture keywords and their values.
    keywords = {}

    # Factory values...
    # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
    # value since this is used to locate the kernel launch script within the image.

    # Ensure these key/value pairs are reflected in the environment. We'll add these to the container's env
    # stanza after the pod template is generated.
    if port_range:
        os.environ["PORT_RANGE"] = port_range
    if public_key:
        os.environ["PUBLIC_KEY"] = public_key
    if response_addr:
        os.environ["RESPONSE_ADDRESS"] = response_addr
    if kernel_id:
        os.environ["KERNEL_ID"] = kernel_id
    if spark_context_init_mode:
        os.environ["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode
    if kernel_class_name:
        os.environ["KERNEL_CLASS_NAME"] = kernel_class_name

    os.environ["KERNEL_NAME"] = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )

    # Walk env variables looking for names prefixed with KERNEL_. When found, set the corresponding
    # keyword value with the name in lower case. Only parse YAML for variables that legitimately carry
    # structured data (lists/dicts); treat all others as raw strings to prevent YAML injection attacks
    # (a small illustration follows).
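As a small illustration of the guard just described: a well-formed KERNEL_VOLUME_MOUNTS value is a YAML list of mappings, and anything else is rejected (the sample value below is hypothetical):

    import yaml

    sample = '[{"name": "shared-vol", "mountPath": "/mnt/shared"}]'  # made-up mount
    parsed = yaml.safe_load(sample)
    assert isinstance(parsed, list) and all(isinstance(i, dict) for i in parsed)

    # A scalar that parses to something else -- e.g. "foo: {bar: baz}" yields a
    # dict -- fails the list check; ordinary KERNEL_ strings are never parsed at
    # all, so structured YAML cannot be smuggled in through them.
    assert not isinstance(yaml.safe_load("foo: {bar: baz}"), list)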
for name, value in os.environ.items(): if name.startswith("KERNEL_"): if name in YAML_PARSED_KERNEL_VARS: parsed = yaml.safe_load(value) if not isinstance(parsed, list) or not all( isinstance(item, dict) for item in parsed ): sys.exit( f"ERROR - {name} must be a YAML list of mappings - " f"kernel launch terminating!" ) keywords[name.lower()] = parsed else: keywords[name.lower()] = value # Substitute all template variable (wrapped with {{ }}) and generate `yaml` string. k8s_yaml = generate_kernel_pod_yaml(keywords) # For each k8s object (kind), call the appropriate API method. Too bad there isn't a method # that can take a set of objects. # # Creation for additional kinds of k8s objects can be added below. Refer to # https://github.com/kubernetes-client/python for API signatures. Other examples can be found in # https://github.com/jupyter-server/enterprise_gateway/tree/main/enterprise_gateway/services/processproxies/k8s.py # pod_template = None pod_created = None kernel_namespace = keywords["kernel_namespace"] k8s_objs = list(yaml.safe_load_all(k8s_yaml)) kind_counts: Dict[str, int] = {} for k8s_obj in k8s_objs: if not k8s_obj: continue kind = k8s_obj.get("kind") if kind not in ALLOWED_K8S_KINDS: sys.exit( f"ERROR - Unexpected resource kind '{kind}' in rendered manifest - " f"kernel launch terminating!" ) kind_counts[kind] = kind_counts.get(kind, 0) + 1 for kind, count in kind_counts.items(): if count > MAX_DOCUMENTS_PER_KIND: sys.exit( f"ERROR - Rendered manifest contains {count} '{kind}' documents " f"(max {MAX_DOCUMENTS_PER_KIND}) - kernel launch terminating!" ) for k8s_obj in k8s_objs: if k8s_obj.get("kind"): if k8s_obj["kind"] == "Pod": # print("{}".format(k8s_obj)) # useful for debug pod_template = extend_pod_env(k8s_obj) if pod_template_file is None: try: pod_created = client.CoreV1Api(client.ApiClient()).create_namespaced_pod( body=k8s_obj, namespace=kernel_namespace ) except ApiException as exc: if _parse_k8s_exception(exc) == K8S_ALREADY_EXIST_REASON: pod_created = ( client.CoreV1Api(client.ApiClient()) .list_namespaced_pod( namespace=kernel_namespace, label_selector=f"kernel_id={kernel_id}", watch=False, ) .items[0] ) else: raise exc elif k8s_obj["kind"] == "Secret": if pod_template_file is None: client.CoreV1Api(client.ApiClient()).create_namespaced_secret( body=k8s_obj, namespace=kernel_namespace ) elif k8s_obj["kind"] == "PersistentVolumeClaim": if pod_template_file is None: try: client.CoreV1Api( client.ApiClient() ).create_namespaced_persistent_volume_claim( body=k8s_obj, namespace=kernel_namespace ) except ApiException as exc: if _parse_k8s_exception(exc) == K8S_ALREADY_EXIST_REASON: pass else: raise exc elif k8s_obj["kind"] == "PersistentVolume": if pod_template_file is None: client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj) elif k8s_obj["kind"] == "Service": if pod_template_file is None and pod_created is not None: # Create dependency between pod and service, useful to delete service when kernel stops k8s_obj["metadata"]["ownerReferences"] = [ { "apiVersion": "v1", "kind": "pod", "name": str(pod_created.metadata.name), "uid": str(pod_created.metadata.uid), } ] client.CoreV1Api(client.ApiClient()).create_namespaced_service( body=k8s_obj, namespace=kernel_namespace ) elif k8s_obj["kind"] == "ConfigMap": if pod_template_file is None and pod_created is not None: # Create dependency between pod and configmap, useful to delete service when kernel stops k8s_obj["metadata"]["ownerReferences"] = [ { "apiVersion": "v1", "kind": "pod", "name": 
str(pod_created.metadata.name), "uid": str(pod_created.metadata.uid), } ] client.CoreV1Api(client.ApiClient()).create_namespaced_config_map( body=k8s_obj, namespace=kernel_namespace ) else: sys.exit( f"ERROR - Unhandled Kubernetes object kind '{k8s_obj['kind']}' found in yaml file - " f"kernel launch terminating!" ) else: print("ERROR processing Kubernetes yaml file - kernel launch terminating!") print(k8s_yaml) sys.exit( f"ERROR - Unknown Kubernetes object '{k8s_obj}' found in yaml file - kernel launch terminating!" ) if pod_template_file: # TODO - construct other --conf options for things like mounts, resources, etc. # write yaml to file... with open(pod_template_file, "w") as stream: yaml.dump(pod_template, stream) # Build up additional spark options. Note the trailing space to accommodate concatenation additional_spark_opts = ( f"--conf spark.kubernetes.driver.podTemplateFile={pod_template_file} " f"--conf spark.kubernetes.executor.podTemplateFile={pod_template_file} " ) additional_spark_opts += _get_spark_resources(pod_template) if spark_opts_out: with open(spark_opts_out, "w+") as soo_fd: soo_fd.write(additional_spark_opts) else: # If no spark_opts_out was specified, print to stdout in case this is an old caller print(additional_spark_opts) def _get_spark_resources(pod_template: Dict) -> str: # Gather up resources for cpu/memory requests/limits. Since gpus require a "discovery script" # we'll leave that alone for now: # https://spark.apache.org/docs/latest/running-on-kubernetes.html#resource-allocation-and-configuration-overview # # The config value names below are pulled from: # https://spark.apache.org/docs/latest/running-on-kubernetes.html#container-spec spark_resources = "" containers = pod_template.get("spec", {}).get("containers", []) if containers: # We're just dealing with single-container pods at this time. 
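# (Editor's note, illustrative) For a container with requests {cpu: "2",
# memory: "4g"} and limits {cpu: "4", memory: "8g"}, the string built below
# comes out as:
#   --conf spark.driver.cores=2 --conf spark.executor.cores=2
#   --conf spark.driver.memory=4g --conf spark.executor.memory=4g
#   --conf spark.kubernetes.driver.limit.cores=4 --conf spark.kubernetes.executor.limit.cores=4
#   --conf spark.driver.memory=8g --conf spark.executor.memory=8g
# Note the memory *limit* re-emits spark.driver.memory/spark.executor.memory,
# so when both are present the limit value should be the one spark-submit ends
# up honoring (later --conf entries for the same key override earlier ones).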
resources = containers[0].get("resources", {}) if resources: requests = resources.get("requests", {}) if requests: cpu_request = requests.get("cpu") if cpu_request: spark_resources += ( f"--conf spark.driver.cores={cpu_request} " f"--conf spark.executor.cores={cpu_request} " ) memory_request = requests.get("memory") if memory_request: spark_resources += ( f"--conf spark.driver.memory={memory_request} " f"--conf spark.executor.memory={memory_request} " ) limits = resources.get("limits", {}) if limits: cpu_limit = limits.get("cpu") if cpu_limit: spark_resources += ( f"--conf spark.kubernetes.driver.limit.cores={cpu_limit} " f"--conf spark.kubernetes.executor.limit.cores={cpu_limit} " ) memory_limit = limits.get("memory") if memory_limit: spark_resources += ( f"--conf spark.driver.memory={memory_limit} " f"--conf spark.executor.memory={memory_limit} " ) return spark_resources if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--kernel-id", dest="kernel_id", nargs="?", help="Indicates the id associated with the launched kernel.", ) parser.add_argument( "--port-range", dest="port_range", nargs="?", metavar="..", help="Port range to impose for kernel ports", ) parser.add_argument( "--response-address", dest="response_address", nargs="?", metavar=":", help="Connection address (:) for returning connection file", ) parser.add_argument( "--public-key", dest="public_key", nargs="?", help="Public key used to encrypt connection information", ) parser.add_argument( "--spark-context-initialization-mode", dest="spark_context_init_mode", nargs="?", help="Indicates whether or how a spark context should be created", ) parser.add_argument( "--pod-template", dest="pod_template_file", nargs="?", metavar="template filename", help="When present, yaml is written to file, no launch performed.", ) parser.add_argument( "--spark-opts-out", dest="spark_opts_out", nargs="?", metavar="additional spark options filename", help="When present, additional spark options are written to file, " "no launch performed, requires --pod-template.", ) parser.add_argument( "--kernel-class-name", dest="kernel_class_name", nargs="?", help="Indicates the name of the kernel class to use. Must be a subclass of 'ipykernel.kernelbase.Kernel'.", ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. # This means that the default value for --spark-context-initialization-mode (none) will need to come from # the mirrored args' default until deprecated item has been removed. parser.add_argument( "--RemoteProcessProxy.kernel-id", dest="rpp_kernel_id", nargs="?", help="Indicates the id associated with the launched kernel. 
(deprecated)", ) parser.add_argument( "--RemoteProcessProxy.port-range", dest="rpp_port_range", nargs="?", metavar="..", help="Port range to impose for kernel ports (deprecated)", ) parser.add_argument( "--RemoteProcessProxy.response-address", dest="rpp_response_address", nargs="?", metavar=":", help="Connection address (:) for returning connection file (deprecated)", ) parser.add_argument( "--RemoteProcessProxy.public-key", dest="rpp_public_key", nargs="?", help="Public key used to encrypt connection information (deprecated)", ) parser.add_argument( "--RemoteProcessProxy.spark-context-initialization-mode", dest="rpp_spark_context_init_mode", nargs="?", help="Indicates whether or how a spark context should be created (deprecated)", default="none", ) arguments = vars(parser.parse_args()) kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"] port_range = arguments["port_range"] or arguments["rpp_port_range"] response_addr = arguments["response_address"] or arguments["rpp_response_address"] public_key = arguments["public_key"] or arguments["rpp_public_key"] spark_context_init_mode = ( arguments["spark_context_init_mode"] or arguments["rpp_spark_context_init_mode"] ) pod_template_file = arguments["pod_template_file"] spark_opts_out = arguments["spark_opts_out"] kernel_class_name = arguments["kernel_class_name"] launch_kubernetes_kernel( kernel_id, port_range, response_addr, public_key, spark_context_init_mode, pod_template_file, spark_opts_out, kernel_class_name, ) ================================================ FILE: etc/kernel-launchers/operators/scripts/launch_custom_resource.py ================================================ #!/opt/conda/bin/python """Launch a custom operator resource.""" import argparse import os import re import sys import urllib3 import yaml from jinja2 import Environment, FileSystemLoader, select_autoescape from kubernetes import client, config urllib3.disable_warnings() YAML_PARSED_KERNEL_VARS = {"KERNEL_VOLUME_MOUNTS", "KERNEL_VOLUMES"} def yaml_safe_str(value): """Escape a value for safe inclusion in a YAML template. Uses PyYAML's own serializer to produce properly escaped output: - Strings are double-quoted with special characters escaped. - Dicts/lists are serialized as YAML flow mappings/sequences. - None, bools, and numbers are serialized to their YAML-canonical form. """ if isinstance(value, str): return yaml.dump(value, default_style='"', width=10000).strip() if isinstance(value, (dict, list)): return yaml.dump(value, default_flow_style=True, width=10000).strip() # yaml.dump appends a document-end marker ("...\n") for scalars; strip it return yaml.dump(value, width=10000).replace("\n...", "").strip() def generate_kernel_custom_resource_yaml(kernel_crd_template, keywords): """Generate the kernel custom resource yaml given a template.""" j_env = Environment( loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True, autoescape=select_autoescape( disabled_extensions=( "j2", "yaml", ), default_for_string=True, default=True, ), ) j_env.filters["yaml_safe"] = yaml_safe_str k8s_yaml = j_env.get_template("/" + kernel_crd_template + ".yaml.j2").render(**keywords) return k8s_yaml def extend_operator_env(op_def: dict, sub_spec: str) -> dict: """Extends the op_def.spec.sub_spec.env stanza with current environment.""" env_stanza = op_def["spec"][sub_spec].get("env") or [] # Walk current set of template env entries and replace those found in the current # env with their values (and record those items). 
Then add all others from the env # that were not already. processed_entries: list[str] = [] for item in env_stanza: item_name = item.get("name") if item_name in os.environ: item["value"] = os.environ[item_name] processed_entries.append(item_name) for name, value in os.environ.items(): if name not in processed_entries: env_stanza.append({"name": name, "value": value}) op_def["spec"][sub_spec]["env"] = env_stanza return op_def def launch_custom_resource_kernel( kernel_id, port_range, response_addr, public_key, spark_context_init_mode ): """Launch a custom resource kernel.""" config.load_incluster_config() keywords = {} keywords["eg_port_range"] = port_range keywords["eg_public_key"] = public_key keywords["eg_response_address"] = response_addr keywords["kernel_id"] = kernel_id keywords["kernel_name"] = os.path.basename( os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ) keywords["spark_context_initialization_mode"] = spark_context_init_mode # Only parse YAML for variables that legitimately carry structured data (lists/dicts); # treat all others as raw strings to prevent YAML injection attacks. for name, value in os.environ.items(): if name.startswith("KERNEL_"): if name in YAML_PARSED_KERNEL_VARS: parsed = yaml.safe_load(value) if not isinstance(parsed, list) or not all( isinstance(item, dict) for item in parsed ): sys.exit( f"ERROR - {name} must be a YAML list of mappings - " f"kernel launch terminating!" ) keywords[name.lower()] = parsed else: keywords[name.lower()] = value kernel_crd_template = keywords["kernel_crd_group"] + "-" + keywords["kernel_crd_version"] if not re.match(r'^[a-z0-9][a-z0-9.\-]*-v[a-z0-9]+$', kernel_crd_template): sys.exit( f"ERROR - Invalid CRD template name: {kernel_crd_template} - kernel launch terminating!" ) custom_resource_yaml = generate_kernel_custom_resource_yaml(kernel_crd_template, keywords) kernel_namespace = keywords["kernel_namespace"] group = keywords["kernel_crd_group"] version = keywords["kernel_crd_version"] plural = keywords["kernel_crd_plural"] custom_resource_object = yaml.safe_load(custom_resource_yaml) if not isinstance(custom_resource_object, dict) or "kind" not in custom_resource_object: sys.exit( "ERROR - Rendered CRD manifest is not a valid single-document YAML - kernel launch terminating!" ) if group == "sparkoperator.k8s.io": extend_operator_env(custom_resource_object, "driver") extend_operator_env(custom_resource_object, "executor") try: client.CustomObjectsApi().create_namespaced_custom_object( group, version, kernel_namespace, plural, custom_resource_object ) except client.exceptions.ApiException as ex: if ex.status == 404: sys.exit( "\nERROR: The Kubernetes Operator for Apache Spark does not appear to be installed. 
" "See 'https://github.com/GoogleCloudPlatform/spark-on-k8s-operator#installation' for " "instructions, then retry the operation.\n" ) else: print("ERROR processing Kubernetes Operator CRD - kernel launch terminating!") print(custom_resource_yaml) raise ex if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--kernel-id", "--RemoteProcessProxy.kernel-id", dest="kernel_id", nargs="?", help="Indicates the id associated with the launched kernel.", ) parser.add_argument( "--port-range", "--RemoteProcessProxy.port-range", dest="port_range", nargs="?", metavar="..", help="Port range to impose for kernel ports", ) parser.add_argument( "--response-address", "--RemoteProcessProxy.response-address", dest="response_address", nargs="?", metavar=":", help="Connection address (:) for returning connection file", ) parser.add_argument( "--public-key", "--RemoteProcessProxy.public-key", dest="public_key", nargs="?", help="Public key used to encrypt connection information", ) parser.add_argument( "--spark-context-initialization-mode", "--RemoteProcessProxy.spark-context-initialization-mode", dest="spark_context_init_mode", nargs="?", help="Indicates whether or how a spark context should be created", default="none", ) arguments = vars(parser.parse_args()) kernel_id = arguments["kernel_id"] port_range = arguments["port_range"] response_addr = arguments["response_address"] public_key = arguments["public_key"] spark_context_init_mode = arguments["spark_context_init_mode"] launch_custom_resource_kernel( kernel_id, port_range, response_addr, public_key, spark_context_init_mode ) ================================================ FILE: etc/kernel-launchers/operators/scripts/sparkoperator.k8s.io-v1beta2.yaml.j2 ================================================ apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication metadata: name: {{ kernel_resource_name | yaml_safe }} spec: restartPolicy: type: Never type: Python pythonVersion: "3" sparkVersion: 2.4.5 image: {{ kernel_image | yaml_safe }} mainApplicationFile: "local:///usr/local/bin/kernel-launchers/python/scripts/launch_ipykernel.py" arguments: - "--kernel-id" - {{ kernel_id | yaml_safe }} - "--spark-context-initialization-mode" - {{ spark_context_initialization_mode | yaml_safe }} - "--response-address" - {{ eg_response_address | yaml_safe }} - "--port-range" - {{ eg_port_range | yaml_safe }} - "--public-key" - {{ eg_public_key | yaml_safe }} driver: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "false" env: # Add any custom envs here that aren't already configured for the kernel's environment # Note: For envs to flow to the pods, the webhook server must be enabled during deployment # e.g., helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true # - name: MY_DRIVER_ENV # value: "my_driver_value" serviceAccount: {{ kernel_service_account_name | yaml_safe }} labels: kernel_id: {{ kernel_id | yaml_safe }} app: enterprise-gateway component: kernel cores: 1 coreLimit: 1000m memory: 1g volumeMounts: {% if kernel_volume_mounts is defined %} {% for mount in kernel_volume_mounts %} - {{ mount | yaml_safe }} {% endfor %} {% endif %} volumes: {% if kernel_volumes is defined %} {% for volume in kernel_volumes %} - {{ volume | yaml_safe }} {% endfor %} {% endif %} executor: env: # Add any custom envs here that aren't already configured for the kernel's environment # Note: For envs to flow to the pods, the webhook server must be enabled during deployment # e.g., helm 
install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true # - name: MY_EXECUTOR_ENV # value: "my_executor_value" labels: kernel_id: {{ kernel_id | yaml_safe }} app: enterprise-gateway component: worker image: {{ kernel_executor_image | yaml_safe }} instances: 2 cores: 1 coreLimit: 1000m memory: 1g volumeMounts: {% if kernel_volume_mounts is defined %} {% for mount in kernel_volume_mounts %} - {{ mount | yaml_safe }} {% endfor %} {% endif %} volumes: {% if kernel_volumes is defined %} {% for volume in kernel_volumes %} - {{ volume | yaml_safe }} {% endfor %} {% endif %} {% if kernel_sparkapp_config_map %} sparkConfigMap: {{ kernel_sparkapp_config_map | yaml_safe }} {% endif %} ================================================ FILE: etc/kernel-launchers/python/scripts/launch_ipykernel.py ================================================ """Launch an ipython kernel.""" import argparse import base64 import json import logging import os import random import signal import socket import tempfile import uuid from multiprocessing import Process from threading import Thread from Cryptodome.Cipher import AES, PKCS1_v1_5 from Cryptodome.PublicKey import RSA from Cryptodome.Random import get_random_bytes from Cryptodome.Util.Padding import pad from jupyter_client.connect import write_connection_file LAUNCHER_VERSION = 1 # Indicate to server the version of this launcher (payloads may vary) # Minimum port range size and max retries, let EG_ env values act as the default for b/c purposes min_port_range_size = int( os.getenv("MIN_PORT_RANGE_SIZE", os.getenv("EG_MIN_PORT_RANGE_SIZE", "1000")) ) max_port_range_retries = int( os.getenv("MAX_PORT_RANGE_RETRIES", os.getenv("EG_MAX_PORT_RANGE_RETRIES", "5")) ) log_level = os.getenv("LOG_LEVEL", os.getenv("EG_LOG_LEVEL", "10")) log_level = int(log_level) if log_level.isdigit() else log_level logging.basicConfig(format="[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s") logger = logging.getLogger("launch_ipykernel") logger.setLevel(log_level) DEFAULT_KERNEL_CLASS_NAME = "ipykernel.ipkernel.IPythonKernel" __spark_context = None class ExceptionThread(Thread): """Wrap thread to handle the exception.""" def __init__(self, target): """Initialize the thread.""" self.target = target self.exc = None Thread.__init__(self) def run(self): """Run the thread.""" try: self.target() except Exception as exc: self.exc = exc def initialize_namespace(namespace, cluster_type="spark"): """Initialize the kernel namespace. Parameters ---------- cluster_type : {'spark', 'dask', 'none'} The cluster type to initialize. ``'none'`` results in no variables in the initial namespace. """ if cluster_type == "spark": try: from pyspark.sql import SparkSession except ImportError: logger.info( "A spark context was desired but the pyspark distribution is not present. " "Spark context creation will not occur." 
            )
            return

        def initialize_spark_session():
            """Initialize Spark session and replace global variable placeholders with real
            Spark session object references."""
            import atexit

            spark = SparkSession.builder.getOrCreate()
            global __spark_context
            __spark_context = spark.sparkContext

            # Stop the spark session on exit
            atexit.register(lambda: spark.stop())

            namespace.update(
                {
                    "spark": spark,
                    "sc": spark.sparkContext,
                    "sql": spark.sql,
                    "sqlContext": spark._wrapped,
                    "sqlCtx": spark._wrapped,
                }
            )

        init_thread = ExceptionThread(target=initialize_spark_session)
        spark = WaitingForSparkSessionToBeInitialized("spark", init_thread, namespace)
        sc = WaitingForSparkSessionToBeInitialized("sc", init_thread, namespace)
        sqlContext = WaitingForSparkSessionToBeInitialized("sqlContext", init_thread, namespace)

        def sql(query):
            """Placeholder function. When called, waits for the Spark session to be
            initialized and calls ``spark.sql(query)``."""
            return spark.sql(query)

        namespace.update(
            {"spark": spark, "sc": sc, "sql": sql, "sqlContext": sqlContext, "sqlCtx": sqlContext}
        )

        init_thread.start()
    elif cluster_type == "dask":
        import dask_yarn

        cluster = dask_yarn.YarnCluster.from_current()
        namespace.update({"cluster": cluster})
    elif cluster_type != "none":
        raise RuntimeError("Unknown cluster_type: %r" % cluster_type)


class WaitingForSparkSessionToBeInitialized:
    """Wrapper object for SparkContext and other Spark session variables while the real Spark
    session is being initialized in a background thread.

    The class name is intentionally verbose and explicit, as it will show up when executing a
    cell that contains only a Spark session variable like ``sc`` or ``sqlContext``.
    """

    # private and public attributes that show up for tab completion,
    # to indicate pending initialization of Spark session
    _WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = "Spark Session not yet initialized ..."
    WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = "Spark Session not yet initialized ..."

    # the same wrapper class is used for all Spark session variables, so we need to record the name of the variable
    def __init__(self, global_variable_name, init_thread, namespace):
        """Initialize the waiter."""
        self._spark_session_variable = global_variable_name
        self._init_thread = init_thread
        self._namespace = namespace

    # we intercept all method and attribute references on our temporary Spark session variable,
    # wait for the thread to complete initializing the Spark sessions and then we forward the
    # call to the real Spark objects
    def __getattr__(self, name):
        """Handle attribute getter."""
        # ignore tab-completion request for __members__ or __methods__ and ignore meta property requests
        if name.startswith("__") or name.startswith("_ipython_") or name.startswith("_repr_"):
            return
        else:
            # wait on thread to initialize the Spark session variables in global variable scope
            self._init_thread.join(timeout=None)
            exc = self._init_thread.exc
            if exc:
                msg = f"Variable: {self._spark_session_variable} was not initialized properly."
                raise RuntimeError(msg) from exc
            # now return attribute/function reference from actual Spark object
            return getattr(self._namespace[self._spark_session_variable], name)


def _validate_port_range(port_range):
    # if no argument was provided, return a range of 0
    if not port_range:
        return 0, 0

    try:
        port_ranges = port_range.split("..")
        lower_port = int(port_ranges[0])
        upper_port = int(port_ranges[1])

        port_range_size = upper_port - lower_port
        if port_range_size != 0 and port_range_size < min_port_range_size:
            msg = (
                f"Port range validation failed for range: '{port_range}'. Range size must be at least "
                f"{min_port_range_size} as specified by env EG_MIN_PORT_RANGE_SIZE"
            )
            raise RuntimeError(msg) from None
    except ValueError as ve:
        msg = f"Port range validation failed for range: '{port_range}'. Error was: {ve}"
        raise RuntimeError(msg) from None
    except IndexError as ie:
        msg = f"Port range validation failed for range: '{port_range}'. Error was: {ie}"
        raise RuntimeError(msg) from None

    return lower_port, upper_port


def determine_connection_file(conn_file, kid):
    """If the directory exists, use the original file, else create a temporary file."""
    if conn_file is None or not os.path.exists(os.path.dirname(conn_file)):
        if kid is not None:
            basename = "kernel-" + kid
        else:
            basename = os.path.splitext(os.path.basename(conn_file))[0]
        fd, conn_file = tempfile.mkstemp(suffix=".json", prefix=basename + "_")
        os.close(fd)
        logger.debug(f"Using connection file '{conn_file}'.")

    return conn_file


def _encrypt(connection_info_str, public_key):
    """Encrypt the connection information using a generated AES key that is then encrypted using
    the public key passed from the server. Both are then returned in an encoded JSON payload.

    This code also exists in the R kernel-launcher's server_listener.py script.
    """
    aes_key = get_random_bytes(16)
    cipher = AES.new(aes_key, mode=AES.MODE_ECB)

    # Encrypt the connection info using the aes_key
    encrypted_connection_info = cipher.encrypt(pad(connection_info_str, 16))
    b64_connection_info = base64.b64encode(encrypted_connection_info)

    # Encrypt the aes_key using the server's public key
    imported_public_key = RSA.importKey(base64.b64decode(public_key.encode()))
    cipher = PKCS1_v1_5.new(key=imported_public_key)
    encrypted_key = base64.b64encode(cipher.encrypt(aes_key))

    # Compose the payload and Base64 encode it
    payload = {
        "version": LAUNCHER_VERSION,
        "key": encrypted_key.decode(),
        "conn_info": b64_connection_info.decode(),
    }
    b64_payload = base64.b64encode(json.dumps(payload).encode(encoding="utf-8"))

    return b64_payload


def return_connection_info(
    connection_file, response_addr, lower_port, upper_port, kernel_id, public_key
):
    """Returns the connection information corresponding to this kernel.

    This code also exists in the R kernel-launcher's server_listener.py script.
    """
    response_parts = response_addr.split(":")
    if len(response_parts) != 2:
        logger.error(
            f"Invalid format for response address '{response_addr}'. Assuming 'pull' mode..."
        )
        return

    response_ip = response_parts[0]
    try:
        response_port = int(response_parts[1])
    except ValueError:
        logger.error(
            f"Invalid port component found in response address '{response_addr}'. Assuming 'pull' mode..."
        )
        return

    with open(connection_file) as fp:
        cf_json = json.load(fp)

    # add process and process group ids into connection info
    pid = os.getpid()
    cf_json["pid"] = pid
    cf_json["pgid"] = os.getpgid(pid)

    # prepare socket address for handling signals
    comm_sock = prepare_comm_socket(lower_port, upper_port)
    cf_json["comm_port"] = comm_sock.getsockname()[1]
    cf_json["kernel_id"] = kernel_id

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((response_ip, response_port))
        json_content = json.dumps(cf_json).encode(encoding="utf-8")
        logger.debug(f"JSON Payload '{json_content}'")
        payload = _encrypt(json_content, public_key)
        logger.debug(f"Encrypted Payload '{payload}'")
        s.send(payload)

    return comm_sock


def prepare_comm_socket(lower_port, upper_port):
    """Prepares the socket to which the server will send signal and shutdown requests.

    This code also exists in the R kernel-launcher's server_listener.py script.
""" sock = _select_socket(lower_port, upper_port) logger.info( f"Signal socket bound to host: {sock.getsockname()[0]}, port: {sock.getsockname()[1]}" ) sock.listen(1) sock.settimeout(5) return sock def _select_ports(count, lower_port, upper_port): """Select and return n random ports that are available and adhere to the given port range, if applicable. This code also exists in the R kernel-launcher's server_listener.py script. """ ports = [] sockets = [] for _ in range(count): sock = _select_socket(lower_port, upper_port) ports.append(sock.getsockname()[1]) sockets.append(sock) for sock in sockets: sock.close() return ports def _select_socket(lower_port, upper_port): """Create and return a socket whose port is available and adheres to the given port range, if applicable. This code also exists in the R kernel-launcher's server_listener.py script. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) found_port = False retries = 0 while not found_port: try: sock.bind(("0.0.0.0", _get_candidate_port(lower_port, upper_port))) # noqa found_port = True except Exception: retries = retries + 1 if retries > max_port_range_retries: msg = ( f"Failed to locate port within range {lower_port}..{upper_port} " f"after {max_port_range_retries} retries!" ) raise RuntimeError(msg) from None return sock def _get_candidate_port(lower_port, upper_port): """Returns a port within the given range. If the range is zero, the zero is returned. This code also exists in the R kernel-launcher's server_listener.py script. """ range_size = upper_port - lower_port if range_size == 0: return 0 return random.randint(lower_port, upper_port) def get_server_request(sock): """Gets a request from the server and returns the corresponding dictionary. This code also exists in the R kernel-launcher's server_listener.py script. """ conn = None data = "" request_info = None try: conn, addr = sock.accept() while True: buffer = conn.recv(1024).decode("utf-8") if not buffer: # send is complete request_info = json.loads(data) break data = data + buffer # append what we received until we get no more... except Exception as e: if type(e) is not socket.timeout: raise e finally: if conn: conn.close() return request_info def cancel_spark_jobs(sig, frame): """Cancel spark jobs.""" if __spark_context is None: return try: __spark_context.cancelAllJobs() except Exception as e: if e.__class__.__name__ == "Py4JError": try: __spark_context.cancelAllJobs() except Exception as ex: print( f"Error occurred while re-attempting Spark job cancellation when interrupting the kernel: {ex}" ) else: print( f"Error occurred while attempting Spark job cancellation when interrupting the kernel: {e}" ) def server_listener(sock, parent_pid, cluster_type): """Waits for requests from the server and processes each when received. Currently, these will be one of a sending a signal to the corresponding kernel process (signum) or stopping the listener and exiting the kernel (shutdown). This code also exists in the R kernel-launcher's server_listener.py script. 
""" shutdown = False while not shutdown: request = get_server_request(sock) if request: signum = -1 # prevent logging poll requests since that occurs every 3 seconds if request.get("signum") is not None: signum = int(request.get("signum")) os.kill(parent_pid, signum) if signum == 2 and cluster_type == "spark": os.kill(parent_pid, signal.SIGUSR2) if request.get("shutdown") is not None: shutdown = bool(request.get("shutdown")) if signum != 0: logger.info(f"server_listener got request: {request}") def import_item(name): """Import and return ``bar`` given the string ``foo.bar``. Calling ``bar = import_item("foo.bar")`` is the functional equivalent of executing the code ``from foo import bar``. Parameters ---------- name : string The fully qualified name of the module/package being imported. Returns ------- mod : module object The module that was imported. """ parts = name.rsplit(".", 1) if len(parts) == 2: # called with 'foo.bar....' package, obj = parts module = __import__(package, fromlist=[obj]) try: pak = getattr(module, obj) except AttributeError: raise ImportError("No module named %s" % obj) from None return pak else: # called with un-dotted string return __import__(parts[0]) def start_ipython( namespace, cluster_type="spark", kernel_class_name=DEFAULT_KERNEL_CLASS_NAME, **kwargs ): """Start the ipython kernel.""" from ipykernel.kernelapp import IPKernelApp # Capture the kernel class before removing 'import_item' from the namespace kernel_class = import_item(kernel_class_name) # create an initial list of variables to clear # we do this without deleting to preserve the locals so that # initialize_namespace isn't affected by this mutation to_delete = [k for k in namespace if not k.startswith("__")] # initialize the namespace with the proper variables initialize_namespace(namespace, cluster_type=cluster_type) # delete the extraneous variables for k in to_delete: del namespace[k] # Start the kernel. app = IPKernelApp.instance(kernel_class=kernel_class, user_ns=namespace, **kwargs) app.initialize([]) app.start() # cleanup conn_file = kwargs["connection_file"] try: import os # re-import os since it's removed during namespace manipulation during startup os.remove(conn_file) except Exception as e: print(f"Could not delete connection file '{conn_file}' at exit due to error: {e}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--response-address", dest="response_address", nargs="?", metavar=":", help="Connection address (:) for returning connection file", ) parser.add_argument( "--kernel-id", dest="kernel_id", nargs="?", help="Indicates the id associated with the launched kernel.", ) parser.add_argument( "--public-key", dest="public_key", nargs="?", help="Public key used to encrypt connection information", ) parser.add_argument( "--port-range", dest="port_range", nargs="?", metavar="..", help="Port range to impose for kernel ports", ) parser.add_argument( "--spark-context-initialization-mode", dest="init_mode", nargs="?", help="the initialization mode of the spark context: lazy, eager or none", ) parser.add_argument( "--cluster-type", dest="cluster_type", nargs="?", help="the kind of cluster to initialize: spark, dask, or none", ) parser.add_argument( "--kernel-class-name", dest="kernel_class_name", nargs="?", default=DEFAULT_KERNEL_CLASS_NAME, help="Indicates the name of the kernel class to use. Must be a subclass of 'ipykernel.kernelbase.Kernel'.", ) # The following arguments are deprecated and will be used only if their mirroring arguments have no value. 
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--response-address",
        dest="response_address",
        nargs="?",
        metavar="<ip>:<port>",
        help="Connection address (<ip>:<port>) for returning connection file",
    )
    parser.add_argument(
        "--kernel-id",
        dest="kernel_id",
        nargs="?",
        help="Indicates the id associated with the launched kernel.",
    )
    parser.add_argument(
        "--public-key",
        dest="public_key",
        nargs="?",
        help="Public key used to encrypt connection information",
    )
    parser.add_argument(
        "--port-range",
        dest="port_range",
        nargs="?",
        metavar="<lowerPort>..<upperPort>",
        help="Port range to impose for kernel ports",
    )
    parser.add_argument(
        "--spark-context-initialization-mode",
        dest="init_mode",
        nargs="?",
        help="the initialization mode of the spark context: lazy, eager or none",
    )
    parser.add_argument(
        "--cluster-type",
        dest="cluster_type",
        nargs="?",
        help="the kind of cluster to initialize: spark, dask, or none",
    )
    parser.add_argument(
        "--kernel-class-name",
        dest="kernel_class_name",
        nargs="?",
        default=DEFAULT_KERNEL_CLASS_NAME,
        help="Indicates the name of the kernel class to use. Must be a subclass of 'ipykernel.kernelbase.Kernel'.",
    )

    # The following arguments are deprecated and will be used only if their mirroring arguments have no value.
    # This means that the default values for --spark-context-initialization-mode (none) and --cluster-type (spark)
    # will need to come from the mirrored args' default until deprecated items have been removed.
    parser.add_argument(
        "connection_file", nargs="?", help="Connection file to write connection info (deprecated)"
    )
    parser.add_argument(
        "--RemoteProcessProxy.response-address",
        dest="rpp_response_address",
        nargs="?",
        metavar="<ip>:<port>",
        help="Connection address (<ip>:<port>) for returning connection file (deprecated)",
    )
    parser.add_argument(
        "--RemoteProcessProxy.kernel-id",
        dest="rpp_kernel_id",
        nargs="?",
        help="Indicates the id associated with the launched kernel. (deprecated)",
    )
    parser.add_argument(
        "--RemoteProcessProxy.public-key",
        dest="rpp_public_key",
        nargs="?",
        help="Public key used to encrypt connection information (deprecated)",
    )
    parser.add_argument(
        "--RemoteProcessProxy.port-range",
        dest="rpp_port_range",
        nargs="?",
        metavar="<lowerPort>..<upperPort>",
        help="Port range to impose for kernel ports (deprecated)",
    )
    parser.add_argument(
        "--RemoteProcessProxy.spark-context-initialization-mode",
        dest="rpp_init_mode",
        nargs="?",
        default="none",
        help="the initialization mode of the spark context: lazy, eager or none (deprecated)",
    )
    parser.add_argument(
        "--RemoteProcessProxy.cluster-type",
        dest="rpp_cluster_type",
        nargs="?",
        default="spark",
        help="the kind of cluster to initialize: spark, dask, or none (deprecated)",
    )

    arguments = vars(parser.parse_args())

    connection_file = arguments["connection_file"]
    response_addr = arguments["response_address"] or arguments["rpp_response_address"]
    kernel_id = arguments["kernel_id"] or arguments["rpp_kernel_id"]
    public_key = arguments["public_key"] or arguments["rpp_public_key"]
    lower_port, upper_port = _validate_port_range(
        arguments["port_range"] or arguments["rpp_port_range"]
    )
    spark_init_mode = arguments["init_mode"] or arguments["rpp_init_mode"]
    cluster_type = arguments["cluster_type"] or arguments["rpp_cluster_type"]
    kernel_class_name = arguments["kernel_class_name"]
    ip = "0.0.0.0"  # noqa

    if connection_file is None and kernel_id is None:
        msg = "At least one of the parameters: 'connection_file' or '--kernel-id' must be provided!"
        raise RuntimeError(msg)

    if kernel_id is None:
        msg = "Parameter '--kernel-id' must be provided!"
        raise RuntimeError(msg)

    if public_key is None:
        msg = "Parameter '--public-key' must be provided!"
        raise RuntimeError(msg)

    # Initialize the kernel namespace for the given cluster type
    if cluster_type == "spark" and spark_init_mode == "none":
        cluster_type = "none"

    # If the connection file doesn't exist, then create it.
    if (connection_file and not os.path.isfile(connection_file)) or kernel_id is not None:
        key = str(uuid.uuid4()).encode()  # convert to bytes

        connection_file = determine_connection_file(connection_file, kernel_id)

        ports = _select_ports(5, lower_port, upper_port)

        write_connection_file(
            fname=connection_file,
            ip=ip,
            key=key,
            shell_port=ports[0],
            iopub_port=ports[1],
            stdin_port=ports[2],
            hb_port=ports[3],
            control_port=ports[4],
        )
        if response_addr:
            comm_socket = return_connection_info(
                connection_file, response_addr, lower_port, upper_port, kernel_id, public_key
            )
            if comm_socket:  # socket in use, start server listener process
                server_listener_process = Process(
                    target=server_listener,
                    args=(
                        comm_socket,
                        os.getpid(),
                        cluster_type,
                    ),
                )
                server_listener_process.start()

    if cluster_type == "spark":
        signal.signal(signal.SIGUSR2, cancel_spark_jobs)

    # launch the IPython kernel instance
    start_ipython(
        locals(),
        cluster_type=cluster_type,
        connection_file=connection_file,
        ip=ip,
        kernel_class_name=kernel_class_name,
    )
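# For reference, the connection information ultimately returned to the server by
# return_connection_info() has roughly this shape (values are illustrative; the
# five ports come from _select_ports() and the last four fields are added by the
# launcher):
#
#   {
#     "ip": "0.0.0.0", "transport": "tcp", "signature_scheme": "hmac-sha256",
#     "key": "<uuid4>", "shell_port": 40000, "iopub_port": 40001,
#     "stdin_port": 40002, "hb_port": 40003, "control_port": 40004,
#     "pid": 12345, "pgid": 12345, "comm_port": 40005, "kernel_id": "<uuid>"
#   }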
================================================
FILE: etc/kernel-launchers/scala/toree-launcher/build.sbt
================================================
/*
 * Copyright (c) Jupyter Development Team.
 * Distributed under the terms of the Modified BSD License.
 */

name := "toree-launcher"

version := sys.props.getOrElse("version", default = "1.0").replaceAll("dev[0-9]", "SNAPSHOT")

scalaVersion := "2.12.12"

resolvers += "Typesafe Repo" at "https://repo.typesafe.com/typesafe/releases/"
/* resolvers += "Sonatype Repository" at "https://oss.sonatype.org/content/repositories/releases/" */
resolvers += "Sonatype Maven Central Mirror" at "https://maven-central.storage-download.googleapis.com/maven2/"

libraryDependencies += "com.typesafe.play" %% "play-json" % "2.7.4" // Apache v2
libraryDependencies += "org.apache.toree" % "toree-assembly" % "0.5.0-incubating"

================================================
FILE: etc/kernel-launchers/scala/toree-launcher/project/build.properties
================================================
#
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
sbt.version = 1.3.12

================================================
FILE: etc/kernel-launchers/scala/toree-launcher/project/plugins.sbt
================================================
/*
 * Copyright (c) Jupyter Development Team.
 * Distributed under the terms of the Modified BSD License.
 */

logLevel := Level.Warn

/*
 * Following plugins have a dependency on sbt v0.13
 */
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5")
addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "1.0.0")

================================================
FILE: etc/kernel-launchers/scala/toree-launcher/project/scalastyle-config.xml
================================================
Scalastyle standard configuration true ARROW, EQUALS, ELSE, TRY, CATCH, FINALLY, LARROW, RARROW ARROW, EQUALS, COMMA, COLON, IF, ELSE, DO, WHILE, FOR, MATCH, TRY, CATCH, FINALLY, LARROW, RARROW ^println$ Class\.forName 800> 30 10 50 -1,0,1,2,3
================================================
FILE: etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/KernelProfile.scala
================================================
/**
 * Copyright (c) Jupyter Development Team.
 * Distributed under the terms of the Modified BSD License.
 */
package launcher

import java.util.UUID.randomUUID

import play.api.libs.json._
import scala.util.Random

import launcher.utils.SocketUtils

case class KernelProfile(hb_port : Int,
                         control_port : Int,
                         iopub_port : Int,
                         stdin_port : Int,
                         shell_port : Int,
                         key : String,
                         kernel_name : String,
                         signature_scheme : String,
                         transport : String,
                         ip : String)

object KernelProfile {

  def newKey() : String = randomUUID.toString

  def createJsonProfile(portLowerBound: Int = -1, portUpperBound: Int = -1) : String = {
    implicit val writes = Json.writes[KernelProfile]

    val newKernelProfile = new KernelProfile(
      hb_port = SocketUtils.findPort(portLowerBound, portUpperBound),
      control_port = SocketUtils.findPort(portLowerBound, portUpperBound),
      iopub_port = SocketUtils.findPort(portLowerBound, portUpperBound),
      stdin_port = SocketUtils.findPort(portLowerBound, portUpperBound),
      shell_port = SocketUtils.findPort(portLowerBound, portUpperBound),
      key = newKey(),
      kernel_name = "Apache Toree Scala",
      transport = "tcp",
      ip = "0.0.0.0",
      signature_scheme = "hmac-sha256"
    )

    Json.prettyPrint(Json.toJson(newKernelProfile))
  }
}
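// Illustrative output of createJsonProfile() above; the ports and key are chosen at
// random, so the values shown here are examples only:
//   {
//     "hb_port" : 40003, "control_port" : 40004, "iopub_port" : 40001,
//     "stdin_port" : 40002, "shell_port" : 40000,
//     "key" : "4f0e2a2b-....", "kernel_name" : "Apache Toree Scala",
//     "signature_scheme" : "hmac-sha256", "transport" : "tcp", "ip" : "0.0.0.0"
//   }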
logger.info("---------------------------") // Walk the arguments, collecting launcher options along the way and buildup a // new toree arguments list. There's got to be a better way to do this. var i = 0 while ( i < args.length ) { var arg: String = args(i) arg match { // Profile is a straight pass-thru to toree case "--profile" => i += 1 profilePath = args(i).trim toreeArgs += arg toreeArgs += profilePath // Alternate sigint is a straight pass-thru to toree case "--alternate-sigint" => i += 1 alternateSigint = args(i).trim toreeArgs += arg toreeArgs += alternateSigint // Initialization mode requires massaging for toree case "--spark-context-initialization-mode" | "--RemoteProcessProxy.spark-context-initialization-mode" => i += 1 initMode = args(i).trim initMode match { case "none" => toreeArgs += "--nosparkcontext" case _ => toreeArgs += "--spark-context-initialization-mode" toreeArgs += initMode } // Port range doesn't apply to toree, consume here case "--port-range" | "--RemoteProcessProxy.port-range" => i += 1 initPortRange(args(i).trim) // Response address doesn't apply to toree, consume here case "--response-address" | "--RemoteProcessProxy.response-address" => i += 1 responseAddress = args(i).trim // kernel id doesn't apply to toree, consume here case "--kernel-id" | "--RemoteProcessProxy.kernel-id" => i += 1 kernelId = args(i).trim // Public key doesn't apply to toree, consume here case "--public-key" | "--RemoteProcessProxy.public-key" => i += 1 publicKey = args(i).trim // All other arguments should pass-thru to toree case _ => toreeArgs += args(i).trim } i += 1 } } // Borrowed from toree to avoid dependency private def deleteDirRecur(file: File): Unit = { // delete directory recursively if (file != null){ if (file.isDirectory){ file.listFiles.foreach(deleteDirRecur) } if (file.exists){ file.delete } } } private def determineConnectionFile(connectionFile: String, kernelId: String): String = { // We know the connection file does not exist, so create a temporary directory // and derive the filename from kernelId, if not null. // If kernelId is null, then use the filename in the connectionFile. val tmpPath = Files.createTempDirectory(kernelTempDir) // tmpPath.toFile.deleteOnExit() doesn't appear to work, use system hook sys.addShutdownHook{ deleteDirRecur(tmpPath.toFile) } val fileName = if (kernelId != null) "kernel-" + kernelId + ".json" else Paths.get(connectionFile).getFileName.toString val newPath = Paths.get(tmpPath.toString, fileName) val newConnectionFile = newPath.toString // Locate --profile and replace next element with new name. If it doesn't exist, add both. val profileIndex = toreeArgs.indexOf("--profile") if (profileIndex >= 0) { toreeArgs(profileIndex + 1) = newConnectionFile } else { toreeArgs += "--profile" toreeArgs += newConnectionFile } newConnectionFile } private def getPID : String = { // Return the current process ID. If not an integer string, server will ignore. 
  private def initProfile(args : Array[String]): ServerSocket = {
    var commSocket : ServerSocket = null
    initArguments(args)

    if (profilePath == null && kernelId == null) {
      logger.error("At least one of '--profile' or '--kernel-id' " +
        "must be provided - exiting!")
      sys.exit(-1)
    }

    if (kernelId == null) {
      logger.error("Parameter '--kernel-id' must be provided - exiting!")
      sys.exit(-1)
    }

    if (publicKey == null) {
      logger.error("Parameter '--public-key' must be provided - exiting!")
      sys.exit(-1)
    }

    if (!pathExists(profilePath)) {
      profilePath = determineConnectionFile(profilePath, kernelId)
      logger.info("The profile %s doesn't exist, now creating it...".format(profilePath))
      val content = KernelProfile.createJsonProfile(this.portLowerBound, this.portUpperBound)
      writeToFile(profilePath, content)
      if (pathExists(profilePath)) {
        logger.info("%s saved".format(profilePath))
      } else {
        logger.error("Failed to create: %s".format(profilePath))
        sys.exit(-1)
      }

      var connectionJson = Json.parse(content)

      // Now need to also return the PID info in connection JSON
      connectionJson = connectionJson.as[JsObject] ++ Json.obj("pid" -> getPID)

      // Add kernelId
      connectionJson = connectionJson.as[JsObject] ++ Json.obj("kernel_id" -> kernelId)

      // Server wants to establish socket communication.  Create socket and
      // convey port number back to the server.
      commSocket = SocketUtils.findSocket(this.portLowerBound, this.portUpperBound)
      connectionJson = connectionJson.as[JsObject] ++ Json.obj("comm_port" -> commSocket.getLocalPort)

      val jsonContent = Json.toJson(connectionJson).toString()

      if (responseAddress != null) {
        logger.info("JSON Payload: '%s'".format(jsonContent))
        val payload = SecurityUtils.encrypt(publicKey, jsonContent)
        logger.info("Encrypted Payload: '%s'".format(payload))
        SocketUtils.writeToSocket(responseAddress, payload)
      }
    }
    commSocket
  }

  private def getServerRequest(commSocket : ServerSocket): String = {
    val s = commSocket.accept()
    val data = new BufferedSource(s.getInputStream).getLines.mkString
    s.close()
    data
  }

  private def getReconciledSignalName(sigNum: Int): String = {
    // To raise the signal, we must map the signal number back to the appropriate
    // name as follows: Take the common case and assume interrupt and check if an
    // alternate interrupt signal has been given.  If sigNum = 9, use "TERM", else
    // if no alternate has been provided use "INT".  Note that use of SIGINT won't
    // get received because the JVM won't propagate it to background threads, but it's
    // the best we can do.  We'll still issue a warning in the log.
    require(sigNum > 0, "sigNum must be greater than zero")
    if (sigNum == 9) "TERM"
    else {
      if (alternateSigint == null) {
        logger.warn(("--alternate-sigint is not defined and signum %d has been " +
          "requested. Using SIGINT, which probably won't get received due to the JVM " +
          "preventing interrupts on background processes. " +
          "Define --alternate-sigint using __TOREE_OPTS__.").format(sigNum))
        "INT"
      } else alternateSigint
    }
  }
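  // Request shapes handled by serverListener() below; each request arrives as a
  // single JSON object on the comm socket (the values shown are illustrative):
  //   {"signum": 2}   - raise the interrupt signal (or the configured --alternate-sigint)
  //   {"signum": 9}   - raise SIGTERM
  //   {"signum": 0}   - poll; no signal is raised
  //   {"shutdown": 1} - stop the listener so the kernel can exit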
  private def serverListener(commSocket : ServerSocket): Unit = {
    var stop = false
    while (!stop) {
      val requestData = getServerRequest(commSocket)

      // Handle each of the requests.  Note that we do not make an assumption that these are
      // mutually exclusive - although that will probably be the case for now.  Over time,
      // this should probably get refactored into a) better scala and b) token/classes for
      // each request.
      val requestJson = Json.parse(requestData).as[JsObject].value

      // Signal the kernel...
      if (requestJson.contains("signum")) {
        val sigNum = requestJson("signum").asInstanceOf[JsNumber].value.toInt
        if (sigNum > 0) { // If sigNum is anything but 0 (for poll), use Signal.raise(signal) to signal the kernel.
          val sigName = getReconciledSignalName(sigNum)
          val sigToRaise = new Signal(sigName)
          logger.info("Server listener raising signal: '%s' (%d) for signum: %d".
            format(sigToRaise.getName, sigToRaise.getNumber, sigNum))
          Signal.raise(sigToRaise)
        }
      }

      // Stop the listener...
      if (requestJson.contains("shutdown")) {
        val shutdown = requestJson("shutdown").asInstanceOf[JsNumber].value.toInt
        if (shutdown == 1) {
          // The server has been instructed to shutdown the kernel, so let's stop
          // the listener so that it doesn't interfere with poll() calls.
          logger.info("Stopping server listener.")
          stop = true
        }
      }
    }
  }

  def main(args: Array[String]) {
    val commSocket = initProfile(args)

    // if commSocket is not null, start a thread to listen on socket
    if (commSocket != null) {
      val serverListenerThread = new Thread {
        override def run() {
          serverListener(commSocket)
        }
      }
      logger.info("Starting server listener...")
      serverListenerThread.start()
    }

    logger.info("Toree kernel arguments (final):")
    toreeArgs.foreach(logger.info(_))
    logger.info("---------------------------")

    Main.main(toreeArgs.toArray)
  }
}
================================================
FILE: etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SecurityUtils.scala
================================================
/**
 * Copyright (c) Jupyter Development Team.
 * Distributed under the terms of the Modified BSD License.
 */
package launcher.utils

import scala.util.Random

import java.nio.charset.StandardCharsets
import java.security.Key
import java.security.KeyFactory
import java.security.PublicKey
import java.security.spec.X509EncodedKeySpec
import java.util.Base64

import javax.crypto.Cipher
import javax.crypto.spec.SecretKeySpec

import play.api.libs.json._
import org.apache.toree.utils.LogLike

case class Payload(key : String, conn_info : String, version : Int = 1)

object Payload {
  def createJson(key: String, conn_info: String) : String = {
    implicit val writes = Json.writes[Payload]
    val newPayload = new Payload(key = key, conn_info = conn_info)
    Json.prettyPrint(Json.toJson(newPayload))
  }
}

object SecurityUtils extends LogLike {

  def encrypt(publicKey: String, jsonContent: String): String = {
    // Generate an AES key and encrypt the connection information...
    logger.info("publicKey: %s".format(publicKey))
    val random: Random = new Random()
    val preKey: Array[Byte] = new Array[Byte](16)
    random.nextBytes(preKey)
    logger.info("aes_key: '%s'".format(preKey))
    val aesKey: Key = new SecretKeySpec(preKey, "AES")
    val aesCipher: Cipher = Cipher.getInstance("AES")
    aesCipher.init(Cipher.ENCRYPT_MODE, aesKey)
    val connInfo = Base64.getEncoder.encodeToString(
      aesCipher.doFinal(jsonContent.getBytes(StandardCharsets.UTF_8)))

    // Encrypt the AES key using the public key...
    val encodedPK: Array[Byte] = publicKey.getBytes(StandardCharsets.UTF_8)
    val b64Key = Base64.getDecoder.decode(encodedPK)
    val keySpec: X509EncodedKeySpec = new X509EncodedKeySpec(b64Key)
    val keyFactory: KeyFactory = KeyFactory.getInstance("RSA")
    val rsaKey: PublicKey = keyFactory.generatePublic(keySpec)
    val rsaCipher: Cipher = Cipher.getInstance("RSA")
    rsaCipher.init(Cipher.ENCRYPT_MODE, rsaKey)
    val key = Base64.getEncoder.encodeToString(rsaCipher.doFinal(aesKey.getEncoded()))

    Base64.getEncoder.encodeToString(
      Payload.createJson(key, connInfo).getBytes(StandardCharsets.UTF_8))
  }
}

================================================
FILE: etc/kernel-launchers/scala/toree-launcher/src/main/scala/launcher/utils/SocketUtils.scala
================================================
/**
 * Copyright (c) Jupyter Development Team.
 * Distributed under the terms of the Modified BSD License.
 */
package launcher.utils

import java.io.PrintStream
import java.net.{InetAddress, ServerSocket, Socket}

import org.apache.toree.utils.LogLike
import scala.util.Random

object SocketUtils extends LogLike {

  val random: Random = new Random(System.currentTimeMillis)

  def writeToSocket(socketAddress : String, content : String): Unit = {
    val ipPort = socketAddress.split(":")
    if (ipPort.length == 2) {
      logger.info("Sending connection info to gateway at %s\n%s".format(socketAddress, content)) // scalastyle:off
      val ip = ipPort(0)
      val port = ipPort(1).toInt
      val s = new Socket(InetAddress.getByName(ip), port)
      val out = new PrintStream(s.getOutputStream)
      try {
        out.append(content)
        out.flush()
      } finally {
        s.close()
      }
    } else {
      logger.error("Invalid format for response address '%s'!".format(socketAddress)) // scalastyle:off
    }
  }

  def findPort(portLowerBound: Int, portUpperBound: Int): Int = {
    val socket = findSocket(portLowerBound, portUpperBound)
    val port = socket.getLocalPort
    logger.info("port %s is available".format(port)) // scalastyle:off
    // now close the socket/port
    socket.close()
    logger.info("Port %s closed...".format(port)) // scalastyle:off
    port
  }

  def findSocket(portLowerBound: Int, portUpperBound: Int): ServerSocket = {
    var foundAvailable: Boolean = false
    var socket: ServerSocket = null

    while (!foundAvailable) {
      val candidatePort = getCandidatePort(portLowerBound, portUpperBound)
      // try candidatePort - only display 'Trying...' if in range
      if (candidatePort > 0) logger.info("Trying port %s ...".format(candidatePort)) // scalastyle:off
      try {
        socket = new ServerSocket(candidatePort) // return the socket to be used
        foundAvailable = true
      } catch {
        case _ : Throwable =>
          logger.info("port %s is in use".format(candidatePort)) // scalastyle:off
          socket = null
      }
    }
    socket
  }

  private def getCandidatePort(portLowerBound: Int, portUpperBound: Int): Int = {
    val portRange = portUpperBound - portLowerBound
    if (portRange <= 0) return 0
    val port = portLowerBound + random.nextInt(portRange)
    port
  }
}

================================================
FILE: etc/kernel-resources/ir/kernel.js
================================================
const cmd_key = /Mac/.test(navigator.platform) ?
"Cmd" : "Ctrl"; const edit_actions = [ { name: "R Assign", shortcut: "Alt--", icon: "fa-long-arrow-left", help: "R: Inserts the left-assign operator (<-)", handler(cm) { cm.replaceSelection(" <- "); }, }, { name: "R Pipe", shortcut: `Shift-${cmd_key}-M`, icon: "fa-angle-right", help: "R: Inserts the magrittr pipe operator (%>%)", handler(cm) { cm.replaceSelection(" %>% "); }, }, { name: "R Help", shortcut: "F1", icon: "fa-book", help: "R: Shows the manpage for the item under the cursor", handler(cm, cell) { const { anchor, head } = cm.findWordAt(cm.getCursor()); const word = cm.getRange(anchor, head); const callbacks = cell.get_callbacks(); const options = { silent: false, store_history: false, stop_on_error: true, }; cell.last_msg_id = cell.notebook.kernel.execute( `help(\`${word}\`)`, callbacks, options, ); }, }, ]; const prefix = "irkernel"; function add_edit_shortcut(notebook, actions, keyboard_manager, edit_action) { const { name, shortcut, icon, help, handler } = edit_action; const action = { icon, help, help_index: "zz", handler: () => { const cell = notebook.get_selected_cell(); handler(cell.code_mirror, cell); }, }; const full_name = actions.register(action, name, prefix); Jupyter.keyboard_manager.edit_shortcuts.add_shortcut(shortcut, full_name); } function render_math(pager, html) { if (!html) return; const $container = pager.pager_element.find("#pager-container"); $container .find('p[style="text-align: center;"]') .map((i, e) => (e.outerHTML = `\\[${e.querySelector("i").innerHTML}\\]`)); $container.find("i").map((i, e) => (e.outerHTML = `\\(${e.innerHTML}\\)`)); MathJax.Hub.Queue(["Typeset", MathJax.Hub, $container[0]]); } define(["base/js/namespace"], ({ notebook, actions, keyboard_manager, pager, }) => ({ onload() { edit_actions.forEach((a) => add_edit_shortcut(notebook, actions, keyboard_manager, a), ); pager.events.on( "open_with_text.Pager", (event, { data: { "text/html": html } }) => render_math(pager, html), ); }, })); ================================================ FILE: etc/kernelspecs/R_docker/kernel.json ================================================ { "language": "R", "display_name": "R on Docker", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy", "config": { "image_name": "elyra/kernel-r:VERSION" } } }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/R_docker/scripts/launch_docker.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/R_kubernetes/kernel.json ================================================ { "language": "R", "display_name": "R on Kubernetes", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-r:VERSION" } } }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/R_kubernetes/scripts/launch_kubernetes.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/dask_python_yarn_remote/bin/run.sh ================================================ #!/usr/bin/env bash if [ 
"${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="--user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IPython kernel for Dask ${USER_CLAUSE}" echo PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" set -x eval exec \ "${DASK_YARN_EXE}" submit \ "${DASK_OPTS}" \ "${IMPERSONATION_OPTS}" \ "${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/dask_python_yarn_remote/kernel.json ================================================ { "language": "python", "display_name": "Dask - Python (YARN Remote Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" }, "debugger": true }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "DASK_YARN_EXE": "/opt/conda/bin/dask-yarn", "DASK_OPTS": "--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --environment python:///opt/conda/bin/python --temporary-security-credentials --deploy-mode remote", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/dask_python_yarn_remote/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.cluster-type", "dask" ] } ================================================ FILE: etc/kernelspecs/python_distributed/kernel.json ================================================ { "display_name": "Python 3 (distributed)", "language": "python", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" }, "debugger": true }, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_distributed/scripts/launch_ipykernel.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "none" ] } ================================================ FILE: etc/kernelspecs/python_docker/kernel.json ================================================ { "language": "python", "display_name": "Python on Docker", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy", "config": { "image_name": "elyra/kernel-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_docker/scripts/launch_docker.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/python_kubernetes/kernel.json ================================================ { "language": "python", "display_name": "Python on Kubernetes", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_kubernetes/scripts/launch_kubernetes.py", 
"--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/python_tf_docker/kernel.json ================================================ { "language": "python", "display_name": "Python on Docker with Tensorflow", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy", "config": { "image_name": "elyra/kernel-tf-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_docker/scripts/launch_docker.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/python_tf_gpu_docker/kernel.json ================================================ { "language": "python", "display_name": "Python on Docker with Tensorflow with GPUs", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy", "config": { "image_name": "elyra/kernel-tf-gpu-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_gpu_docker/scripts/launch_docker.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/python_tf_gpu_kubernetes/kernel.json ================================================ { "language": "python", "display_name": "Python on Kubernetes with Tensorflow with GPUs", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-tf-gpu-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_gpu_kubernetes/scripts/launch_kubernetes.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/python_tf_kubernetes/kernel.json ================================================ { "language": "python", "display_name": "Python on Kubernetes with Tensorflow", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-tf-py:VERSION" } }, "debugger": true }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/python_tf_kubernetes/scripts/launch_kubernetes.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/scala_docker/kernel.json ================================================ { "language": "scala", "display_name": "Scala on Docker", "metadata": { "process_proxy": { 
"class_name": "enterprise_gateway.services.processproxies.docker_swarm.DockerSwarmProcessProxy", "config": { "image_name": "elyra/kernel-scala:VERSION" } } }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/scala_docker/scripts/launch_docker.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/scala_kubernetes/kernel.json ================================================ { "language": "scala", "display_name": "Scala on Kubernetes", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-scala:VERSION" } } }, "env": {}, "argv": [ "python", "/usr/local/share/jupyter/kernels/scala_kubernetes/scripts/launch_kubernetes.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/spark_R_conductor_cluster/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IRkernel for Spark Cluster mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" 
exit 1 fi PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" # Add server_listener.py to files for spark-opts ADDITIONAL_OPTS="--files ${PROG_HOME}/scripts/server_listener.py" eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${ADDITIONAL_OPTS}" \ "${IMPERSONATION_OPTS}" \ "${PROG_HOME}/scripts/launch_IRkernel.R" \ "${LAUNCH_OPTS}" \ "$@" ================================================ FILE: etc/kernelspecs/spark_R_conductor_cluster/kernel.json ================================================ { "language": "R", "display_name": "Spark R (Spark Cluster Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy" } }, "env": { "SPARK_OPTS": "--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "--customAppName ${KERNEL_ID}" }, "argv": [ "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "eager" ] } ================================================ FILE: etc/kernelspecs/spark_R_kubernetes/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then # IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else # IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IRkernel for Spark in Kubernetes mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" exit 1 fi if [ -z "${KERNEL_ID}" ]; then echo "KERNEL_ID must be set for discovery and lifecycle management!" 
exit 1 fi KERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers} PROG_HOME=${KERNEL_LAUNCHERS_DIR}/R EG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp} SCRIPTS_HOME="$(cd "`dirname "$0"`"/../scripts; pwd)" pod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID} spark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID} python ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out} additional_spark_opts=`cat ${spark_opts_out}` SPARK_OPTS="${SPARK_OPTS} ${additional_spark_opts}" rm -f ${spark_opts_out} set -x eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "local://${PROG_HOME}/scripts/launch_IRkernel.R" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_R_kubernetes/kernel.json ================================================ { "language": "R", "display_name": "Spark - R (Kubernetes Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-spark-r:VERSION", "executor_image_name": "elyra/kernel-spark-r:VERSION" } } }, "env": { "SPARK_HOME": "/opt/spark", "SPARK_OPTS": "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}", "HTTP2_DISABLE": "true", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_R_kubernetes/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ================================================ FILE: etc/kernelspecs/spark_R_yarn_client/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IRkernel for Spark in Yarn Client mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" 
exit 1 fi PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" set -x eval exec "${IMPERSONATION_OPTS}" \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${PROG_HOME}/scripts/launch_IRkernel.R" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_R_yarn_client/kernel.json ================================================ { "language": "R", "display_name": "Spark - R (YARN Client Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" } }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_R_yarn_client/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ================================================ FILE: etc/kernelspecs/spark_R_yarn_cluster/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IRkernel for Spark in Yarn Cluster mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" 
exit 1 fi PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" # Add server_listener.py to files for spark-opts ADDITIONAL_OPTS="--files ${PROG_HOME}/scripts/server_listener.py" set -x eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${ADDITIONAL_OPTS}" \ "${IMPERSONATION_OPTS}" \ "${PROG_HOME}/scripts/launch_IRkernel.R" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_R_yarn_cluster/kernel.json ================================================ { "language": "R", "display_name": "Spark - R (YARN Cluster Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" } }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.sparkr.r.command=/opt/conda/lib/R/bin/Rscript --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_R_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "eager" ] } ================================================ FILE: etc/kernelspecs/spark_python_conductor_cluster/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IPython kernel for Spark Cluster mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" 
exit 1 fi if [ -z "${KERNEL_IG_UUID}" ]; then PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" else PROG_HOME="${SPARK_HOME}" fi eval exec "${IMPERSONATION_OPTS}" \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" ================================================ FILE: etc/kernelspecs/spark_python_conductor_cluster/kernel.json ================================================ { "language": "python", "display_name": "Spark Python (Spark Cluster Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy" }, "debugger": true }, "env": { "SPARK_OPTS": "--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "argv": [ "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "eager" ] } ================================================ FILE: etc/kernelspecs/spark_python_kubernetes/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then # IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else # IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IPython kernel for Spark in Kubernetes mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" exit 1 fi if [ -z "${KERNEL_ID}" ]; then echo "KERNEL_ID must be set for discovery and lifecycle management!" 
exit 1 fi KERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers} PROG_HOME=${KERNEL_LAUNCHERS_DIR}/python EG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp} SCRIPTS_HOME="$(cd "`dirname "$0"`"/../scripts; pwd)" pod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID} spark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID} python ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out} additional_spark_opts=`cat ${spark_opts_out}` SPARK_OPTS="${SPARK_OPTS} ${additional_spark_opts}" rm -f ${spark_opts_out} set -x eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "local://${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_python_kubernetes/kernel.json ================================================ { "language": "python", "display_name": "Spark - Python (Kubernetes Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy", "config": { "image_name": "elyra/kernel-spark-py:VERSION", "executor_image_name": "elyra/kernel-spark-py:VERSION" } }, "debugger": true }, "env": { "SPARK_HOME": "/opt/spark", "SPARK_OPTS": "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}", "HTTP2_DISABLE": "true", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_kubernetes/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ================================================ FILE: etc/kernelspecs/spark_python_operator/kernel.json ================================================ { "language": "python", "display_name": "Spark Operator (Python)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.spark_operator.SparkOperatorProcessProxy", "config": { "image_name": "elyra/kernel-spark-py:VERSION", "executor_image_name": "elyra/kernel-spark-py:VERSION" } } }, "argv": [ "python", "/usr/local/share/jupyter/kernels/spark_python_operator/scripts/launch_custom_resource.py", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}" ] } ================================================ FILE: etc/kernelspecs/spark_python_yarn_client/bin/run.sh 
================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IPython kernel for Spark in Yarn Client mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" exit 1 fi PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)" set -x eval exec "${IMPERSONATION_OPTS}" \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${PROG_HOME}/scripts/launch_ipykernel.py" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_python_yarn_client/kernel.json ================================================ { "language": "python", "display_name": "Spark - Python (YARN Client Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy" }, "debugger": true }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "PYSPARK_PYTHON": "/opt/conda/bin/python", "PYTHONPATH": "${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip", "SPARK_OPTS": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}", "LAUNCH_OPTS": "" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_python_yarn_client/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ================================================ FILE: etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh ================================================ #!/usr/bin/env bash if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}" USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}" else IMPERSONATION_OPTS="" USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}" fi echo echo "Starting IPython kernel for Spark in Yarn Cluster mode ${USER_CLAUSE}" echo if [ -z "${SPARK_HOME}" ]; then echo "SPARK_HOME must be set to the location of a Spark distribution!" 
================================================ FILE: etc/kernelspecs/spark_python_yarn_cluster/bin/run.sh ================================================
#!/usr/bin/env bash

if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then
  IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}"
  USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}"
else
  IMPERSONATION_OPTS=""
  USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}"
fi

echo
echo "Starting IPython kernel for Spark in Yarn Cluster mode ${USER_CLAUSE}"
echo

if [ -z "${SPARK_HOME}" ]; then
  echo "SPARK_HOME must be set to the location of a Spark distribution!"
  exit 1
fi

PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)"

set -x
eval exec \
  "${SPARK_HOME}/bin/spark-submit" \
  "${SPARK_OPTS}" \
  "${IMPERSONATION_OPTS}" \
  "${PROG_HOME}/scripts/launch_ipykernel.py" \
  "${LAUNCH_OPTS}" \
  "$@"
set +x

================================================ FILE: etc/kernelspecs/spark_python_yarn_cluster/kernel.json ================================================
{
  "language": "python",
  "display_name": "Spark - Python (YARN Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy"
    },
    "debugger": true
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "PYSPARK_PYTHON": "/opt/conda/bin/python",
    "PYTHONPATH": "${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip",
    "SPARK_OPTS": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.appMasterEnv.PYTHONUSERBASE=/home/${KERNEL_USERNAME}/.local --conf spark.yarn.appMasterEnv.PYTHONPATH=${HOME}/.local/lib/python3.8/site-packages:/usr/hdp/current/spark2-client/python:/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip --conf spark.yarn.appMasterEnv.PATH=/opt/conda/bin:$PATH --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}",
    "LAUNCH_OPTS": ""
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_python_yarn_cluster/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "lazy"
  ]
}
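Because the driver runs in YARN cluster mode and the application is named after the kernel id (the --name option in SPARK_OPTS above), a running kernel can be located with the standard YARN CLI — a sketch, assuming a configured Hadoop client; the YarnClusterProcessProxy performs a similar lookup through the ResourceManager API:

    # List running applications and match on the kernel id used as the app name.
    yarn application -list -appStates RUNNING | grep "${KERNEL_ID}"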
================================================ FILE: etc/kernelspecs/spark_scala_conductor_cluster/bin/run.sh ================================================
#!/usr/bin/env bash

if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then
  IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}"
  USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}"
else
  IMPERSONATION_OPTS=""
  USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}"
fi

echo
echo "Starting Scala kernel for Spark Cluster mode ${USER_CLAUSE}"
echo

if [ -z "${SPARK_HOME}" ]; then
  echo "SPARK_HOME must be set to the location of a Spark distribution!"
  exit 1
fi

PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)"

KERNEL_ASSEMBLY=`(cd "${PROG_HOME}/lib"; ls -1 toree-assembly-*.jar;)`
TOREE_ASSEMBLY="${PROG_HOME}/lib/${KERNEL_ASSEMBLY}"
if [ ! -f ${TOREE_ASSEMBLY} ]; then
  echo "Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing. Exiting..."
  exit 1
fi

# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during
# install, but also during runtime. The runtime options take precedence over the install options.
if [ "${SPARK_OPTS}" = "" ]; then
  SPARK_OPTS=${__TOREE_SPARK_OPTS__}
fi

if [ "${TOREE_OPTS}" = "" ]; then
  TOREE_OPTS=${__TOREE_OPTS__}
fi

# Toree launcher jar path, plus required lib jars (toree-assembly)
JARS="${TOREE_ASSEMBLY}"
# Toree launcher app path
LAUNCHER_JAR=`(cd "${PROG_HOME}/lib"; ls -1 toree-launcher*.jar;)`
LAUNCHER_APP="${PROG_HOME}/lib/${LAUNCHER_JAR}"
if [ ! -f ${LAUNCHER_APP} ]; then
  echo "Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing. Exiting..."
  exit 1
fi

eval exec \
  "${SPARK_HOME}/bin/spark-submit" \
  "${SPARK_OPTS}" \
  "${IMPERSONATION_OPTS}" \
  --jars "${JARS}" \
  --class launcher.ToreeLauncher \
  "${LAUNCHER_APP}" \
  "${TOREE_OPTS}" \
  "${LAUNCH_OPTS}" \
  "$@"

================================================ FILE: etc/kernelspecs/spark_scala_conductor_cluster/kernel.json ================================================
{
  "language": "scala",
  "display_name": "Spark Scala (Spark Cluster Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.conductor.ConductorClusterProcessProxy"
    }
  },
  "env": {
    "SPARK_OPTS": "--name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}",
    "__TOREE_OPTS__": "--alternate-sigint USR2 --spark-context-initialization-mode eager",
    "LAUNCH_OPTS": "",
    "DEFAULT_INTERPRETER": "Scala"
  },
  "argv": [
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.port-range",
    "{port_range}"
  ]
}

================================================ FILE: etc/kernelspecs/spark_scala_kubernetes/bin/run.sh ================================================
#!/usr/bin/env bash

if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then
  # IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}"
  USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}"
else
  # IMPERSONATION_OPTS=""
  USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}"
fi

echo
echo "Starting Toree kernel for Spark in Kubernetes mode ${USER_CLAUSE}"
echo

if [ -z "${SPARK_HOME}" ]; then
  echo "SPARK_HOME must be set to the location of a Spark distribution!"
  exit 1
fi

if [ -z "${KERNEL_ID}" ]; then
  echo "KERNEL_ID must be set for discovery and lifecycle management!"
  exit 1
fi

KERNEL_LAUNCHERS_DIR=${KERNEL_LAUNCHERS_DIR:-/usr/local/bin/kernel-launchers}
PROG_HOME=${KERNEL_LAUNCHERS_DIR}/scala

KERNEL_ASSEMBLY=`(cd "${PROG_HOME}/lib"; ls -1 toree-assembly-*.jar;)`
TOREE_ASSEMBLY="${PROG_HOME}/lib/${KERNEL_ASSEMBLY}"
if [ ! -f ${TOREE_ASSEMBLY} ]; then
  echo "Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing. Exiting..."
  exit 1
fi

# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during
# install, but also during runtime. The runtime options take precedence over the install options.
if [ "${SPARK_OPTS}" = "" ]; then
  SPARK_OPTS=${__TOREE_SPARK_OPTS__}
fi

if [ "${TOREE_OPTS}" = "" ]; then
  TOREE_OPTS=${__TOREE_OPTS__}
fi

# Toree launcher jar path, plus required lib jars (toree-assembly)
JARS="local://${TOREE_ASSEMBLY}"
# Toree launcher app path
LAUNCHER_JAR=`(cd "${PROG_HOME}/lib"; ls -1 toree-launcher*.jar;)`
LAUNCHER_APP="${PROG_HOME}/lib/${LAUNCHER_JAR}"
if [ ! -f ${LAUNCHER_APP} ]; then
  echo "Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing. Exiting..."
  exit 1
fi

EG_POD_TEMPLATE_DIR=${EG_POD_TEMPLATE_DIR:-/tmp}
SCRIPTS_HOME="$(cd "`dirname "$0"`"/../scripts; pwd)"

pod_template_file=${EG_POD_TEMPLATE_DIR}/kpt_${KERNEL_ID}
spark_opts_out=${EG_POD_TEMPLATE_DIR}/spark_opts_${KERNEL_ID}
python ${SCRIPTS_HOME}/launch_kubernetes.py $@ --pod-template=${pod_template_file} --spark-opts-out=${spark_opts_out}
additional_spark_opts=`cat ${spark_opts_out}`
SPARK_OPTS="${SPARK_OPTS} ${additional_spark_opts}"
rm -f ${spark_opts_out}

set -x
eval exec "${IMPERSONATION_OPTS}" \
  "${SPARK_HOME}/bin/spark-submit" \
  "${SPARK_OPTS}" \
  --jars "${JARS}" \
  --class launcher.ToreeLauncher \
  "local://${LAUNCHER_APP}" \
  "${TOREE_OPTS}" \
  "${LAUNCH_OPTS}" \
  "$@"
set +x

================================================ FILE: etc/kernelspecs/spark_scala_kubernetes/kernel.json ================================================
{
  "language": "scala",
  "display_name": "Spark - Scala (Kubernetes Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.k8s.KubernetesProcessProxy",
      "config": {
        "image_name": "elyra/kernel-scala:VERSION",
        "executor_image_name": "elyra/kernel-scala:VERSION"
      }
    }
  },
  "env": {
    "SPARK_HOME": "/opt/spark",
    "__TOREE_SPARK_OPTS__": "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --deploy-mode cluster --name ${KERNEL_USERNAME}-${KERNEL_ID} --conf spark.kubernetes.namespace=${KERNEL_NAMESPACE} --driver-memory 2G --conf spark.kubernetes.driver.label.app=enterprise-gateway --conf spark.kubernetes.driver.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.driver.label.component=kernel --conf spark.kubernetes.executor.label.app=enterprise-gateway --conf spark.kubernetes.executor.label.kernel_id=${KERNEL_ID} --conf spark.kubernetes.executor.label.component=worker --conf spark.kubernetes.driver.container.image=${KERNEL_IMAGE} --conf spark.kubernetes.executor.container.image=${KERNEL_EXECUTOR_IMAGE} --conf spark.kubernetes.authenticate.driver.serviceAccountName=${KERNEL_SERVICE_ACCOUNT_NAME} --conf spark.kubernetes.submission.waitAppCompletion=false --conf spark.kubernetes.driverEnv.HTTP2_DISABLE=true ${KERNEL_EXTRA_SPARK_OPTS}",
    "__TOREE_OPTS__": "--alternate-sigint USR2",
    "HTTP2_DISABLE": "true",
    "LAUNCH_OPTS": "",
    "DEFAULT_INTERPRETER": "Scala"
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_scala_kubernetes/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "eager"
  ]
}
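The driver and executor labels set in __TOREE_SPARK_OPTS__ above (app, kernel_id, component) make the pods belonging to one kernel easy to find; a sketch, assuming kubectl access to the kernel namespace:

    # Driver pod for a given kernel; executors carry component=worker instead.
    kubectl get pods -n "${KERNEL_NAMESPACE}" \
      -l app=enterprise-gateway,kernel_id="${KERNEL_ID}",component=kernel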
================================================ FILE: etc/kernelspecs/spark_scala_yarn_client/bin/run.sh ================================================
#!/usr/bin/env bash

if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then
  IMPERSONATION_OPTS="sudo PATH=${PATH} -H -E -u ${KERNEL_USERNAME:-UNSPECIFIED}"
  USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}"
else
  IMPERSONATION_OPTS=""
  USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}"
fi

echo
echo "Starting Scala kernel for Spark in Yarn Client mode ${USER_CLAUSE}"
echo

if [ -z "${SPARK_HOME}" ]; then
  echo "SPARK_HOME must be set to the location of a Spark distribution!"
  exit 1
fi

PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)"

KERNEL_ASSEMBLY=`(cd "${PROG_HOME}/lib"; ls -1 toree-assembly-*.jar;)`
TOREE_ASSEMBLY="${PROG_HOME}/lib/${KERNEL_ASSEMBLY}"
if [ ! -f ${TOREE_ASSEMBLY} ]; then
  echo "Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing. Exiting..."
  exit 1
fi

# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during
# install, but also during runtime. The runtime options take precedence over the install options.
if [ "${SPARK_OPTS}" = "" ]; then
  SPARK_OPTS=${__TOREE_SPARK_OPTS__}
fi

if [ "${TOREE_OPTS}" = "" ]; then
  TOREE_OPTS=${__TOREE_OPTS__}
fi

# Toree launcher jar path, plus required lib jars (toree-assembly)
JARS="${TOREE_ASSEMBLY}"
# Toree launcher app path
LAUNCHER_JAR=`(cd "${PROG_HOME}/lib"; ls -1 toree-launcher*.jar;)`
LAUNCHER_APP="${PROG_HOME}/lib/${LAUNCHER_JAR}"
if [ ! -f ${LAUNCHER_APP} ]; then
  echo "Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing. Exiting..."
  exit 1
fi

set -x
eval exec "${IMPERSONATION_OPTS}" \
  "${SPARK_HOME}/bin/spark-submit" \
  "${SPARK_OPTS}" \
  --jars "${JARS}" \
  --class launcher.ToreeLauncher \
  "${LAUNCHER_APP}" \
  "${TOREE_OPTS}" \
  "${LAUNCH_OPTS}" \
  "$@"
set +x

================================================ FILE: etc/kernelspecs/spark_scala_yarn_client/kernel.json ================================================
{
  "language": "scala",
  "display_name": "Spark - Scala (YARN Client Mode)",
  "metadata": {
    "process_proxy": {
      "class_name": "enterprise_gateway.services.processproxies.distributed.DistributedProcessProxy"
    }
  },
  "env": {
    "SPARK_HOME": "/usr/hdp/current/spark2-client",
    "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode client --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} ${KERNEL_EXTRA_SPARK_OPTS}",
    "__TOREE_OPTS__": "--alternate-sigint USR2",
    "LAUNCH_OPTS": "",
    "DEFAULT_INTERPRETER": "Scala"
  },
  "argv": [
    "/usr/local/share/jupyter/kernels/spark_scala_yarn_client/bin/run.sh",
    "--RemoteProcessProxy.kernel-id",
    "{kernel_id}",
    "--RemoteProcessProxy.response-address",
    "{response_address}",
    "--RemoteProcessProxy.public-key",
    "{public_key}",
    "--RemoteProcessProxy.port-range",
    "{port_range}",
    "--RemoteProcessProxy.spark-context-initialization-mode",
    "lazy"
  ]
}

================================================ FILE: etc/kernelspecs/spark_scala_yarn_cluster/bin/run.sh ================================================
#!/usr/bin/env bash

if [ "${EG_IMPERSONATION_ENABLED}" = "True" ]; then
  IMPERSONATION_OPTS="--proxy-user ${KERNEL_USERNAME:-UNSPECIFIED}"
  USER_CLAUSE="as user ${KERNEL_USERNAME:-UNSPECIFIED}"
else
  IMPERSONATION_OPTS=""
  USER_CLAUSE="on behalf of user ${KERNEL_USERNAME:-UNSPECIFIED}"
fi

echo
echo "Starting Scala kernel for Spark in Yarn Cluster mode ${USER_CLAUSE}"
echo

if [ -z "${SPARK_HOME}" ]; then
  echo "SPARK_HOME must be set to the location of a Spark distribution!"
  exit 1
fi

PROG_HOME="$(cd "`dirname "$0"`"/..; pwd)"

KERNEL_ASSEMBLY=`(cd "${PROG_HOME}/lib"; ls -1 toree-assembly-*.jar;)`
TOREE_ASSEMBLY="${PROG_HOME}/lib/${KERNEL_ASSEMBLY}"
if [ ! -f ${TOREE_ASSEMBLY} ]; then
  echo "Toree assembly '${PROG_HOME}/lib/toree-assembly-*.jar' is missing. Exiting..."
  exit 1
fi

# The SPARK_OPTS values during installation are stored in __TOREE_SPARK_OPTS__. This allows values to be specified during
# install, but also during runtime. The runtime options take precedence over the install options.
if [ "${SPARK_OPTS}" = "" ]; then SPARK_OPTS=${__TOREE_SPARK_OPTS__} fi if [ "${TOREE_OPTS}" = "" ]; then TOREE_OPTS=${__TOREE_OPTS__} fi # Toree launcher jar path, plus required lib jars (toree-assembly) JARS="${TOREE_ASSEMBLY}" # Toree launcher app path LAUNCHER_JAR=`(cd "${PROG_HOME}/lib"; ls -1 toree-launcher*.jar;)` LAUNCHER_APP="${PROG_HOME}/lib/${LAUNCHER_JAR}" if [ ! -f ${LAUNCHER_APP} ]; then echo "Scala launcher jar '${PROG_HOME}/lib/toree-launcher*.jar' is missing. Exiting..." exit 1 fi set -x eval exec \ "${SPARK_HOME}/bin/spark-submit" \ "${SPARK_OPTS}" \ "${IMPERSONATION_OPTS}" \ --jars "${JARS}" \ --class launcher.ToreeLauncher \ "${LAUNCHER_APP}" \ "${TOREE_OPTS}" \ "${LAUNCH_OPTS}" \ "$@" set +x ================================================ FILE: etc/kernelspecs/spark_scala_yarn_cluster/kernel.json ================================================ { "language": "scala", "display_name": "Spark - Scala (YARN Cluster Mode)", "metadata": { "process_proxy": { "class_name": "enterprise_gateway.services.processproxies.yarn.YarnClusterProcessProxy" } }, "env": { "SPARK_HOME": "/usr/hdp/current/spark2-client", "__TOREE_SPARK_OPTS__": "--master yarn --deploy-mode cluster --name ${KERNEL_ID:-ERROR__NO__KERNEL_ID} --conf spark.yarn.submit.waitAppCompletion=false --conf spark.yarn.am.waitTime=1d --conf spark.yarn.maxAppAttempts=1 ${KERNEL_EXTRA_SPARK_OPTS}", "__TOREE_OPTS__": "--alternate-sigint USR2", "LAUNCH_OPTS": "", "DEFAULT_INTERPRETER": "Scala" }, "argv": [ "/usr/local/share/jupyter/kernels/spark_scala_yarn_cluster/bin/run.sh", "--RemoteProcessProxy.kernel-id", "{kernel_id}", "--RemoteProcessProxy.response-address", "{response_address}", "--RemoteProcessProxy.public-key", "{public_key}", "--RemoteProcessProxy.port-range", "{port_range}", "--RemoteProcessProxy.spark-context-initialization-mode", "lazy" ] } ================================================ FILE: etc/kubernetes/helm/enterprise-gateway/Chart.yaml ================================================ apiVersion: v2 name: enterprise-gateway description: A helm chart to deploy Jupyter Enterprise Gateway # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) version: 3.3.0-dev0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. appVersion: 3.3.0.dev0 icon: https://avatars1.githubusercontent.com/u/7388996?s=200&v=4 home: https://jupyter.org # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives # to be deployed. # # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. 
type: application
sources:
  - https://github.com/jupyter-server/enterprise_gateway
kubeVersion: '>=1.18.0-0'

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/daemonset.yaml ================================================
{{- if .Values.kip.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kernel-image-puller
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  labels:
    gateway-selector: enterprise-gateway
    app: enterprise-gateway
    component: kernel-image-puller
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
spec:
  selector:
    matchLabels:
      name: kernel-image-puller
  template:
    metadata:
      labels:
        name: kernel-image-puller
        app: enterprise-gateway
        component: kernel-image-puller
        chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
        {{- range $key, $val := .Values.global.commonLabels }}
        {{ $key }}: "{{ $val }}"
        {{- end }}
    spec:
      serviceAccountName: {{ .Values.kip.serviceAccountName }}
      containers:
        - name: kernel-image-puller
          image: {{ .Values.kip.image }}
          imagePullPolicy: {{ .Values.kip.imagePullPolicy }}
          env:
            - name: KIP_LOG_LEVEL
              value: {{ .Values.logLevel }}
            - name: KIP_GATEWAY_HOST
              value: "{{ .Values.kip.server.protocol }}://{{ .Values.kip.serviceName }}.{{ .Release.Namespace }}:{{ .Values.kip.server.port }}"
            - name: KIP_VALIDATE_CERT
              value: {{ .Values.kip.server.validate_cert | quote }}
            - name: KIP_INTERVAL
              value: !!str {{ .Values.kip.interval }}
            - name: KIP_PULL_POLICY
              value: {{ .Values.kip.pullPolicy }}
            - name: KIP_CRI_ENDPOINT
              value: "unix://{{ .Values.kip.criSocket }}"
            {{- if .Values.kip.defaultContainerRegistry }}
            - name: KIP_DEFAULT_CONTAINER_REGISTRY
              value: {{ .Values.kip.defaultContainerRegistry }}
            {{- end }}
            # Optional authorization token passed in all requests (should match EG_AUTH_TOKEN)
            {{- if .Values.authToken }}
            - name: KIP_AUTH_TOKEN
              value: {{ .Values.authToken }}
            {{- end }}
            # fetcher to fetch image names, defaults to KernelSpecsFetcher
            {{- if .Values.kip.fetcher }}
            - name: KIP_IMAGE_FETCHER
              value: "{{ .Values.kip.fetcher }}"
            {{- end }}
            # if CombinedImagesFetcher is used KIP_INTERNAL_FETCHERS defines the fetchers that get used internally
            {{- if .Values.kip.internalFetcher }}
            - name: KIP_INTERNAL_FETCHERS
              value: "{{ .Values.kip.internalFetcher }}"
            {{- end }}
            # if StaticListFetcher is used KIP_IMAGES defines the list of images pullers will fetch
            {{- if .Values.kip.images }}
            - name: KIP_IMAGES
              value: "{{ .Values.kip.images }}"
            {{- end }}
          {{- if .Values.kip.resources }}
          resources:
            {{- toYaml .Values.kip.resources | nindent 10 }}
          {{- end }}
          volumeMounts:
            - name: cri-socket
              mountPath: !!str {{ .Values.kip.criSocket }} # see env KIP_CRI_ENDPOINT
              readOnly: true
      volumes:
        - name: cri-socket
          hostPath:
            path: {{ .Values.kip.criSocket }}
      {{- if .Values.kip.tolerations }}
      tolerations:
        {{- toYaml .Values.kip.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.kip.nodeSelector }}
      nodeSelector:
        {{- toYaml .Values.kip.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if .Values.kip.affinity }}
      affinity:
        {{- toYaml .Values.kip.affinity | nindent 8 }}
      {{- end }}
{{- end }}
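A quick health check for the daemonset above — a sketch, assuming the chart was released into an enterprise-gateway namespace:

    # One puller pod should be scheduled per node; tail the logs to see pull activity.
    kubectl get daemonset kernel-image-puller -n enterprise-gateway
    kubectl logs -l name=kernel-image-puller -n enterprise-gateway --tail=20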
================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/deployment.yaml ================================================
{{- if .Values.deployment.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: enterprise-gateway
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  labels:
    gateway-selector: enterprise-gateway
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
spec:
  replicas: {{ .Values.deployment.replicas }}
  selector:
    matchLabels:
      gateway-selector: enterprise-gateway
  template:
    metadata:
      labels:
        gateway-selector: enterprise-gateway
        app: enterprise-gateway
        component: enterprise-gateway
        {{- range $key, $val := .Values.global.commonLabels }}
        {{ $key }}: "{{ $val }}"
        {{- end }}
    spec:
      # Created by this chart.
      serviceAccountName: {{ .Values.deployment.serviceAccountName }}
      terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }}
      {{- if .Values.kernelspecs.image }}
      initContainers:
        - name: kernelspecs
          image: {{ .Values.kernelspecs.image }}
          imagePullPolicy: {{ .Values.kernelspecs.imagePullPolicy }}
          args: ["cp", "-r", "/kernels", "/usr/local/share/jupyter"]
          volumeMounts:
            - name: image-kernelspecs
              mountPath: "/usr/local/share/jupyter/kernels"
      {{- end }}
      containers:
        - name: enterprise-gateway
          image: {{ .Values.image }}
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          env:
            - name: EG_PORT
              {{ with index .Values.service.ports 0 }}
              value: !!str {{ .port }}
              {{- end }}
            - name: EG_RESPONSE_PORT
              {{ with index .Values.service.ports 1 }}
              value: !!str {{ .port }}
              {{- end }}
            - name: EG_NAMESPACE
              value: {{ .Release.Namespace }}
            - name: EG_KERNEL_CLUSTER_ROLE
              value: {{ .Values.kernel.clusterRole }}
            - name: EG_SHARED_NAMESPACE
              value: {{ if .Values.kernel.shareGatewayNamespace }}"True"{{ else }}"False"{{ end }}
            - name: EG_MIRROR_WORKING_DIRS
              value: {{ if .Values.mirrorWorkingDirs }}"True"{{ else }}"False"{{ end }}
            - name: EG_CULL_IDLE_TIMEOUT
              value: !!str {{ .Values.kernel.cullIdleTimeout }}
            - name: EG_CULL_CONNECTED
              value: {{ if .Values.kernel.cullConnected }}"True"{{ else }}"False"{{ end }}
            - name: EG_LOG_LEVEL
              value: {{ .Values.logLevel }}
            - name: EG_KERNEL_LAUNCH_TIMEOUT
              value: !!str {{ .Values.kernel.launchTimeout }}
            - name: EG_KERNEL_INFO_TIMEOUT
              value: !!str {{ .Values.kernel.infoTimeout }}
            - name: EG_ALLOWED_KERNELS
              value: {{ toJson .Values.kernel.allowedKernels | squote }}
            - name: EG_DEFAULT_KERNEL_NAME
              value: {{ .Values.kernel.defaultKernelName }}
            - name: EG_DEFAULT_KERNEL_SERVICE_ACCOUNT_NAME
              value: {{ .Values.kernel.defaultServiceAccountName }}
            # Optional authorization token passed in all requests
            {{- if .Values.authToken }}
            - name: EG_AUTH_TOKEN
              value: {{ .Values.authToken }}
            {{- end }}
            {{- if .Values.deployment.extraEnv }}
            {{- range $key, $val := .Values.deployment.extraEnv }}
            - name: {{ $key }}
              value: |-
{{ $val | indent 12 }}
            {{- end }}
            {{- end }}
          ports:
            {{ with index .Values.service.ports 0 }}
            - containerPort: {{ .port }}
            {{- end }}
            {{ with index .Values.service.ports 1 }}
            - containerPort: {{ .port }}
            {{- end }}
          {{- if .Values.deployment.resources }}
          resources:
            {{- toYaml .Values.deployment.resources | nindent 10 }}
          {{- end }}
      {{- if .Values.nfs.enabled }}
          volumeMounts:
            - name: nfs-kernelspecs
              mountPath: "/usr/local/share/jupyter/kernels"
      volumes:
        - name: nfs-kernelspecs
          nfs:
            server: {{ .Values.nfs.internalServerIPAddress }}
            path: "/usr/local/share/jupyter/kernels"
      {{- else if .Values.kernelspecsPvc.enabled }}
          volumeMounts:
            - name: pvc-kernelspecs
              mountPath: "/usr/local/share/jupyter/kernels"
      volumes:
        - name: pvc-kernelspecs
          persistentVolumeClaim:
            claimName: {{ .Values.kernelspecsPvc.name }}
      {{- else if .Values.kernelspecs.image }}
          volumeMounts:
            - name: image-kernelspecs
              mountPath: "/usr/local/share/jupyter/kernels"
      volumes:
        - name: image-kernelspecs
          emptyDir:
            medium: Memory
      {{- end }}
      {{- if .Values.deployment.tolerations }}
      tolerations:
        {{- toYaml .Values.deployment.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.deployment.nodeSelector }}
      nodeSelector:
        {{- toYaml .Values.deployment.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if .Values.deployment.affinity }}
      affinity:
        {{- toYaml .Values.deployment.affinity | nindent 8 }}
      {{- end }}
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrole.yaml ================================================
{{- if and (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: enterprise-gateway-controller
  labels:
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
rules:
  - apiGroups: [""]
    resources: ["pods", "namespaces", "services", "configmaps", "secrets", "persistentvolumes", "persistentvolumeclaims"]
    verbs: ["get", "watch", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["rolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["sparkoperator.k8s.io"]
    resources: ["sparkapplications", "sparkapplications/status", "scheduledsparkapplications", "scheduledsparkapplications/status"]
    verbs: ["get", "watch", "list", "create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # Referenced by EG_KERNEL_CLUSTER_ROLE in the Deployment
  name: kernel-controller
  labels:
    app: enterprise-gateway
    component: kernel
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list", "create", "delete"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["list", "create"]
  - apiGroups: [""]
    resources: ["services", "persistentvolumeclaims"]
    verbs: ["list"]
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/eg-clusterrolebinding.yaml ================================================
{{- if and (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: enterprise-gateway-controller
  labels:
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
subjects:
  - kind: ServiceAccount
    name: {{ .Values.deployment.serviceAccountName }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: enterprise-gateway-controller
  apiGroup: rbac.authorization.k8s.io
{{- end }}
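The cluster role and binding above can be verified after installation with kubectl's impersonation support — a sketch, assuming the chart's default service-account name and an enterprise-gateway namespace:

    # Should print "yes" if the binding to enterprise-gateway-controller took effect.
    kubectl auth can-i create pods \
      --as system:serviceaccount:enterprise-gateway:enterprise-gateway-sa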
================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/eg-serviceaccount.yaml ================================================
{{- if and (.Values.deployment.serviceAccountName) (.Values.global.rbac) }}
---
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
  {{- $parent := . -}}
  {{- range .Values.global.imagePullSecrets }}
  - name: {{ . }}
  {{- end }}
{{- end }}
metadata:
  name: {{ .Values.deployment.serviceAccountName }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  labels:
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
  {{- if .Values.deployment.annotations }}
  annotations:
    {{- range $key, $val := .Values.deployment.annotations }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
  {{- end }}
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/imagepullSecret.yaml ================================================
{{- if and (.Values.imagePullSecretsCreate.enabled) (.Values.imagePullSecretsCreate.secrets) -}}
---
{{- $root := .Values }}
{{- range .Values.imagePullSecretsCreate.secrets }}
apiVersion: v1
data:
  .dockerconfigjson: {{ .data }}
kind: Secret
metadata:
  name: {{ .name }}
  {{- if ($root.imagePullSecretsCreate.annotations) -}}
  {{- with $root.imagePullSecretsCreate.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- end }}
type: kubernetes.io/dockerconfigjson
{{- end }}
---
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/ingress.yaml ================================================
{{ if .Values.ingress.enabled }}
{{- $parent := . -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  name: enterprise-gateway-ingress
  {{- if .Values.ingress.annotations }}
  annotations:
{{ toYaml .Values.ingress.annotations | indent 4 }}
  {{- end }}
spec:
  {{ if .Values.ingress.ingressClassName }}
  ingressClassName: {{ .Values.ingress.ingressClassName }}
  {{ end }}
  rules:
    - host: {{ .Values.ingress.hostName }}
      http:
        paths:
          - path: {{ .Values.ingress.path }}
            {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
            pathType: {{ .Values.ingress.pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: "enterprise-gateway"
                port:
                  {{ with index .Values.service.ports 0 }}
                  number: {{ .port }}
                  {{ end }}
              {{- else }}
              serviceName: "enterprise-gateway"
              {{ with index .Values.service.ports 0 }}
              servicePort: {{ .port }}
              {{- end }}
              {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/kip-clusterrole.yaml ================================================
{{- if and (.Values.kip.serviceAccountName) (.Values.global.rbac) }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kip-controller
  labels:
    app: enterprise-gateway
    component: kernel-image-puller
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list", "create", "delete"]
  {{- if .Values.kip.podSecurityPolicy.create }}
  - apiGroups:
      - policy
    resources:
      - podsecuritypolicies
    resourceNames:
      - "kip-psp"
    verbs:
      - use
  {{- end }}
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/kip-clusterrolebinding.yaml ================================================
{{- if and (.Values.kip.serviceAccountName) (.Values.global.rbac) }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: enterprise-gateway-kip
  labels:
    app: enterprise-gateway
    component: kernel-image-puller
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
subjects:
  - kind: ServiceAccount
    name: {{ .Values.kip.serviceAccountName }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: kip-controller
  apiGroup: rbac.authorization.k8s.io
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/kip-serviceaccount.yaml ================================================
{{- if and (.Values.kip.serviceAccountName) (.Values.global.rbac) }}
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
  {{- $parent := . -}}
  {{- range .Values.global.imagePullSecrets }}
  - name: {{ . }}
  {{- end }}
{{- end }}
metadata:
  name: {{ .Values.kip.serviceAccountName }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  labels:
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
  {{- if .Values.kip.annotations }}
  annotations:
    {{- range $key, $val := .Values.kip.annotations }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
  {{- end }}
{{- end }}
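Whether these KIP resources are rendered at all is driven by the kip.* values; a sketch for inspecting the effect locally, assuming the chart path from this repository:

    # Render the chart without installing; with kip.enabled=false the
    # kernel-image-puller DaemonSet should disappear from the output.
    helm template eg etc/kubernetes/helm/enterprise-gateway --set kip.enabled=false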
================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/psp.yaml ================================================
{{- if and (.Values.kip.podSecurityPolicy.create) (.Values.global.rbac) }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: "kip-psp"
  {{- if .Values.kip.podSecurityPolicy.annotations }}
  annotations:
    {{- toYaml .Values.kip.podSecurityPolicy.annotations | nindent 4 }}
  {{- end }}
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  # This is redundant with non-root + disallow privilege escalation,
  # but we can provide it for defense in depth.
  requiredDropCapabilities:
    - ALL
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    # TODO: Require the container to run without root privileges.
    rule: 'RunAsAny'
  seLinux:
    # This policy assumes the nodes are using AppArmor rather than SELinux.
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
  allowedHostPaths:
    - pathPrefix: /var/run
      readOnly: true # only allow read-only mounts
  volumes:
    - '*'
  # - 'secret'
  # - 'hostPath'
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/templates/service.yaml ================================================
{{- if and .Values.deployment.enabled }}
apiVersion: v1
kind: Service
metadata:
  labels:
    app: enterprise-gateway
    component: enterprise-gateway
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
    {{- range $key, $val := .Values.global.commonLabels }}
    {{ $key }}: "{{ $val }}"
    {{- end }}
  name: enterprise-gateway
  namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
  ports:
    {{- range $key, $val := .Values.service.ports }}
    -
      {{- range $pkey, $pval := $val }}
      {{ $pkey }}: {{ $pval }}
      {{- end }}
    {{- end }}
  selector:
    gateway-selector: enterprise-gateway
  sessionAffinity: ClientIP
  type: {{ .Values.service.type }}
  {{- if .Values.service.externalIPs.k8sMasterPublicIP }}
  externalIPs:
    - {{ .Values.service.externalIPs.k8sMasterPublicIP }}
  {{- end }}
{{- end }}

================================================ FILE: etc/kubernetes/helm/enterprise-gateway/values.yaml ================================================
global:
  # Create RBAC resources
  rbac: true
  # ImagePullSecrets for a ServiceAccount, list of secrets in the same namespace
  # to use for pulling any images in pods that reference this ServiceAccount.
  # Must be set for any cluster configured with private docker registry.
  imagePullSecrets: []
  # - private-registry-key
  commonLabels: {}
  # app.kubernetes.io/name: [your app name]

# You can optionally create imagePull Secrets
imagePullSecretsCreate:
  enabled: false
  annotations: {}
  # this annotation allows the secret to be kept even if the helm release is deleted
  # "helm.sh/resource-policy": "keep"
  secrets: []
  # - name: "private"
  #   # base64 encoded value
  #   --set imagePullSecretsCreate.secrets[0].data="UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K"
  # - name: "private2"
  #   --set imagePullSecretsCreate.secrets[1].data="UHJvZCBTZWNyZXQgSW5mb3JtYXRpb24K"

# Enterprise Gateway image name and tag to use.
image: elyra/enterprise-gateway:dev
# Enterprise Gateway image pull policy.
imagePullPolicy: IfNotPresent

# K8s Enterprise Gateway Service
service:
  type: "NodePort"
  externalIPs:
    # Master public IP on which to expose EG.
    k8sMasterPublicIP: ''
  ports:
    # The primary port on which Enterprise Gateway is servicing requests.
    - name: "http"
      port: 8888
      targetPort: 8888
      # nodePort: 32652 # optional nodePort
    # The port on which Enterprise Gateway will receive kernel connection info responses.
    - name: "http-response"
      port: 8877
      targetPort: 8877
      # nodePort: 30481 # optional nodePort

deployment:
  enabled: true
  serviceAccountName: 'enterprise-gateway-sa'
  terminationGracePeriodSeconds: 30
  annotations: {}
  resources: {}
  # resources:
  #   limits:
  #     cpu: 2
  #     memory: 10Gi
  #   requests:
  #     cpu: 1
  #     memory: 2Gi
  # Update to deploy multiple replicas of EG.
  replicas: 1
  tolerations: []
  affinity: {}
  nodeSelector: {}
  extraEnv: {
    # SOME_ENV_VAR_WITH_SIMPLE_VALUE: "example"
    # SOME_ENV_VAR_WITH_LONG_VALUE: >
    #   'this example',
    #   'will not preserve',
    #   'line breaks',
    # SOME_ENV_VAR_WITH_MULTILINE_VALUE: |
    #   this example
    #   will preserve
    #   line breaks
    EG_INHERITED_ENVS: "PATH"
  }

# Log output level.
logLevel: DEBUG

# Whether to mirror working directories.
mirrorWorkingDirs: false

# Optional authorization token passed in all requests (see --EnterpriseGatewayApp.auth_token)
authToken:

kernel:
  # Kernel cluster role created by this chart.
  clusterRole: kernel-controller
  # Will start kernels in the same namespace as EG if True.
  shareGatewayNamespace: false
  # Timeout for kernel launching in seconds.
  launchTimeout: 60
  # Timeout for an idle kernel before it's culled, in seconds. Default is 1 hour.
  cullIdleTimeout: 3600
  # Whether to cull idle kernels that still have connected clients.
  cullConnected: false
  # List of kernel names that are available for use. To allow additional kernelspecs without
  # requiring redeployment (and assuming kernelspecs are mounted or otherwise accessible
  # outside the pod), comment out (or remove) the entries, leaving only `allowedKernels:`.
  allowedKernels:
    - r_kubernetes
    - python_kubernetes
    - python_tf_kubernetes
    - python_tf_gpu_kubernetes
    - scala_kubernetes
    - spark_r_kubernetes
    - spark_python_kubernetes
    - spark_scala_kubernetes
    - spark_python_operator
  # Default kernel name should be something from the allowedKernels
  defaultKernelName: python_kubernetes
  # Service account name to use for kernel pods when no service account is specified.
  # This service account should exist in the namespace where kernel pods are launched.
  defaultServiceAccountName: default

kernelspecs:
  # Optional custom data image containing kernelspecs to use.
  image:
  # Kernelspecs image pull policy.
  imagePullPolicy: Always

nfs:
  enabled: false
  # IP address of NFS server. Required if enabled.
  internalServerIPAddress:

kernelspecsPvc:
  enabled: false
  # PVC name. Required if you want to mount kernelspecs without NFS. The PVC should be
  # created in the same namespace before EG is deployed.
  name:

ingress:
  ingressClassName: # available since k8s 1.18. Depending on the ingress controller, you can use annotation as alternative
  enabled: false
  hostName: "" # Ingress resource host
  pathType: "Prefix"
  path: "/"
  annotations: {}
  # Optional TLS section
  # tls:
  #   - secretName: "mysecret-name-tls"
  #     hosts:
  #       - myhost.example.com

# Kernel Image Puller (daemonset)
kip:
  enabled: true
  serviceAccountName: 'kernel-image-puller-sa'
  serviceName: enterprise-gateway
  server:
    # EG server properties.
    protocol: http
    port: 8888
    validate_cert: false
  podSecurityPolicy:
    # Note: PodSecurityPolicy is deprecated as of 1.21 and removed in 1.25.
    # Operators deploying into k8s clusters >= 1.25 that require PSP equivalency will need to
    # look into alternatives like Gatekeeper (https://github.com/open-policy-agent/gatekeeper).
    # Creation of PSP in KIP is disabled by default.
    create: false
    annotations: {}
  # Kernel Image Puller image name and tag to use.
  image: elyra/kernel-image-puller:dev
  # Kernel Image Puller image pull policy.
  imagePullPolicy: IfNotPresent
  # Determines whether the Kernel Image Puller will pull kernel images it has previously pulled
  pullPolicy: IfNotPresent
  # The interval (in seconds) at which the Kernel Image Puller fetches kernelspecs to pull kernel images.
  interval: 300
  # The container runtime interface socket, use /run/containerd/containerd.sock for containerd installations
  criSocket: /var/run/docker.sock
  # Prefix to use if a registry is not already specified on image name (e.g., quay.io/elyra/kernel-py:2.5.0)
  defaultContainerRegistry: docker.io
  fetcher: KernelSpecsFetcher
  annotations: {}
  tolerations: []
  affinity: {}
  nodeSelector: {}
  resources: {}
  # resources:
  #   limits:
  #     cpu: 1
  #     memory: 1Gi
  #   requests:
  #     cpu: 1
  #     memory: 1Gi
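Any of the values above can be overridden at install time; a sketch, assuming the chart path from this repository and a containerd-based cluster:

    helm upgrade --install enterprise-gateway etc/kubernetes/helm/enterprise-gateway \
      --namespace enterprise-gateway --create-namespace \
      --set kip.criSocket=/run/containerd/containerd.sock \
      --set kernel.cullIdleTimeout=1800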
================================================ FILE: pyproject.toml ================================================
[build-system]
requires = ["hatchling>=1.21.1"]
build-backend = "hatchling.build"

[project]
name = "jupyter_enterprise_gateway"
version = "3.3.0.dev0"
description = "A web server for spawning and communicating with remote Jupyter kernels"
license = { file = "LICENSE.md" }
keywords = ["Interactive", "Interpreter", "Kernel", "Web", "Cloud"]
classifiers = [
  "Intended Audience :: Developers",
  "Intended Audience :: Science/Research",
  "Intended Audience :: System Administrators",
  "License :: OSI Approved :: BSD License",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11"
]
requires-python = ">=3.10"
dependencies = [
  "docker>=3.5.0",
  "future",
  "jinja2>=3.1",
  "jupyter_client>=6.1.12,<7",  # Remove cap once EG supports kernel provisioners
  "jupyter_core>=4.7.0",
  "kubernetes>=18.20.0",
  "jupyter_server>=1.7,<2.0",  # Remove cap (increase floor) once EG supports kernel provisioners
  "paramiko>=2.11",
  "pexpect>=4.8.0",
  "pycryptodomex>=3.9.7",
  "pyzmq>=20.0,<25.0",  # Pyzmq 25 removes deprecated code that jupyter_client 6 uses, remove if v6 gets updated
  "requests>=2.14.2",
  "tornado>=6.1",
  "traitlets>=5.3.0",
  "watchdog>=2.1.3",
  "yarn-api-client>=1.0"
]

[[project.authors]]
name = "Jupyter Development Team"
email = "jupyter@googlegroups.com"

[project.readme]
text = "A lightweight, multi-tenant, scalable, and secure gateway that enables\nJupyter Notebooks to share resources across distributed clusters such as\nApache Spark, Kubernetes and others."
content-type = "text/plain"

[project.urls]
Homepage = "http://github.com/jupyter/enterprise_gateway"

[project.scripts]
jupyter-enterprisegateway = "enterprise_gateway.enterprisegatewayapp:launch_instance"

[project.optional-dependencies]
test = [
  "coverage",
  "pytest<8.1.0",
  "pytest-tornasync",
  "ipykernel",
  "pre-commit",
  "websocket-client"
]
lint = [
  "black[jupyter]==24.2.0",
  "mdformat>0.7",
  "mdformat-gfm>=0.3.5",
  "ruff==0.3.0"
]

[tool.ruff.pylint]
max-args = 10
max-statements = 60

[tool.hatch.build.targets.wheel]
include = ["enterprise_gateway"]

[tool.tbump.version]
current = "3.3.0.dev0"
regex = '''
  (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
  ((?P<channel>a|b|rc|.dev)(?P<release>\d+))?
'''

[tool.tbump.git]
message_template = "Bump to {new_version}"
tag_template = "v{new_version}"

[[tool.tbump.file]]
src = "enterprise_gateway/_version.py"

[[tool.tbump.file]]
src = "pyproject.toml"

[[tool.tbump.file]]
src = "Makefile"

[[tool.tbump.file]]
src = "etc/kubernetes/helm/enterprise-gateway/Chart.yaml"
search = 'appVersion: {current_version}'

[[tool.tbump.field]]
name = "channel"
default = ""

[[tool.tbump.field]]
name = "release"
default = ""

[tool.pytest.ini_options]
addopts = "-raXs --durations 10 --color=yes --doctest-modules"
testpaths = [
  "enterprise_gateway/tests/"
]
filterwarnings = [
  "error",
  "ignore:There is no current event loop:DeprecationWarning",
  "ignore:Passing unrecognized arguments to super:DeprecationWarning:jupyter_client",
  "ignore:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning",
]

[tool.coverage.report]
exclude_lines = [
  "pragma: no cover",
  "def __repr__",
  "if self.debug:",
  "if settings.DEBUG",
  "raise AssertionError",
  "raise NotImplementedError",
  "if 0:",
  "if __name__ == .__main__.:",
  "class .*\bProtocol\\):",
  "@(abc\\.)?abstractmethod",
]

[tool.black]
line-length = 100
target-version = ["py37"]
skip-string-normalization = true
extend-exclude = "enterprise_gateway/.*ipynb"

[tool.ruff]
target-version = "py37"
line-length = 100
select = [
  "A", "B", "C", "DTZ", "E", "EM", "F", "FBT", "I", "ICN", "ISC", "N",
  "PLC", "PLE", "PLW", "Q", "RUF", "S", "SIM", "T", "TID", "UP", "W", "YTT",
]
ignore = [
  # Q000 Single quotes found but double quotes preferred
  "Q000",
  # FBT001 Boolean positional arg in function definition
  "FBT001", "FBT002", "FBT003",
  # E501 Line too long (158 > 100 characters)
  "E501",
  # SIM105 Use `contextlib.suppress(...)`
  "SIM105",
  # S507 Paramiko call with policy set to automatically trust the unknown host key
  "S507",
  # S311 Standard pseudo-random generators are not suitable for cryptographic purposes
  "S311",
  # S603 `subprocess` call: check for execution of untrusted input
  "S603",
  # TID252 Relative imports from parent modules are banned
  "TID252",
  # N806 Variable `sqlContext` in function should be lowercase
  "N806",
  # PLR2004 Magic value used in comparison
  "PLR2004",
  # PLW0603 Using the global statement to update is discouraged
  "PLW0603",
  # PLW1508 Invalid type for environment variable default
  "PLW1508",
]
unfixable = [
  # Don't touch print statements
  "T201",
  # Don't touch noqa lines
  "RUF100",
]

[tool.ruff.per-file-ignores]
# S101 Use of `assert` detected
# F841 local variable 'foo' is assigned to but never used
# S105 Possible hardcoded password: `"NeverHeardOf"`
# T201 `print` found
"enterprise_gateway/tests/*" = ["S101", "F841", "S105", "T201"]
"enterprise_gateway/itests/*" = ["S101", "F841", "S105", "T201"]
# T201 `print` found
"etc/*" = ["T201"]
# C901 Function is too complex
"enterprise_gateway/client/gateway_client.py" = ["C901"]  # `_read_responses` is too complex (12)
"etc/docker/kernel-image-puller/kernel_image_puller.py" = ["C901"]  # `fetch_image_names` is too complex (13)
"enterprise_gateway/services/processproxies/k8s.py" = ["C901"]  # `terminate_container_resources` is too complex (13)
"etc/kernel-launchers/kubernetes/scripts/launch_kubernetes.py" = ["C901"]  # `launch_kubernetes_kernel` is too complex (32)
"etc/docker/kernel-image-puller/image_fetcher.py" = ["C901"]  # `fetch_image_names` is too complex (11)

[tool.interrogate]
ignore-init-module = true
ignore-private = true
ignore-semiprivate = true
ignore-property-decorators = true
ignore-nested-functions = true
ignore-nested-classes = true
fail-under = 100
"*/tests", "*/itests","conftest"] ================================================ FILE: release.sh ================================================ #!/usr/bin/env bash # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. function exit_with_usage { cat << EOF release - Creates build distributions from a git commit hash or from HEAD. SYNOPSIS usage: release.sh [--release-prepare | --release-publish] DESCRIPTION Perform necessary tasks for a Jupyter Enterprise Gateway release release-prepare: This form creates a release tag and builds the release distribution artifacts. --release-prepare --currentVersion="2.0.0.dev0" --releaseVersion="2.0.0" --developmentVersion="2.1.0.dev1" --previousVersion="2.0.0rc2" [--tag="v2.0.0"] [--gitCommitHash="a874b73"] release-publish: Publish the release distribution artifacts to PyPI. --release-publish --tag="v2.0.0" OPTIONS --currentVersion - Current development version --releaseVersion - Release identifier used when publishing --developmentVersion - Release identifier used for next development cycle --previousVersion - Release identifier left in download links from previous release --tag - Release Tag identifier used when taging the release, default 'v$releaseVersion' --gitCommitHash - Release tag or commit to build from, default master HEAD --dryRun - Dry run only, mostly used for testing. EXAMPLES release.sh --release-prepare --currentVersion="2.0.0.dev0" --releaseVersion="2.0.0" --developmentVersion="2.1.0.dev1" --previousVersion="2.0.0rc2" release.sh --release-prepare --currentVersion="2.0.0.dev0" --releaseVersion="2.0.0" --developmentVersion="2.1.0.dev1" --previousVersion="2.0.0rc2" --tag="v2.0.0" --dryRun release.sh --release-publish --gitTag="v2.0.0" EOF exit 1 } set -e if [ $# -eq 0 ]; then exit_with_usage fi # Process each provided argument configuration while [ "${1+defined}" ]; do IFS="=" read -ra PARTS <<< "$1" case "${PARTS[0]}" in --release-prepare) GOAL="release-prepare" RELEASE_PREPARE=true shift ;; --release-publish) GOAL="release-publish" RELEASE_PUBLISH=true shift ;; --release-snapshot) GOAL="release-snapshot" RELEASE_SNAPSHOT=true shift ;; --gitCommitHash) GIT_REF="${PARTS[1]}" shift ;; --gitTag) GIT_TAG="${PARTS[1]}" shift ;; --currentVersion) CURRENT_VERSION="${PARTS[1]}" shift ;; --releaseVersion) RELEASE_VERSION="${PARTS[1]}" shift ;; --developmentVersion) DEVELOPMENT_VERSION="${PARTS[1]}" shift ;; --previousVersion) PREVIOUS_VERSION="${PARTS[1]}" shift ;; --tag) RELEASE_TAG="${PARTS[1]}" shift ;; --dryRun) DRY_RUN="-DdryRun=true" shift ;; *help* | -h) exit_with_usage exit 0 ;; -*) echo "Error: Unknown option: $1" >&2 exit 1 ;; *) # No more options break ;; esac done if [[ "$RELEASE_PREPARE" == "true" && -z "$RELEASE_VERSION" ]]; then echo "ERROR: --releaseVersion must be passed as an argument to run this script" exit_with_usage fi if [[ "$RELEASE_PREPARE" == "true" && -z "$DEVELOPMENT_VERSION" ]]; then echo "ERROR: --developmentVersion must be passed as an argument to run this script" exit_with_usage fi if [[ "$RELEASE_PREPARE" == "true" && -z "$PREVIOUS_VERSION" ]]; then echo "ERROR: --previousVersion must be passed as an argument to run this script" exit_with_usage fi if [[ "$RELEASE_PUBLISH" == "true" ]]; then if [[ "$GIT_REF" && "$GIT_TAG" ]]; then echo "ERROR: Only one argumented permitted when publishing : --gitCommitHash or --gitTag" exit_with_usage fi if [[ -z "$GIT_REF" && -z "$GIT_TAG" ]]; then echo "ERROR: --gitCommitHash OR --gitTag must be passed as an argument to run this 
script" exit_with_usage fi fi if [[ "$RELEASE_PUBLISH" == "true" && "$DRY_RUN" ]]; then echo "ERROR: --dryRun not supported for --release-publish" exit_with_usage fi # Commit ref to checkout when building GIT_REF=${GIT_REF:-HEAD} if [[ "$RELEASE_PUBLISH" == "true" && "$GIT_TAG" ]]; then GIT_REF="tags/$GIT_TAG" fi BASE_DIR=$(pwd) WORK_DIR=$(pwd)/build/release SOURCE_DIR=$(pwd)/build/release/enterprise_gateway if [ -z "$RELEASE_TAG" ]; then RELEASE_TAG="v$RELEASE_VERSION" fi echo " " echo "Base directory: $BASE_DIR" echo "Work directory: $WORK_DIR" echo "Source directory: $SOURCE_DIR" echo " " echo "-------------------------------------------------------------" echo "------- Release preparation with the following parameters ---" echo "-------------------------------------------------------------" echo "Executing ==> $GOAL" echo "Git reference ==> $GIT_REF" echo "Release version ==> $RELEASE_VERSION" echo "Development version ==> $DEVELOPMENT_VERSION" echo "Tag ==> $RELEASE_TAG" if [ "$DRY_RUN" ]; then echo "dry run ? ==> true" fi set -o xtrace function checkout_code { rm -rf $WORK_DIR mkdir -p $WORK_DIR cd $WORK_DIR # Checkout code git clone git@github.com:jupyter/enterprise_gateway.git cd enterprise_gateway git checkout $GIT_REF git_hash=`git rev-parse --short HEAD` echo "Checked out Jupyter Enterprise Gateway git hash $git_hash" } function update_version_to_release { cd $SOURCE_DIR # Update tbump-managed versions pip install tbump tbump --non-interactive --no-tag --no-push $RELEASE_VERSION # Update Kubernetes Helm chart and values files (tbump will handle appVersion in Chart.yaml) # We need to inject "-" prior to pre-release suffices for 'version:' since it follows strict semantic version rules. # For example 3.0.0rc1 -> 3.0.0-rc1 k8s_version=`echo $RELEASE_VERSION | sed 's/\([0-9]\)\([a-z]\)/\1-\2/'` sed -i .bak "s@version: [0-9,\.,a-z,-]*@version: $k8s_version@g" etc/kubernetes/helm/enterprise-gateway/Chart.yaml sed -i .bak "s@elyra/enterprise-gateway:dev@elyra/enterprise-gateway:$RELEASE_VERSION@g" etc/kubernetes/helm/enterprise-gateway/values.yaml sed -i .bak "s@elyra/kernel-image-puller:dev@elyra/kernel-image-puller:$RELEASE_VERSION@g" etc/kubernetes/helm/enterprise-gateway/values.yaml # Update Docker compose version sed -i .bak "s@elyra/enterprise-gateway:dev@elyra/enterprise-gateway:$RELEASE_VERSION@g" etc/docker/docker-compose.yml sed -i .bak "s@elyra/kernel-image-puller:dev@elyra/kernel-image-puller:$RELEASE_VERSION@g" etc/docker/docker-compose.yml # Update documentation - this is a one-way change since links will not be valid in dev "releases". find docs/source -name "*.md" -type f -exec sed -i .bak "s@$PREVIOUS_VERSION@$RELEASE_VERSION@g" {} \; } function update_version_to_development { cd $SOURCE_DIR # Update tbump-managed versions pip install tbump tbump --non-interactive --no-tag --no-push $DEVELOPMENT_VERSION # Update Kubernetes Helm chart and values files (tbump will handle appVersion in Chart.yaml) # We need to replace ".devN" suffix with "-devN for 'version:' since it follows strict semantic version rules. 
  # For example 3.0.0.dev1 -> 3.0.0-dev1
  k8s_version=`echo $DEVELOPMENT_VERSION | sed 's/\.\(dev\)/-\1/'`
  sed -i .bak "s@version: [0-9,\.,a-z,-]*@version: $k8s_version@g" etc/kubernetes/helm/enterprise-gateway/Chart.yaml
  sed -i .bak "s@elyra/enterprise-gateway:$RELEASE_VERSION@elyra/enterprise-gateway:dev@g" etc/kubernetes/helm/enterprise-gateway/values.yaml
  sed -i .bak "s@elyra/kernel-image-puller:$RELEASE_VERSION@elyra/kernel-image-puller:dev@g" etc/kubernetes/helm/enterprise-gateway/values.yaml

  # Update Docker compose version
  sed -i .bak "s@elyra/enterprise-gateway:$RELEASE_VERSION@elyra/enterprise-gateway:dev@g" etc/docker/docker-compose.yml
  sed -i .bak "s@elyra/kernel-image-puller:$RELEASE_VERSION@elyra/kernel-image-puller:dev@g" etc/docker/docker-compose.yml
}

if [[ "$RELEASE_PREPARE" == "true" ]]; then
  echo "Preparing release $RELEASE_VERSION ($RELEASE_VERSION)"
  # Checkout code
  checkout_code
  update_version_to_release

  cd $SOURCE_DIR
  if [ -z "$DRY_RUN" ]; then
    make MULTIARCH_BUILD=y clean dist release docs docker-images
  else
    make clean dist docs docker-images
  fi

  mkdir -p $WORK_DIR/$RELEASE_TAG
  cp $SOURCE_DIR/dist/jupyter_enterprise_gateway* $WORK_DIR/$RELEASE_TAG

  # Build and prepare the release
  git commit -a -m "Prepare release $RELEASE_VERSION"
  git tag $RELEASE_TAG

  update_version_to_development

  cd $SOURCE_DIR
  mv dist $WORK_DIR/$RELEASE_TAG
  mv build $WORK_DIR/$RELEASE_TAG
  make clean dist docs

  # Build next development iteration
  git commit -a -m "Prepare for next development iteration $DEVELOPMENT_VERSION"

  if [ -z "$DRY_RUN" ]; then
    git push
    git push --tags
  fi

  cd "$BASE_DIR" # exit target
  exit 0
fi

if [[ "$RELEASE_PUBLISH" == "true" ]]; then
  echo "Publishing release $RELEASE_VERSION"
  # Checkout code
  checkout_code
  cd $SOURCE_DIR
  git checkout $RELEASE_TAG
  git clean -d -f -x

  make clean dist docs

  cd "$BASE_DIR" # exit target
  exit 0
fi

cd "$BASE_DIR" # return to base dir
rm -rf target
echo "ERROR: wrong execution goals"
exit_with_usage
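The whole prepare flow can be exercised without pushing anything by appending --dryRun, as the script's own usage examples show:

    ./release.sh --release-prepare --currentVersion="2.0.0.dev0" \
      --releaseVersion="2.0.0" --developmentVersion="2.1.0.dev1" \
      --previousVersion="2.0.0rc2" --tag="v2.0.0" --dryRun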
================================================ FILE: requirements.yml ================================================
channels:
  - conda-forge
  - defaults
dependencies:
  - docker-py>=3.5.0
  - future
  - jinja2>=3.1
  - jupyter_client>=6.1,<7 # Remove cap once EG supports kernel provisioners
  - jupyter_core>=4.7.0
  - jupyter_server>=1.7,<2 # Remove cap (increase floor) once EG supports kernel provisioners
  - paramiko>=2.1.2
  - pexpect>=4.2.0
  - pip
  - pre-commit
  - pycryptodomex>=3.9.7
  - python-kubernetes>=18.20.0
  - pyzmq>=20.0.0,<25 # Pyzmq 25 removes deprecated code that jupyter_client 6 uses, remove if v6 gets updated
  - requests>=2.14.2
  - tornado>=6.1
  - traitlets>=5.3.0
  - watchdog>=2.1.3
  - yarn-api-client>=1.0
  # Test Requirements
  - coverage
  - ipykernel
  - mock
  - pytest<8.1.0
  - pytest-tornasync
  - websocket-client
  # Code Style
  - flake8
  - pip:
      - .
      - -r docs/doc-requirements.txt

================================================ FILE: website/.gitignore ================================================
_site/
.sass-cache/

================================================ FILE: website/README.md ================================================
# Jupyter Enterprise Gateway website

A Jekyll based website describing a general overview of the Jupyter Enterprise Gateway project

## Building the project

jekyll serve --watch

================================================ FILE: website/_config.yml ================================================
# Site settings
title: Jupyter Enterprise Gateway
email:
description: > # this means to ignore newlines until "baseurl:"
baseurl: "/enterprise_gateway" # the subpath of your site, e.g. /blog/
url: "" # the base hostname & protocol for your site
twitter_username:
github_username:

# Build settings
markdown: kramdown

================================================ FILE: website/_data/navigation.yml ================================================
topnav:
  - title: Enterprise Gateway
    subcategories:
      - title: What is it?
        url: /enterprise_gateway/#about
      - title: Features
        url: /enterprise_gateway/#features
      - title: Supported Platforms
        url: /enterprise_gateway/#platforms
      - title: Contact
        url: /enterprise_gateway/#contact
  - title: Documentation
    url: https://jupyter-enterprise-gateway.readthedocs.io/en/latest/
  - title: GitHub
    url: https://github.com/jupyter/enterprise_gateway
  - title: Privacy
    url: /enterprise_gateway/privacy-policy

================================================ FILE: website/_includes/call-to-action.html ================================================

Jupyter Enterprise Gateway


A lightweight, multi-tenant, scalable and secure gateway that enables Jupyter Notebooks to share resources across distributed clusters such as Apache Spark, Kubernetes and others.

Get Started!
================================================
FILE: website/_includes/contact.html
================================================

Let's Get In Touch!

Join the Jupyter Gateway community by interacting with us on the Jupyter mailing list or via the project's GitHub.

================================================
FILE: website/_includes/features.html
================================================

Key capabilities offered by Jupyter Enterprise Gateway

Optimized and Distributed Resource Allocation

It enables Jupyter to utilize distributed cluster resources by running kernels as Apache Spark applications in YARN cluster mode or as independent pods in a Kubernetes cluster.

Enhanced Security

It provides end-to-end security, such as encrypted HTTP communication between the Jupyter Notebook and the Gateway, and secured socket communication between the Gateway and remote kernels.

Multiuser Support with User Impersonation

It enhances the security and sandboxing of all kernels by enabling user impersonation via Kerberos.

Open Source

All of this is available as free, open source software, built by the community, for the community.

================================================
FILE: website/_includes/head.html
================================================
Jupyter Enterprise Gateway

================================================
FILE: website/_includes/header.html
================================================

Jupyter
Enterprise Gateway


The Jupyter Enterprise Gateway project is dedicated to making the Jupyter Notebook stack multi-tenant, scalable, secure, and ready for enterprise scenarios such as Big Data Analytics, Machine Learning, and Deep Learning model development.

Find Out More
================================================
FILE: website/_includes/nav.html
================================================

================================================
FILE: website/_includes/platforms.html
================================================

Supported Platforms

Apache Spark running in YARN cluster mode

Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as Apache Spark applications in YARN cluster mode, which enables the kernels to run on different nodes of the cluster.

Learn more...

Kubernetes

Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as independent pods distributed in a Kubernetes cluster.

Learn more...

Docker Swarm

Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as independent containers distributed in a Docker Swarm cluster.

Dask

Jupyter Enterprise Gateway enables Jupyter Notebook kernels to be scaled using Dask in YARN cluster mode, which enables the kernels to run on different nodes of the cluster.

IBM Spectrum Conductor

Jupyter Enterprise Gateway enables Jupyter Notebook kernels to run as Apache Spark applications in the IBM Spectrum Conductor platform.

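All of the platforms above are exposed to clients through the same REST API: a Jupyter server (or any HTTP client) asks the gateway which kernelspecs it offers, then starts kernels by name. A minimal sketch against that API using the requests library; the base URL and kernelspec name are illustrative, and the endpoint paths follow the project's swagger definition:

import requests

EG_BASE = "http://localhost:8888"  # illustrative Enterprise Gateway endpoint

# Ask the gateway which kernelspecs it can launch (YARN, Kubernetes, Docker, ...)
specs = requests.get(f"{EG_BASE}/api/kernelspecs").json()
print(sorted(specs["kernelspecs"]))

# Start a kernel by kernelspec name, e.g. a Python kernel launched as a Kubernetes pod
kernel = requests.post(f"{EG_BASE}/api/kernels", json={"name": "python_kubernetes"}).json()
print(kernel["id"])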
================================================
FILE: website/_includes/scripts.html
================================================

================================================
FILE: website/_layouts/home.html
================================================
{% include head.html %}
{% include nav.html %}
{% include header.html %}
{% include call-to-action.html %}
{% include features.html %}
{% include platforms.html %}
{% include contact.html %}
{% include scripts.html %}

================================================
FILE: website/_layouts/page.html
================================================
{% include head.html %}
{% include nav.html %}
{{ content }}
{% include scripts.html %} ================================================ FILE: website/_sass/_base.scss ================================================ html, body { height: 100%; width: 100%; } body { @include serif-font; } hr { border-color: $theme-primary; border-width: 3px; //max-width: 50px; } hr.light { border-color: white; } a { @include transition-all; color: $theme-primary; &:hover, &:focus { color: darken($theme-primary, 10%); } } h1, h2, h3, h4, h5, h6 { @include sans-serif-font; } p { font-size: 16px; line-height: 1.5; margin-bottom: 20px; } .bg-primary { background-color: $theme-primary; } .bg-dark { background-color: $theme-dark; color: white; } .text-faded { color: rgba(white, .7); } section { padding: 100px 0; } aside { padding: 50px 0; } .no-padding { padding: 0; } // Navigation .navbar-default { background-color: white; border-color: rgba($theme-dark, .5); @include sans-serif-font; @include transition-all; .navbar-header .navbar-brand { color: $theme-primary; @include sans-serif-font; font-weight: 700; text-transform: uppercase; &:hover, &:focus { color: darken($theme-primary, 10%); } } .nav { > li { > a, > a:focus { text-transform: uppercase; font-weight: 700; font-size: 13px; color: $theme-dark; &:hover { color: $theme-primary; } } &.active { > a, > a:focus { color: $theme-primary !important; background-color: transparent; &:hover { background-color: transparent; } } } } } @media (min-width: 768px) { background-color: transparent; border-color: rgba(white, .3); .navbar-header .navbar-brand { color: rgba(white, .7); &:hover, &:focus { color: white; } } .nav > li > a, .nav > li > a:focus { color: rgba(white, .7); &:hover { color: white; } } &.affix { background-color: white; border-color: rgba($theme-dark, .5); .navbar-header .navbar-brand { color: $theme-primary; font-size: 14px; &:hover, &:focus { color: darken($theme-primary, 10%); } } .nav > li > a, .nav > li > a:focus { color: $theme-dark; &:hover { color: $theme-primary; } } } } } // Homepage Header header { position: relative; width: 100%; min-height: auto; @include background-cover; background-position: center; background-image: url('../img/header.jpg'); text-align: center; color: white; .header-content { position: relative; text-align: center; padding: 100px 15px 100px; width: 100%; .header-content-inner { h1 { font-weight: 700; text-transform: uppercase; margin-top: 0; margin-bottom: 0; } hr { margin: 30px auto; } p { font-weight: 300; color: rgba(white, .7); font-size: 16px; margin-bottom: 50px; } } } @media (min-width: 768px) { min-height: 100%; .header-content { position: absolute; top: 50%; -webkit-transform: translateY(-50%); -ms-transform: translateY(-50%); transform: translateY(-50%); padding: 0 50px; .header-content-inner { max-width: 1000px; margin-left: auto; margin-right: auto; p { font-size: 18px; max-width: 80%; margin-left: auto; margin-right: auto; } } } } } // Sections .section-heading { margin-top: 0; } .service-box { max-width: 400px; margin: 50px auto 0; @media (min-width: 992px) { margin: 20px auto 0; } p { margin-bottom: 0; } h3 { height: 125px; padding: 25px; } } .top-buffer { margin-top:75px; } .portfolio-box { position: relative; display: block; max-width: 650px; margin: 0 auto; .portfolio-box-caption { color: white; opacity: 0; display: block; background: rgba( $theme-primary, .9 ); position: absolute; bottom: 0; text-align: center; width: 100%; height: 100%; @include transition-all; .portfolio-box-caption-content { width: 100%; text-align: center; position: absolute; top: 50%; 
transform: translateY(-50%); .project-category, .project-name { @include sans-serif-font; padding: 0 15px; } .project-category { text-transform: uppercase; font-weight: 600; font-size: 14px; } .project-name { font-size: 18px; } } } &:hover { .portfolio-box-caption { opacity: 1; } } @media (min-width: 768px) { .portfolio-box-caption { .portfolio-box-caption-content { .project-category { font-size: 16px; } .project-name { font-size: 22px; } } } } } .call-to-action { h2 { margin: 0 auto 20px; } } // Bootstrap Overrides .text-primary { color: $theme-primary; } .no-gutter > [class*='col-'] { padding-right:0; padding-left:0; } // Button Styles .btn-default { @include button-variant($theme-dark, white, white); } .btn-primary { @include button-variant(white, $theme-primary, $theme-primary); } .btn { @include sans-serif-font; border: none; border-radius: 300px; font-weight: 700; text-transform: uppercase; } .btn-xl { padding: 15px 30px; } // Contact #contact .fa { color: $theme-dark; font-size: 4em; } // Extras // -- Highlight Color Customization ::-moz-selection { color: white; text-shadow: none; background: $theme-dark; } ::selection { color: white; text-shadow: none; background: $theme-dark; } img::selection { color: white; background: transparent; } img::-moz-selection { color: white; background: transparent; } body { -webkit-tap-highlight-color: $theme-dark; } ================================================ FILE: website/_sass/_mixins.scss ================================================ @mixin transition-all() { -webkit-transition: all 0.35s; -moz-transition: all 0.35s; transition: all 0.35s; } @mixin background-cover() { -webkit-background-size: cover; -moz-background-size: cover; background-size: cover; -o-background-size: cover; } @mixin button-variant($color, $background, $border) { color: $color; background-color: $background; border-color: $border; @include transition-all; &:hover, &:focus, &.focus, &:active, &.active, .open > .dropdown-toggle & { color: $color; background-color: darken($background, 5%); border-color: darken($border, 7%); } &:active, &.active, .open > .dropdown-toggle & { background-image: none; } &.disabled, &[disabled], fieldset[disabled] & { &, &:hover, &:focus, &.focus, &:active, &.active { background-color: $background; border-color: $border; } } .badge { color: $background; background-color: $color; } } @mixin sans-serif-font() { font-family: 'Open Sans', 'Helvetica Neue', Arial, sans-serif; } @mixin serif-font() { font-family: 'Merriweather', 'Helvetica Neue', Arial, sans-serif; } ================================================ FILE: website/css/bootstrap.css ================================================ /*! * Bootstrap v3.3.2 (http://getbootstrap.com) * Copyright 2011-2015 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ /*! 
normalize.css v3.0.2 | MIT License | git.io/normalize */ html { font-family: sans-serif; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; } body { margin: 0; } article, aside, details, figcaption, figure, footer, header, hgroup, main, menu, nav, section, summary { display: block; } audio, canvas, progress, video { display: inline-block; vertical-align: baseline; } audio:not([controls]) { display: none; height: 0; } [hidden], template { display: none; } a { background-color: transparent; } a:active, a:hover { outline: 0; } abbr[title] { border-bottom: 1px dotted; } b, strong { font-weight: bold; } dfn { font-style: italic; } h1 { margin: .67em 0; font-size: 2em; } mark { color: #000; background: #ff0; } small { font-size: 80%; } sub, sup { position: relative; font-size: 75%; line-height: 0; vertical-align: baseline; } sup { top: -.5em; } sub { bottom: -.25em; } img { border: 0; } svg:not(:root) { overflow: hidden; } figure { margin: 1em 40px; } hr { height: 0; -webkit-box-sizing: content-box; -moz-box-sizing: content-box; box-sizing: content-box; } pre { overflow: auto; } code, kbd, pre, samp { font-family: monospace, monospace; font-size: 1em; } button, input, optgroup, select, textarea { margin: 0; font: inherit; color: inherit; } button { overflow: visible; } button, select { text-transform: none; } button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; cursor: pointer; } button[disabled], html input[disabled] { cursor: default; } button::-moz-focus-inner, input::-moz-focus-inner { padding: 0; border: 0; } input { line-height: normal; } input[type="checkbox"], input[type="radio"] { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; padding: 0; } input[type="number"]::-webkit-inner-spin-button, input[type="number"]::-webkit-outer-spin-button { height: auto; } input[type="search"] { -webkit-box-sizing: content-box; -moz-box-sizing: content-box; box-sizing: content-box; -webkit-appearance: textfield; } input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } fieldset { padding: .35em .625em .75em; margin: 0 2px; border: 1px solid #c0c0c0; } legend { padding: 0; border: 0; } textarea { overflow: auto; } optgroup { font-weight: bold; } table { border-spacing: 0; border-collapse: collapse; } td, th { padding: 0; } /*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ @media print { *, *:before, *:after { color: #000 !important; text-shadow: none !important; background: transparent !important; -webkit-box-shadow: none !important; box-shadow: none !important; } a, a:visited { text-decoration: underline; } a[href]:after { content: " (" attr(href) ")"; } abbr[title]:after { content: " (" attr(title) ")"; } a[href^="#"]:after, a[href^="javascript:"]:after { content: ""; } pre, blockquote { border: 1px solid #999; page-break-inside: avoid; } thead { display: table-header-group; } tr, img { page-break-inside: avoid; } img { max-width: 100% !important; } p, h2, h3 { orphans: 3; widows: 3; } h2, h3 { page-break-after: avoid; } select { background: #fff !important; } .navbar { display: none; } .btn > .caret, .dropup > .btn > .caret { border-top-color: #000 !important; } .label { border: 1px solid #000; } .table { border-collapse: collapse !important; } .table td, .table th { background-color: #fff !important; } .table-bordered th, .table-bordered td { border: 1px solid #ddd !important; } } @font-face { font-family: 'Glyphicons Halflings'; src: url('../fonts/glyphicons-halflings-regular.eot'); src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); } .glyphicon { position: relative; top: 1px; display: inline-block; font-family: 'Glyphicons Halflings'; font-style: normal; font-weight: normal; line-height: 1; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } .glyphicon-asterisk:before { content: "\2a"; } .glyphicon-plus:before { content: "\2b"; } .glyphicon-euro:before, .glyphicon-eur:before { content: "\20ac"; } .glyphicon-minus:before { content: "\2212"; } .glyphicon-cloud:before { content: "\2601"; } .glyphicon-envelope:before { content: "\2709"; } .glyphicon-pencil:before { content: "\270f"; } .glyphicon-glass:before { content: "\e001"; } .glyphicon-music:before { content: "\e002"; } .glyphicon-search:before { content: "\e003"; } .glyphicon-heart:before { content: "\e005"; } .glyphicon-star:before { content: "\e006"; } .glyphicon-star-empty:before { content: "\e007"; } .glyphicon-user:before { content: "\e008"; } .glyphicon-film:before { content: "\e009"; } .glyphicon-th-large:before { content: "\e010"; } .glyphicon-th:before { content: "\e011"; } .glyphicon-th-list:before { content: "\e012"; } .glyphicon-ok:before { content: "\e013"; } .glyphicon-remove:before { content: "\e014"; } .glyphicon-zoom-in:before { content: "\e015"; } .glyphicon-zoom-out:before { content: "\e016"; } .glyphicon-off:before { content: "\e017"; } .glyphicon-signal:before { content: "\e018"; } .glyphicon-cog:before { content: "\e019"; } .glyphicon-trash:before { content: "\e020"; } .glyphicon-home:before { content: "\e021"; } .glyphicon-file:before { content: "\e022"; } .glyphicon-time:before { content: "\e023"; } .glyphicon-road:before { content: "\e024"; } .glyphicon-download-alt:before { content: "\e025"; } .glyphicon-download:before { content: "\e026"; } .glyphicon-upload:before { content: "\e027"; } .glyphicon-inbox:before { content: "\e028"; } .glyphicon-play-circle:before { content: "\e029"; } .glyphicon-repeat:before { content: "\e030"; } 
.glyphicon-refresh:before { content: "\e031"; } .glyphicon-list-alt:before { content: "\e032"; } .glyphicon-lock:before { content: "\e033"; } .glyphicon-flag:before { content: "\e034"; } .glyphicon-headphones:before { content: "\e035"; } .glyphicon-volume-off:before { content: "\e036"; } .glyphicon-volume-down:before { content: "\e037"; } .glyphicon-volume-up:before { content: "\e038"; } .glyphicon-qrcode:before { content: "\e039"; } .glyphicon-barcode:before { content: "\e040"; } .glyphicon-tag:before { content: "\e041"; } .glyphicon-tags:before { content: "\e042"; } .glyphicon-book:before { content: "\e043"; } .glyphicon-bookmark:before { content: "\e044"; } .glyphicon-print:before { content: "\e045"; } .glyphicon-camera:before { content: "\e046"; } .glyphicon-font:before { content: "\e047"; } .glyphicon-bold:before { content: "\e048"; } .glyphicon-italic:before { content: "\e049"; } .glyphicon-text-height:before { content: "\e050"; } .glyphicon-text-width:before { content: "\e051"; } .glyphicon-align-left:before { content: "\e052"; } .glyphicon-align-center:before { content: "\e053"; } .glyphicon-align-right:before { content: "\e054"; } .glyphicon-align-justify:before { content: "\e055"; } .glyphicon-list:before { content: "\e056"; } .glyphicon-indent-left:before { content: "\e057"; } .glyphicon-indent-right:before { content: "\e058"; } .glyphicon-facetime-video:before { content: "\e059"; } .glyphicon-picture:before { content: "\e060"; } .glyphicon-map-marker:before { content: "\e062"; } .glyphicon-adjust:before { content: "\e063"; } .glyphicon-tint:before { content: "\e064"; } .glyphicon-edit:before { content: "\e065"; } .glyphicon-share:before { content: "\e066"; } .glyphicon-check:before { content: "\e067"; } .glyphicon-move:before { content: "\e068"; } .glyphicon-step-backward:before { content: "\e069"; } .glyphicon-fast-backward:before { content: "\e070"; } .glyphicon-backward:before { content: "\e071"; } .glyphicon-play:before { content: "\e072"; } .glyphicon-pause:before { content: "\e073"; } .glyphicon-stop:before { content: "\e074"; } .glyphicon-forward:before { content: "\e075"; } .glyphicon-fast-forward:before { content: "\e076"; } .glyphicon-step-forward:before { content: "\e077"; } .glyphicon-eject:before { content: "\e078"; } .glyphicon-chevron-left:before { content: "\e079"; } .glyphicon-chevron-right:before { content: "\e080"; } .glyphicon-plus-sign:before { content: "\e081"; } .glyphicon-minus-sign:before { content: "\e082"; } .glyphicon-remove-sign:before { content: "\e083"; } .glyphicon-ok-sign:before { content: "\e084"; } .glyphicon-question-sign:before { content: "\e085"; } .glyphicon-info-sign:before { content: "\e086"; } .glyphicon-screenshot:before { content: "\e087"; } .glyphicon-remove-circle:before { content: "\e088"; } .glyphicon-ok-circle:before { content: "\e089"; } .glyphicon-ban-circle:before { content: "\e090"; } .glyphicon-arrow-left:before { content: "\e091"; } .glyphicon-arrow-right:before { content: "\e092"; } .glyphicon-arrow-up:before { content: "\e093"; } .glyphicon-arrow-down:before { content: "\e094"; } .glyphicon-share-alt:before { content: "\e095"; } .glyphicon-resize-full:before { content: "\e096"; } .glyphicon-resize-small:before { content: "\e097"; } .glyphicon-exclamation-sign:before { content: "\e101"; } .glyphicon-gift:before { content: "\e102"; } .glyphicon-leaf:before { content: "\e103"; } .glyphicon-fire:before { content: "\e104"; } .glyphicon-eye-open:before { content: "\e105"; } .glyphicon-eye-close:before { content: "\e106"; } 
.glyphicon-warning-sign:before { content: "\e107"; } .glyphicon-plane:before { content: "\e108"; } .glyphicon-calendar:before { content: "\e109"; } .glyphicon-random:before { content: "\e110"; } .glyphicon-comment:before { content: "\e111"; } .glyphicon-magnet:before { content: "\e112"; } .glyphicon-chevron-up:before { content: "\e113"; } .glyphicon-chevron-down:before { content: "\e114"; } .glyphicon-retweet:before { content: "\e115"; } .glyphicon-shopping-cart:before { content: "\e116"; } .glyphicon-folder-close:before { content: "\e117"; } .glyphicon-folder-open:before { content: "\e118"; } .glyphicon-resize-vertical:before { content: "\e119"; } .glyphicon-resize-horizontal:before { content: "\e120"; } .glyphicon-hdd:before { content: "\e121"; } .glyphicon-bullhorn:before { content: "\e122"; } .glyphicon-bell:before { content: "\e123"; } .glyphicon-certificate:before { content: "\e124"; } .glyphicon-thumbs-up:before { content: "\e125"; } .glyphicon-thumbs-down:before { content: "\e126"; } .glyphicon-hand-right:before { content: "\e127"; } .glyphicon-hand-left:before { content: "\e128"; } .glyphicon-hand-up:before { content: "\e129"; } .glyphicon-hand-down:before { content: "\e130"; } .glyphicon-circle-arrow-right:before { content: "\e131"; } .glyphicon-circle-arrow-left:before { content: "\e132"; } .glyphicon-circle-arrow-up:before { content: "\e133"; } .glyphicon-circle-arrow-down:before { content: "\e134"; } .glyphicon-globe:before { content: "\e135"; } .glyphicon-wrench:before { content: "\e136"; } .glyphicon-tasks:before { content: "\e137"; } .glyphicon-filter:before { content: "\e138"; } .glyphicon-briefcase:before { content: "\e139"; } .glyphicon-fullscreen:before { content: "\e140"; } .glyphicon-dashboard:before { content: "\e141"; } .glyphicon-paperclip:before { content: "\e142"; } .glyphicon-heart-empty:before { content: "\e143"; } .glyphicon-link:before { content: "\e144"; } .glyphicon-phone:before { content: "\e145"; } .glyphicon-pushpin:before { content: "\e146"; } .glyphicon-usd:before { content: "\e148"; } .glyphicon-gbp:before { content: "\e149"; } .glyphicon-sort:before { content: "\e150"; } .glyphicon-sort-by-alphabet:before { content: "\e151"; } .glyphicon-sort-by-alphabet-alt:before { content: "\e152"; } .glyphicon-sort-by-order:before { content: "\e153"; } .glyphicon-sort-by-order-alt:before { content: "\e154"; } .glyphicon-sort-by-attributes:before { content: "\e155"; } .glyphicon-sort-by-attributes-alt:before { content: "\e156"; } .glyphicon-unchecked:before { content: "\e157"; } .glyphicon-expand:before { content: "\e158"; } .glyphicon-collapse-down:before { content: "\e159"; } .glyphicon-collapse-up:before { content: "\e160"; } .glyphicon-log-in:before { content: "\e161"; } .glyphicon-flash:before { content: "\e162"; } .glyphicon-log-out:before { content: "\e163"; } .glyphicon-new-window:before { content: "\e164"; } .glyphicon-record:before { content: "\e165"; } .glyphicon-save:before { content: "\e166"; } .glyphicon-open:before { content: "\e167"; } .glyphicon-saved:before { content: "\e168"; } .glyphicon-import:before { content: "\e169"; } .glyphicon-export:before { content: "\e170"; } .glyphicon-send:before { content: "\e171"; } .glyphicon-floppy-disk:before { content: "\e172"; } .glyphicon-floppy-saved:before { content: "\e173"; } .glyphicon-floppy-remove:before { content: "\e174"; } .glyphicon-floppy-save:before { content: "\e175"; } .glyphicon-floppy-open:before { content: "\e176"; } .glyphicon-credit-card:before { content: "\e177"; } 
.glyphicon-transfer:before { content: "\e178"; } .glyphicon-cutlery:before { content: "\e179"; } .glyphicon-header:before { content: "\e180"; } .glyphicon-compressed:before { content: "\e181"; } .glyphicon-earphone:before { content: "\e182"; } .glyphicon-phone-alt:before { content: "\e183"; } .glyphicon-tower:before { content: "\e184"; } .glyphicon-stats:before { content: "\e185"; } .glyphicon-sd-video:before { content: "\e186"; } .glyphicon-hd-video:before { content: "\e187"; } .glyphicon-subtitles:before { content: "\e188"; } .glyphicon-sound-stereo:before { content: "\e189"; } .glyphicon-sound-dolby:before { content: "\e190"; } .glyphicon-sound-5-1:before { content: "\e191"; } .glyphicon-sound-6-1:before { content: "\e192"; } .glyphicon-sound-7-1:before { content: "\e193"; } .glyphicon-copyright-mark:before { content: "\e194"; } .glyphicon-registration-mark:before { content: "\e195"; } .glyphicon-cloud-download:before { content: "\e197"; } .glyphicon-cloud-upload:before { content: "\e198"; } .glyphicon-tree-conifer:before { content: "\e199"; } .glyphicon-tree-deciduous:before { content: "\e200"; } .glyphicon-cd:before { content: "\e201"; } .glyphicon-save-file:before { content: "\e202"; } .glyphicon-open-file:before { content: "\e203"; } .glyphicon-level-up:before { content: "\e204"; } .glyphicon-copy:before { content: "\e205"; } .glyphicon-paste:before { content: "\e206"; } .glyphicon-alert:before { content: "\e209"; } .glyphicon-equalizer:before { content: "\e210"; } .glyphicon-king:before { content: "\e211"; } .glyphicon-queen:before { content: "\e212"; } .glyphicon-pawn:before { content: "\e213"; } .glyphicon-bishop:before { content: "\e214"; } .glyphicon-knight:before { content: "\e215"; } .glyphicon-baby-formula:before { content: "\e216"; } .glyphicon-tent:before { content: "\26fa"; } .glyphicon-blackboard:before { content: "\e218"; } .glyphicon-bed:before { content: "\e219"; } .glyphicon-apple:before { content: "\f8ff"; } .glyphicon-erase:before { content: "\e221"; } .glyphicon-hourglass:before { content: "\231b"; } .glyphicon-lamp:before { content: "\e223"; } .glyphicon-duplicate:before { content: "\e224"; } .glyphicon-piggy-bank:before { content: "\e225"; } .glyphicon-scissors:before { content: "\e226"; } .glyphicon-bitcoin:before { content: "\e227"; } .glyphicon-yen:before { content: "\00a5"; } .glyphicon-ruble:before { content: "\20bd"; } .glyphicon-scale:before { content: "\e230"; } .glyphicon-ice-lolly:before { content: "\e231"; } .glyphicon-ice-lolly-tasted:before { content: "\e232"; } .glyphicon-education:before { content: "\e233"; } .glyphicon-option-horizontal:before { content: "\e234"; } .glyphicon-option-vertical:before { content: "\e235"; } .glyphicon-menu-hamburger:before { content: "\e236"; } .glyphicon-modal-window:before { content: "\e237"; } .glyphicon-oil:before { content: "\e238"; } .glyphicon-grain:before { content: "\e239"; } .glyphicon-sunglasses:before { content: "\e240"; } .glyphicon-text-size:before { content: "\e241"; } .glyphicon-text-color:before { content: "\e242"; } .glyphicon-text-background:before { content: "\e243"; } .glyphicon-object-align-top:before { content: "\e244"; } .glyphicon-object-align-bottom:before { content: "\e245"; } .glyphicon-object-align-horizontal:before { content: "\e246"; } .glyphicon-object-align-left:before { content: "\e247"; } .glyphicon-object-align-vertical:before { content: "\e248"; } .glyphicon-object-align-right:before { content: "\e249"; } .glyphicon-triangle-right:before { content: "\e250"; } 
.glyphicon-triangle-left:before { content: "\e251"; } .glyphicon-triangle-bottom:before { content: "\e252"; } .glyphicon-triangle-top:before { content: "\e253"; } .glyphicon-console:before { content: "\e254"; } .glyphicon-superscript:before { content: "\e255"; } .glyphicon-subscript:before { content: "\e256"; } .glyphicon-menu-left:before { content: "\e257"; } .glyphicon-menu-right:before { content: "\e258"; } .glyphicon-menu-down:before { content: "\e259"; } .glyphicon-menu-up:before { content: "\e260"; } * { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } *:before, *:after { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } html { font-size: 10px; -webkit-tap-highlight-color: rgba(0, 0, 0, 0); } body { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 14px; line-height: 1.42857143; color: #333; background-color: #fff; } input, button, select, textarea { font-family: inherit; font-size: inherit; line-height: inherit; } a { color: #337ab7; text-decoration: none; } a:hover, a:focus { color: #23527c; text-decoration: underline; } a:focus { outline: thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } figure { margin: 0; } img { vertical-align: middle; } .img-responsive, .thumbnail > img, .thumbnail a > img, .carousel-inner > .item > img, .carousel-inner > .item > a > img { display: block; max-width: 100%; height: auto; } .img-rounded { border-radius: 6px; } .img-thumbnail { display: inline-block; max-width: 100%; height: auto; padding: 4px; line-height: 1.42857143; background-color: #fff; border: 1px solid #ddd; border-radius: 4px; -webkit-transition: all .2s ease-in-out; -o-transition: all .2s ease-in-out; transition: all .2s ease-in-out; } .img-circle { border-radius: 50%; } hr { margin-top: 20px; margin-bottom: 20px; border: 0; border-top: 1px solid #eee; } .sr-only { position: absolute; width: 1px; height: 1px; padding: 0; margin: -1px; overflow: hidden; clip: rect(0, 0, 0, 0); border: 0; } .sr-only-focusable:active, .sr-only-focusable:focus { position: static; width: auto; height: auto; margin: 0; overflow: visible; clip: auto; } h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 { font-family: inherit; font-weight: 500; line-height: 1.1; color: inherit; } h1 small, h2 small, h3 small, h4 small, h5 small, h6 small, .h1 small, .h2 small, .h3 small, .h4 small, .h5 small, .h6 small, h1 .small, h2 .small, h3 .small, h4 .small, h5 .small, h6 .small, .h1 .small, .h2 .small, .h3 .small, .h4 .small, .h5 .small, .h6 .small { font-weight: normal; line-height: 1; color: #777; } h1, .h1, h2, .h2, h3, .h3 { margin-top: 20px; margin-bottom: 10px; } h1 small, .h1 small, h2 small, .h2 small, h3 small, .h3 small, h1 .small, .h1 .small, h2 .small, .h2 .small, h3 .small, .h3 .small { font-size: 65%; } h4, .h4, h5, .h5, h6, .h6 { margin-top: 10px; margin-bottom: 10px; } h4 small, .h4 small, h5 small, .h5 small, h6 small, .h6 small, h4 .small, .h4 .small, h5 .small, .h5 .small, h6 .small, .h6 .small { font-size: 75%; } h1, .h1 { font-size: 36px; } h2, .h2 { font-size: 30px; } h3, .h3 { font-size: 24px; } h4, .h4 { font-size: 18px; } h5, .h5 { font-size: 14px; } h6, .h6 { font-size: 12px; } p { margin: 0 0 10px; } .lead { margin-bottom: 20px; font-size: 16px; font-weight: 300; line-height: 1.4; } @media (min-width: 768px) { .lead { font-size: 21px; } } small, .small { font-size: 85%; } mark, .mark { padding: .2em; background-color: #fcf8e3; } .text-left { text-align: left; } 
.text-right { text-align: right; } .text-center { text-align: center; } .text-justify { text-align: justify; } .text-nowrap { white-space: nowrap; } .text-lowercase { text-transform: lowercase; } .text-uppercase { text-transform: uppercase; } .text-capitalize { text-transform: capitalize; } .text-muted { color: #777; } .text-primary { color: #337ab7; } a.text-primary:hover { color: #286090; } .text-success { color: #3c763d; } a.text-success:hover { color: #2b542c; } .text-info { color: #31708f; } a.text-info:hover { color: #245269; } .text-warning { color: #8a6d3b; } a.text-warning:hover { color: #66512c; } .text-danger { color: #a94442; } a.text-danger:hover { color: #843534; } .bg-primary { color: #fff; background-color: #337ab7; } a.bg-primary:hover { background-color: #286090; } .bg-success { background-color: #dff0d8; } a.bg-success:hover { background-color: #c1e2b3; } .bg-info { background-color: #d9edf7; } a.bg-info:hover { background-color: #afd9ee; } .bg-warning { background-color: #fcf8e3; } a.bg-warning:hover { background-color: #f7ecb5; } .bg-danger { background-color: #f2dede; } a.bg-danger:hover { background-color: #e4b9b9; } .page-header { padding-bottom: 9px; margin: 40px 0 20px; border-bottom: 1px solid #eee; } ul, ol { margin-top: 0; margin-bottom: 10px; } ul ul, ol ul, ul ol, ol ol { margin-bottom: 0; } .list-unstyled { padding-left: 0; list-style: none; } .list-inline { padding-left: 0; margin-left: -5px; list-style: none; } .list-inline > li { display: inline-block; padding-right: 5px; padding-left: 5px; } dl { margin-top: 0; margin-bottom: 20px; } dt, dd { line-height: 1.42857143; } dt { font-weight: bold; } dd { margin-left: 0; } @media (min-width: 768px) { .dl-horizontal dt { float: left; width: 160px; overflow: hidden; clear: left; text-align: right; text-overflow: ellipsis; white-space: nowrap; } .dl-horizontal dd { margin-left: 180px; } } abbr[title], abbr[data-original-title] { cursor: help; border-bottom: 1px dotted #777; } .initialism { font-size: 90%; text-transform: uppercase; } blockquote { padding: 10px 20px; margin: 0 0 20px; font-size: 17.5px; border-left: 5px solid #eee; } blockquote p:last-child, blockquote ul:last-child, blockquote ol:last-child { margin-bottom: 0; } blockquote footer, blockquote small, blockquote .small { display: block; font-size: 80%; line-height: 1.42857143; color: #777; } blockquote footer:before, blockquote small:before, blockquote .small:before { content: '\2014 \00A0'; } .blockquote-reverse, blockquote.pull-right { padding-right: 15px; padding-left: 0; text-align: right; border-right: 5px solid #eee; border-left: 0; } .blockquote-reverse footer:before, blockquote.pull-right footer:before, .blockquote-reverse small:before, blockquote.pull-right small:before, .blockquote-reverse .small:before, blockquote.pull-right .small:before { content: ''; } .blockquote-reverse footer:after, blockquote.pull-right footer:after, .blockquote-reverse small:after, blockquote.pull-right small:after, .blockquote-reverse .small:after, blockquote.pull-right .small:after { content: '\00A0 \2014'; } address { margin-bottom: 20px; font-style: normal; line-height: 1.42857143; } code, kbd, pre, samp { font-family: Menlo, Monaco, Consolas, "Courier New", monospace; } code { padding: 2px 4px; font-size: 90%; color: #c7254e; background-color: #f9f2f4; border-radius: 4px; } kbd { padding: 2px 4px; font-size: 90%; color: #fff; background-color: #333; border-radius: 3px; -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); box-shadow: inset 0 -1px 0 rgba(0, 
0, 0, .25); } kbd kbd { padding: 0; font-size: 100%; font-weight: bold; -webkit-box-shadow: none; box-shadow: none; } pre { display: block; padding: 9.5px; margin: 0 0 10px; font-size: 13px; line-height: 1.42857143; color: #333; word-break: break-all; word-wrap: break-word; background-color: #f5f5f5; border: 1px solid #ccc; border-radius: 4px; } pre code { padding: 0; font-size: inherit; color: inherit; white-space: pre-wrap; background-color: transparent; border-radius: 0; } .pre-scrollable { max-height: 340px; overflow-y: scroll; } .container { padding-right: 15px; padding-left: 15px; margin-right: auto; margin-left: auto; } @media (min-width: 768px) { .container { width: 750px; } } @media (min-width: 992px) { .container { width: 970px; } } @media (min-width: 1200px) { .container { width: 1170px; } } .container-fluid { padding-right: 15px; padding-left: 15px; margin-right: auto; margin-left: auto; } .row { margin-right: -15px; margin-left: -15px; } .col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { position: relative; min-height: 1px; padding-right: 15px; padding-left: 15px; } .col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { float: left; } .col-xs-12 { width: 100%; } .col-xs-11 { width: 91.66666667%; } .col-xs-10 { width: 83.33333333%; } .col-xs-9 { width: 75%; } .col-xs-8 { width: 66.66666667%; } .col-xs-7 { width: 58.33333333%; } .col-xs-6 { width: 50%; } .col-xs-5 { width: 41.66666667%; } .col-xs-4 { width: 33.33333333%; } .col-xs-3 { width: 25%; } .col-xs-2 { width: 16.66666667%; } .col-xs-1 { width: 8.33333333%; } .col-xs-pull-12 { right: 100%; } .col-xs-pull-11 { right: 91.66666667%; } .col-xs-pull-10 { right: 83.33333333%; } .col-xs-pull-9 { right: 75%; } .col-xs-pull-8 { right: 66.66666667%; } .col-xs-pull-7 { right: 58.33333333%; } .col-xs-pull-6 { right: 50%; } .col-xs-pull-5 { right: 41.66666667%; } .col-xs-pull-4 { right: 33.33333333%; } .col-xs-pull-3 { right: 25%; } .col-xs-pull-2 { right: 16.66666667%; } .col-xs-pull-1 { right: 8.33333333%; } .col-xs-pull-0 { right: auto; } .col-xs-push-12 { left: 100%; } .col-xs-push-11 { left: 91.66666667%; } .col-xs-push-10 { left: 83.33333333%; } .col-xs-push-9 { left: 75%; } .col-xs-push-8 { left: 66.66666667%; } .col-xs-push-7 { left: 58.33333333%; } .col-xs-push-6 { left: 50%; } .col-xs-push-5 { left: 41.66666667%; } .col-xs-push-4 { left: 33.33333333%; } .col-xs-push-3 { left: 25%; } .col-xs-push-2 { left: 16.66666667%; } .col-xs-push-1 { left: 8.33333333%; } .col-xs-push-0 { left: auto; } .col-xs-offset-12 { margin-left: 100%; } .col-xs-offset-11 { margin-left: 91.66666667%; } .col-xs-offset-10 { margin-left: 83.33333333%; } .col-xs-offset-9 { margin-left: 75%; } .col-xs-offset-8 { margin-left: 66.66666667%; } .col-xs-offset-7 { margin-left: 58.33333333%; } .col-xs-offset-6 { margin-left: 50%; } .col-xs-offset-5 { margin-left: 41.66666667%; } .col-xs-offset-4 { margin-left: 33.33333333%; } .col-xs-offset-3 { margin-left: 25%; } .col-xs-offset-2 { margin-left: 
16.66666667%; } .col-xs-offset-1 { margin-left: 8.33333333%; } .col-xs-offset-0 { margin-left: 0; } @media (min-width: 768px) { .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { float: left; } .col-sm-12 { width: 100%; } .col-sm-11 { width: 91.66666667%; } .col-sm-10 { width: 83.33333333%; } .col-sm-9 { width: 75%; } .col-sm-8 { width: 66.66666667%; } .col-sm-7 { width: 58.33333333%; } .col-sm-6 { width: 50%; } .col-sm-5 { width: 41.66666667%; } .col-sm-4 { width: 33.33333333%; } .col-sm-3 { width: 25%; } .col-sm-2 { width: 16.66666667%; } .col-sm-1 { width: 8.33333333%; } .col-sm-pull-12 { right: 100%; } .col-sm-pull-11 { right: 91.66666667%; } .col-sm-pull-10 { right: 83.33333333%; } .col-sm-pull-9 { right: 75%; } .col-sm-pull-8 { right: 66.66666667%; } .col-sm-pull-7 { right: 58.33333333%; } .col-sm-pull-6 { right: 50%; } .col-sm-pull-5 { right: 41.66666667%; } .col-sm-pull-4 { right: 33.33333333%; } .col-sm-pull-3 { right: 25%; } .col-sm-pull-2 { right: 16.66666667%; } .col-sm-pull-1 { right: 8.33333333%; } .col-sm-pull-0 { right: auto; } .col-sm-push-12 { left: 100%; } .col-sm-push-11 { left: 91.66666667%; } .col-sm-push-10 { left: 83.33333333%; } .col-sm-push-9 { left: 75%; } .col-sm-push-8 { left: 66.66666667%; } .col-sm-push-7 { left: 58.33333333%; } .col-sm-push-6 { left: 50%; } .col-sm-push-5 { left: 41.66666667%; } .col-sm-push-4 { left: 33.33333333%; } .col-sm-push-3 { left: 25%; } .col-sm-push-2 { left: 16.66666667%; } .col-sm-push-1 { left: 8.33333333%; } .col-sm-push-0 { left: auto; } .col-sm-offset-12 { margin-left: 100%; } .col-sm-offset-11 { margin-left: 91.66666667%; } .col-sm-offset-10 { margin-left: 83.33333333%; } .col-sm-offset-9 { margin-left: 75%; } .col-sm-offset-8 { margin-left: 66.66666667%; } .col-sm-offset-7 { margin-left: 58.33333333%; } .col-sm-offset-6 { margin-left: 50%; } .col-sm-offset-5 { margin-left: 41.66666667%; } .col-sm-offset-4 { margin-left: 33.33333333%; } .col-sm-offset-3 { margin-left: 25%; } .col-sm-offset-2 { margin-left: 16.66666667%; } .col-sm-offset-1 { margin-left: 8.33333333%; } .col-sm-offset-0 { margin-left: 0; } } @media (min-width: 992px) { .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { float: left; } .col-md-12 { width: 100%; } .col-md-11 { width: 91.66666667%; } .col-md-10 { width: 83.33333333%; } .col-md-9 { width: 75%; } .col-md-8 { width: 66.66666667%; } .col-md-7 { width: 58.33333333%; } .col-md-6 { width: 50%; } .col-md-5 { width: 41.66666667%; } .col-md-4 { width: 33.33333333%; } .col-md-3 { width: 25%; } .col-md-2 { width: 16.66666667%; } .col-md-1 { width: 8.33333333%; } .col-md-pull-12 { right: 100%; } .col-md-pull-11 { right: 91.66666667%; } .col-md-pull-10 { right: 83.33333333%; } .col-md-pull-9 { right: 75%; } .col-md-pull-8 { right: 66.66666667%; } .col-md-pull-7 { right: 58.33333333%; } .col-md-pull-6 { right: 50%; } .col-md-pull-5 { right: 41.66666667%; } .col-md-pull-4 { right: 33.33333333%; } .col-md-pull-3 { right: 25%; } .col-md-pull-2 { right: 16.66666667%; } .col-md-pull-1 { right: 8.33333333%; } .col-md-pull-0 { right: auto; } .col-md-push-12 { left: 100%; } .col-md-push-11 { left: 91.66666667%; } .col-md-push-10 { left: 83.33333333%; } .col-md-push-9 { left: 75%; } .col-md-push-8 { left: 66.66666667%; } .col-md-push-7 { left: 58.33333333%; } .col-md-push-6 { left: 50%; } .col-md-push-5 { left: 41.66666667%; } .col-md-push-4 { left: 
33.33333333%; } .col-md-push-3 { left: 25%; } .col-md-push-2 { left: 16.66666667%; } .col-md-push-1 { left: 8.33333333%; } .col-md-push-0 { left: auto; } .col-md-offset-12 { margin-left: 100%; } .col-md-offset-11 { margin-left: 91.66666667%; } .col-md-offset-10 { margin-left: 83.33333333%; } .col-md-offset-9 { margin-left: 75%; } .col-md-offset-8 { margin-left: 66.66666667%; } .col-md-offset-7 { margin-left: 58.33333333%; } .col-md-offset-6 { margin-left: 50%; } .col-md-offset-5 { margin-left: 41.66666667%; } .col-md-offset-4 { margin-left: 33.33333333%; } .col-md-offset-3 { margin-left: 25%; } .col-md-offset-2 { margin-left: 16.66666667%; } .col-md-offset-1 { margin-left: 8.33333333%; } .col-md-offset-0 { margin-left: 0; } } @media (min-width: 1200px) { .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { float: left; } .col-lg-12 { width: 100%; } .col-lg-11 { width: 91.66666667%; } .col-lg-10 { width: 83.33333333%; } .col-lg-9 { width: 75%; } .col-lg-8 { width: 66.66666667%; } .col-lg-7 { width: 58.33333333%; } .col-lg-6 { width: 50%; } .col-lg-5 { width: 41.66666667%; } .col-lg-4 { width: 33.33333333%; } .col-lg-3 { width: 25%; } .col-lg-2 { width: 16.66666667%; } .col-lg-1 { width: 8.33333333%; } .col-lg-pull-12 { right: 100%; } .col-lg-pull-11 { right: 91.66666667%; } .col-lg-pull-10 { right: 83.33333333%; } .col-lg-pull-9 { right: 75%; } .col-lg-pull-8 { right: 66.66666667%; } .col-lg-pull-7 { right: 58.33333333%; } .col-lg-pull-6 { right: 50%; } .col-lg-pull-5 { right: 41.66666667%; } .col-lg-pull-4 { right: 33.33333333%; } .col-lg-pull-3 { right: 25%; } .col-lg-pull-2 { right: 16.66666667%; } .col-lg-pull-1 { right: 8.33333333%; } .col-lg-pull-0 { right: auto; } .col-lg-push-12 { left: 100%; } .col-lg-push-11 { left: 91.66666667%; } .col-lg-push-10 { left: 83.33333333%; } .col-lg-push-9 { left: 75%; } .col-lg-push-8 { left: 66.66666667%; } .col-lg-push-7 { left: 58.33333333%; } .col-lg-push-6 { left: 50%; } .col-lg-push-5 { left: 41.66666667%; } .col-lg-push-4 { left: 33.33333333%; } .col-lg-push-3 { left: 25%; } .col-lg-push-2 { left: 16.66666667%; } .col-lg-push-1 { left: 8.33333333%; } .col-lg-push-0 { left: auto; } .col-lg-offset-12 { margin-left: 100%; } .col-lg-offset-11 { margin-left: 91.66666667%; } .col-lg-offset-10 { margin-left: 83.33333333%; } .col-lg-offset-9 { margin-left: 75%; } .col-lg-offset-8 { margin-left: 66.66666667%; } .col-lg-offset-7 { margin-left: 58.33333333%; } .col-lg-offset-6 { margin-left: 50%; } .col-lg-offset-5 { margin-left: 41.66666667%; } .col-lg-offset-4 { margin-left: 33.33333333%; } .col-lg-offset-3 { margin-left: 25%; } .col-lg-offset-2 { margin-left: 16.66666667%; } .col-lg-offset-1 { margin-left: 8.33333333%; } .col-lg-offset-0 { margin-left: 0; } } table { background-color: transparent; } caption { padding-top: 8px; padding-bottom: 8px; color: #777; text-align: left; } th { text-align: left; } .table { width: 100%; max-width: 100%; margin-bottom: 20px; } .table > thead > tr > th, .table > tbody > tr > th, .table > tfoot > tr > th, .table > thead > tr > td, .table > tbody > tr > td, .table > tfoot > tr > td { padding: 8px; line-height: 1.42857143; vertical-align: top; border-top: 1px solid #ddd; } .table > thead > tr > th { vertical-align: bottom; border-bottom: 2px solid #ddd; } .table > caption + thead > tr:first-child > th, .table > colgroup + thead > tr:first-child > th, .table > thead:first-child > tr:first-child > th, .table > caption + thead > 
tr:first-child > td, .table > colgroup + thead > tr:first-child > td, .table > thead:first-child > tr:first-child > td { border-top: 0; } .table > tbody + tbody { border-top: 2px solid #ddd; } .table .table { background-color: #fff; } .table-condensed > thead > tr > th, .table-condensed > tbody > tr > th, .table-condensed > tfoot > tr > th, .table-condensed > thead > tr > td, .table-condensed > tbody > tr > td, .table-condensed > tfoot > tr > td { padding: 5px; } .table-bordered { border: 1px solid #ddd; } .table-bordered > thead > tr > th, .table-bordered > tbody > tr > th, .table-bordered > tfoot > tr > th, .table-bordered > thead > tr > td, .table-bordered > tbody > tr > td, .table-bordered > tfoot > tr > td { border: 1px solid #ddd; } .table-bordered > thead > tr > th, .table-bordered > thead > tr > td { border-bottom-width: 2px; } .table-striped > tbody > tr:nth-of-type(odd) { background-color: #f9f9f9; } .table-hover > tbody > tr:hover { background-color: #f5f5f5; } table col[class*="col-"] { position: static; display: table-column; float: none; } table td[class*="col-"], table th[class*="col-"] { position: static; display: table-cell; float: none; } .table > thead > tr > td.active, .table > tbody > tr > td.active, .table > tfoot > tr > td.active, .table > thead > tr > th.active, .table > tbody > tr > th.active, .table > tfoot > tr > th.active, .table > thead > tr.active > td, .table > tbody > tr.active > td, .table > tfoot > tr.active > td, .table > thead > tr.active > th, .table > tbody > tr.active > th, .table > tfoot > tr.active > th { background-color: #f5f5f5; } .table-hover > tbody > tr > td.active:hover, .table-hover > tbody > tr > th.active:hover, .table-hover > tbody > tr.active:hover > td, .table-hover > tbody > tr:hover > .active, .table-hover > tbody > tr.active:hover > th { background-color: #e8e8e8; } .table > thead > tr > td.success, .table > tbody > tr > td.success, .table > tfoot > tr > td.success, .table > thead > tr > th.success, .table > tbody > tr > th.success, .table > tfoot > tr > th.success, .table > thead > tr.success > td, .table > tbody > tr.success > td, .table > tfoot > tr.success > td, .table > thead > tr.success > th, .table > tbody > tr.success > th, .table > tfoot > tr.success > th { background-color: #dff0d8; } .table-hover > tbody > tr > td.success:hover, .table-hover > tbody > tr > th.success:hover, .table-hover > tbody > tr.success:hover > td, .table-hover > tbody > tr:hover > .success, .table-hover > tbody > tr.success:hover > th { background-color: #d0e9c6; } .table > thead > tr > td.info, .table > tbody > tr > td.info, .table > tfoot > tr > td.info, .table > thead > tr > th.info, .table > tbody > tr > th.info, .table > tfoot > tr > th.info, .table > thead > tr.info > td, .table > tbody > tr.info > td, .table > tfoot > tr.info > td, .table > thead > tr.info > th, .table > tbody > tr.info > th, .table > tfoot > tr.info > th { background-color: #d9edf7; } .table-hover > tbody > tr > td.info:hover, .table-hover > tbody > tr > th.info:hover, .table-hover > tbody > tr.info:hover > td, .table-hover > tbody > tr:hover > .info, .table-hover > tbody > tr.info:hover > th { background-color: #c4e3f3; } .table > thead > tr > td.warning, .table > tbody > tr > td.warning, .table > tfoot > tr > td.warning, .table > thead > tr > th.warning, .table > tbody > tr > th.warning, .table > tfoot > tr > th.warning, .table > thead > tr.warning > td, .table > tbody > tr.warning > td, .table > tfoot > tr.warning > td, .table > thead > tr.warning > th, .table > tbody > 
tr.warning > th, .table > tfoot > tr.warning > th { background-color: #fcf8e3; } .table-hover > tbody > tr > td.warning:hover, .table-hover > tbody > tr > th.warning:hover, .table-hover > tbody > tr.warning:hover > td, .table-hover > tbody > tr:hover > .warning, .table-hover > tbody > tr.warning:hover > th { background-color: #faf2cc; } .table > thead > tr > td.danger, .table > tbody > tr > td.danger, .table > tfoot > tr > td.danger, .table > thead > tr > th.danger, .table > tbody > tr > th.danger, .table > tfoot > tr > th.danger, .table > thead > tr.danger > td, .table > tbody > tr.danger > td, .table > tfoot > tr.danger > td, .table > thead > tr.danger > th, .table > tbody > tr.danger > th, .table > tfoot > tr.danger > th { background-color: #f2dede; } .table-hover > tbody > tr > td.danger:hover, .table-hover > tbody > tr > th.danger:hover, .table-hover > tbody > tr.danger:hover > td, .table-hover > tbody > tr:hover > .danger, .table-hover > tbody > tr.danger:hover > th { background-color: #ebcccc; } .table-responsive { min-height: .01%; overflow-x: auto; } @media screen and (max-width: 767px) { .table-responsive { width: 100%; margin-bottom: 15px; overflow-y: hidden; -ms-overflow-style: -ms-autohiding-scrollbar; border: 1px solid #ddd; } .table-responsive > .table { margin-bottom: 0; } .table-responsive > .table > thead > tr > th, .table-responsive > .table > tbody > tr > th, .table-responsive > .table > tfoot > tr > th, .table-responsive > .table > thead > tr > td, .table-responsive > .table > tbody > tr > td, .table-responsive > .table > tfoot > tr > td { white-space: nowrap; } .table-responsive > .table-bordered { border: 0; } .table-responsive > .table-bordered > thead > tr > th:first-child, .table-responsive > .table-bordered > tbody > tr > th:first-child, .table-responsive > .table-bordered > tfoot > tr > th:first-child, .table-responsive > .table-bordered > thead > tr > td:first-child, .table-responsive > .table-bordered > tbody > tr > td:first-child, .table-responsive > .table-bordered > tfoot > tr > td:first-child { border-left: 0; } .table-responsive > .table-bordered > thead > tr > th:last-child, .table-responsive > .table-bordered > tbody > tr > th:last-child, .table-responsive > .table-bordered > tfoot > tr > th:last-child, .table-responsive > .table-bordered > thead > tr > td:last-child, .table-responsive > .table-bordered > tbody > tr > td:last-child, .table-responsive > .table-bordered > tfoot > tr > td:last-child { border-right: 0; } .table-responsive > .table-bordered > tbody > tr:last-child > th, .table-responsive > .table-bordered > tfoot > tr:last-child > th, .table-responsive > .table-bordered > tbody > tr:last-child > td, .table-responsive > .table-bordered > tfoot > tr:last-child > td { border-bottom: 0; } } fieldset { min-width: 0; padding: 0; margin: 0; border: 0; } legend { display: block; width: 100%; padding: 0; margin-bottom: 20px; font-size: 21px; line-height: inherit; color: #333; border: 0; border-bottom: 1px solid #e5e5e5; } label { display: inline-block; max-width: 100%; margin-bottom: 5px; font-weight: bold; } input[type="search"] { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } input[type="radio"], input[type="checkbox"] { margin: 4px 0 0; margin-top: 1px \9; line-height: normal; } input[type="file"] { display: block; } input[type="range"] { display: block; width: 100%; } select[multiple], select[size] { height: auto; } input[type="file"]:focus, input[type="radio"]:focus, input[type="checkbox"]:focus { outline: 
thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } output { display: block; padding-top: 7px; font-size: 14px; line-height: 1.42857143; color: #555; } .form-control { display: block; width: 100%; height: 34px; padding: 6px 12px; font-size: 14px; line-height: 1.42857143; color: #555; background-color: #fff; background-image: none; border: 1px solid #ccc; border-radius: 4px; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; } .form-control:focus { border-color: #66afe9; outline: 0; -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); } .form-control::-moz-placeholder { color: #999; opacity: 1; } .form-control:-ms-input-placeholder { color: #999; } .form-control::-webkit-input-placeholder { color: #999; } .form-control[disabled], .form-control[readonly], fieldset[disabled] .form-control { cursor: not-allowed; background-color: #eee; opacity: 1; } textarea.form-control { height: auto; } input[type="search"] { -webkit-appearance: none; } @media screen and (-webkit-min-device-pixel-ratio: 0) { input[type="date"], input[type="time"], input[type="datetime-local"], input[type="month"] { line-height: 34px; } input[type="date"].input-sm, input[type="time"].input-sm, input[type="datetime-local"].input-sm, input[type="month"].input-sm, .input-group-sm input[type="date"], .input-group-sm input[type="time"], .input-group-sm input[type="datetime-local"], .input-group-sm input[type="month"] { line-height: 30px; } input[type="date"].input-lg, input[type="time"].input-lg, input[type="datetime-local"].input-lg, input[type="month"].input-lg, .input-group-lg input[type="date"], .input-group-lg input[type="time"], .input-group-lg input[type="datetime-local"], .input-group-lg input[type="month"] { line-height: 46px; } } .form-group { margin-bottom: 15px; } .radio, .checkbox { position: relative; display: block; margin-top: 10px; margin-bottom: 10px; } .radio label, .checkbox label { min-height: 20px; padding-left: 20px; margin-bottom: 0; font-weight: normal; cursor: pointer; } .radio input[type="radio"], .radio-inline input[type="radio"], .checkbox input[type="checkbox"], .checkbox-inline input[type="checkbox"] { position: absolute; margin-top: 4px \9; margin-left: -20px; } .radio + .radio, .checkbox + .checkbox { margin-top: -5px; } .radio-inline, .checkbox-inline { display: inline-block; padding-left: 20px; margin-bottom: 0; font-weight: normal; vertical-align: middle; cursor: pointer; } .radio-inline + .radio-inline, .checkbox-inline + .checkbox-inline { margin-top: 0; margin-left: 10px; } input[type="radio"][disabled], input[type="checkbox"][disabled], input[type="radio"].disabled, input[type="checkbox"].disabled, fieldset[disabled] input[type="radio"], fieldset[disabled] input[type="checkbox"] { cursor: not-allowed; } .radio-inline.disabled, .checkbox-inline.disabled, fieldset[disabled] .radio-inline, fieldset[disabled] .checkbox-inline { cursor: not-allowed; } .radio.disabled label, .checkbox.disabled label, fieldset[disabled] .radio label, fieldset[disabled] .checkbox label { cursor: not-allowed; } .form-control-static { padding-top: 7px; padding-bottom: 7px; margin-bottom: 0; } 
.form-control-static.input-lg, .form-control-static.input-sm { padding-right: 0; padding-left: 0; } .input-sm { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } select.input-sm { height: 30px; line-height: 30px; } textarea.input-sm, select[multiple].input-sm { height: auto; } .form-group-sm .form-control { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } select.form-group-sm .form-control { height: 30px; line-height: 30px; } textarea.form-group-sm .form-control, select[multiple].form-group-sm .form-control { height: auto; } .form-group-sm .form-control-static { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; } .input-lg { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.3333333; border-radius: 6px; } select.input-lg { height: 46px; line-height: 46px; } textarea.input-lg, select[multiple].input-lg { height: auto; } .form-group-lg .form-control { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.3333333; border-radius: 6px; } select.form-group-lg .form-control { height: 46px; line-height: 46px; } textarea.form-group-lg .form-control, select[multiple].form-group-lg .form-control { height: auto; } .form-group-lg .form-control-static { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.3333333; } .has-feedback { position: relative; } .has-feedback .form-control { padding-right: 42.5px; } .form-control-feedback { position: absolute; top: 0; right: 0; z-index: 2; display: block; width: 34px; height: 34px; line-height: 34px; text-align: center; pointer-events: none; } .input-lg + .form-control-feedback { width: 46px; height: 46px; line-height: 46px; } .input-sm + .form-control-feedback { width: 30px; height: 30px; line-height: 30px; } .has-success .help-block, .has-success .control-label, .has-success .radio, .has-success .checkbox, .has-success .radio-inline, .has-success .checkbox-inline, .has-success.radio label, .has-success.checkbox label, .has-success.radio-inline label, .has-success.checkbox-inline label { color: #3c763d; } .has-success .form-control { border-color: #3c763d; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-success .form-control:focus { border-color: #2b542c; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; } .has-success .input-group-addon { color: #3c763d; background-color: #dff0d8; border-color: #3c763d; } .has-success .form-control-feedback { color: #3c763d; } .has-warning .help-block, .has-warning .control-label, .has-warning .radio, .has-warning .checkbox, .has-warning .radio-inline, .has-warning .checkbox-inline, .has-warning.radio label, .has-warning.checkbox label, .has-warning.radio-inline label, .has-warning.checkbox-inline label { color: #8a6d3b; } .has-warning .form-control { border-color: #8a6d3b; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-warning .form-control:focus { border-color: #66512c; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; } .has-warning .input-group-addon { color: #8a6d3b; background-color: #fcf8e3; border-color: #8a6d3b; } .has-warning .form-control-feedback { color: #8a6d3b; } .has-error .help-block, .has-error .control-label, .has-error .radio, .has-error .checkbox, .has-error 
.radio-inline, .has-error .checkbox-inline, .has-error.radio label, .has-error.checkbox label, .has-error.radio-inline label, .has-error.checkbox-inline label { color: #a94442; } .has-error .form-control { border-color: #a94442; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-error .form-control:focus { border-color: #843534; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; } .has-error .input-group-addon { color: #a94442; background-color: #f2dede; border-color: #a94442; } .has-error .form-control-feedback { color: #a94442; } .has-feedback label ~ .form-control-feedback { top: 25px; } .has-feedback label.sr-only ~ .form-control-feedback { top: 0; } .help-block { display: block; margin-top: 5px; margin-bottom: 10px; color: #737373; } @media (min-width: 768px) { .form-inline .form-group { display: inline-block; margin-bottom: 0; vertical-align: middle; } .form-inline .form-control { display: inline-block; width: auto; vertical-align: middle; } .form-inline .form-control-static { display: inline-block; } .form-inline .input-group { display: inline-table; vertical-align: middle; } .form-inline .input-group .input-group-addon, .form-inline .input-group .input-group-btn, .form-inline .input-group .form-control { width: auto; } .form-inline .input-group > .form-control { width: 100%; } .form-inline .control-label { margin-bottom: 0; vertical-align: middle; } .form-inline .radio, .form-inline .checkbox { display: inline-block; margin-top: 0; margin-bottom: 0; vertical-align: middle; } .form-inline .radio label, .form-inline .checkbox label { padding-left: 0; } .form-inline .radio input[type="radio"], .form-inline .checkbox input[type="checkbox"] { position: relative; margin-left: 0; } .form-inline .has-feedback .form-control-feedback { top: 0; } } .form-horizontal .radio, .form-horizontal .checkbox, .form-horizontal .radio-inline, .form-horizontal .checkbox-inline { padding-top: 7px; margin-top: 0; margin-bottom: 0; } .form-horizontal .radio, .form-horizontal .checkbox { min-height: 27px; } .form-horizontal .form-group { margin-right: -15px; margin-left: -15px; } @media (min-width: 768px) { .form-horizontal .control-label { padding-top: 7px; margin-bottom: 0; text-align: right; } } .form-horizontal .has-feedback .form-control-feedback { right: 15px; } @media (min-width: 768px) { .form-horizontal .form-group-lg .control-label { padding-top: 14.333333px; } } @media (min-width: 768px) { .form-horizontal .form-group-sm .control-label { padding-top: 6px; } } .btn { display: inline-block; padding: 6px 12px; margin-bottom: 0; font-size: 14px; font-weight: normal; line-height: 1.42857143; text-align: center; white-space: nowrap; vertical-align: middle; -ms-touch-action: manipulation; touch-action: manipulation; cursor: pointer; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; background-image: none; border: 1px solid transparent; border-radius: 4px; } .btn:focus, .btn:active:focus, .btn.active:focus, .btn.focus, .btn:active.focus, .btn.active.focus { outline: thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } .btn:hover, .btn:focus, .btn.focus { color: #333; text-decoration: none; } .btn:active, .btn.active { background-image: none; outline: 0; -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn.disabled, 
.btn[disabled], fieldset[disabled] .btn { pointer-events: none; cursor: not-allowed; filter: alpha(opacity=65); -webkit-box-shadow: none; box-shadow: none; opacity: .65; } .btn-default { color: #333; background-color: #fff; border-color: #ccc; } .btn-default:hover, .btn-default:focus, .btn-default.focus, .btn-default:active, .btn-default.active, .open > .dropdown-toggle.btn-default { color: #333; background-color: #e6e6e6; border-color: #adadad; } .btn-default:active, .btn-default.active, .open > .dropdown-toggle.btn-default { background-image: none; } .btn-default.disabled, .btn-default[disabled], fieldset[disabled] .btn-default, .btn-default.disabled:hover, .btn-default[disabled]:hover, fieldset[disabled] .btn-default:hover, .btn-default.disabled:focus, .btn-default[disabled]:focus, fieldset[disabled] .btn-default:focus, .btn-default.disabled.focus, .btn-default[disabled].focus, fieldset[disabled] .btn-default.focus, .btn-default.disabled:active, .btn-default[disabled]:active, fieldset[disabled] .btn-default:active, .btn-default.disabled.active, .btn-default[disabled].active, fieldset[disabled] .btn-default.active { background-color: #fff; border-color: #ccc; } .btn-default .badge { color: #fff; background-color: #333; } .btn-primary { color: #fff; background-color: #337ab7; border-color: #2e6da4; } .btn-primary:hover, .btn-primary:focus, .btn-primary.focus, .btn-primary:active, .btn-primary.active, .open > .dropdown-toggle.btn-primary { color: #fff; background-color: #286090; border-color: #204d74; } .btn-primary:active, .btn-primary.active, .open > .dropdown-toggle.btn-primary { background-image: none; } .btn-primary.disabled, .btn-primary[disabled], fieldset[disabled] .btn-primary, .btn-primary.disabled:hover, .btn-primary[disabled]:hover, fieldset[disabled] .btn-primary:hover, .btn-primary.disabled:focus, .btn-primary[disabled]:focus, fieldset[disabled] .btn-primary:focus, .btn-primary.disabled.focus, .btn-primary[disabled].focus, fieldset[disabled] .btn-primary.focus, .btn-primary.disabled:active, .btn-primary[disabled]:active, fieldset[disabled] .btn-primary:active, .btn-primary.disabled.active, .btn-primary[disabled].active, fieldset[disabled] .btn-primary.active { background-color: #337ab7; border-color: #2e6da4; } .btn-primary .badge { color: #337ab7; background-color: #fff; } .btn-success { color: #fff; background-color: #5cb85c; border-color: #4cae4c; } .btn-success:hover, .btn-success:focus, .btn-success.focus, .btn-success:active, .btn-success.active, .open > .dropdown-toggle.btn-success { color: #fff; background-color: #449d44; border-color: #398439; } .btn-success:active, .btn-success.active, .open > .dropdown-toggle.btn-success { background-image: none; } .btn-success.disabled, .btn-success[disabled], fieldset[disabled] .btn-success, .btn-success.disabled:hover, .btn-success[disabled]:hover, fieldset[disabled] .btn-success:hover, .btn-success.disabled:focus, .btn-success[disabled]:focus, fieldset[disabled] .btn-success:focus, .btn-success.disabled.focus, .btn-success[disabled].focus, fieldset[disabled] .btn-success.focus, .btn-success.disabled:active, .btn-success[disabled]:active, fieldset[disabled] .btn-success:active, .btn-success.disabled.active, .btn-success[disabled].active, fieldset[disabled] .btn-success.active { background-color: #5cb85c; border-color: #4cae4c; } .btn-success .badge { color: #5cb85c; background-color: #fff; } .btn-info { color: #fff; background-color: #5bc0de; border-color: #46b8da; } .btn-info:hover, .btn-info:focus, .btn-info.focus, 
.btn-info:active, .btn-info.active, .open > .dropdown-toggle.btn-info { color: #fff; background-color: #31b0d5; border-color: #269abc; } .btn-info:active, .btn-info.active, .open > .dropdown-toggle.btn-info { background-image: none; } .btn-info.disabled, .btn-info[disabled], fieldset[disabled] .btn-info, .btn-info.disabled:hover, .btn-info[disabled]:hover, fieldset[disabled] .btn-info:hover, .btn-info.disabled:focus, .btn-info[disabled]:focus, fieldset[disabled] .btn-info:focus, .btn-info.disabled.focus, .btn-info[disabled].focus, fieldset[disabled] .btn-info.focus, .btn-info.disabled:active, .btn-info[disabled]:active, fieldset[disabled] .btn-info:active, .btn-info.disabled.active, .btn-info[disabled].active, fieldset[disabled] .btn-info.active { background-color: #5bc0de; border-color: #46b8da; } .btn-info .badge { color: #5bc0de; background-color: #fff; } .btn-warning { color: #fff; background-color: #f0ad4e; border-color: #eea236; } .btn-warning:hover, .btn-warning:focus, .btn-warning.focus, .btn-warning:active, .btn-warning.active, .open > .dropdown-toggle.btn-warning { color: #fff; background-color: #ec971f; border-color: #d58512; } .btn-warning:active, .btn-warning.active, .open > .dropdown-toggle.btn-warning { background-image: none; } .btn-warning.disabled, .btn-warning[disabled], fieldset[disabled] .btn-warning, .btn-warning.disabled:hover, .btn-warning[disabled]:hover, fieldset[disabled] .btn-warning:hover, .btn-warning.disabled:focus, .btn-warning[disabled]:focus, fieldset[disabled] .btn-warning:focus, .btn-warning.disabled.focus, .btn-warning[disabled].focus, fieldset[disabled] .btn-warning.focus, .btn-warning.disabled:active, .btn-warning[disabled]:active, fieldset[disabled] .btn-warning:active, .btn-warning.disabled.active, .btn-warning[disabled].active, fieldset[disabled] .btn-warning.active { background-color: #f0ad4e; border-color: #eea236; } .btn-warning .badge { color: #f0ad4e; background-color: #fff; } .btn-danger { color: #fff; background-color: #d9534f; border-color: #d43f3a; } .btn-danger:hover, .btn-danger:focus, .btn-danger.focus, .btn-danger:active, .btn-danger.active, .open > .dropdown-toggle.btn-danger { color: #fff; background-color: #c9302c; border-color: #ac2925; } .btn-danger:active, .btn-danger.active, .open > .dropdown-toggle.btn-danger { background-image: none; } .btn-danger.disabled, .btn-danger[disabled], fieldset[disabled] .btn-danger, .btn-danger.disabled:hover, .btn-danger[disabled]:hover, fieldset[disabled] .btn-danger:hover, .btn-danger.disabled:focus, .btn-danger[disabled]:focus, fieldset[disabled] .btn-danger:focus, .btn-danger.disabled.focus, .btn-danger[disabled].focus, fieldset[disabled] .btn-danger.focus, .btn-danger.disabled:active, .btn-danger[disabled]:active, fieldset[disabled] .btn-danger:active, .btn-danger.disabled.active, .btn-danger[disabled].active, fieldset[disabled] .btn-danger.active { background-color: #d9534f; border-color: #d43f3a; } .btn-danger .badge { color: #d9534f; background-color: #fff; } .btn-link { font-weight: normal; color: #337ab7; border-radius: 0; } .btn-link, .btn-link:active, .btn-link.active, .btn-link[disabled], fieldset[disabled] .btn-link { background-color: transparent; -webkit-box-shadow: none; box-shadow: none; } .btn-link, .btn-link:hover, .btn-link:focus, .btn-link:active { border-color: transparent; } .btn-link:hover, .btn-link:focus { color: #23527c; text-decoration: underline; background-color: transparent; } .btn-link[disabled]:hover, fieldset[disabled] .btn-link:hover, .btn-link[disabled]:focus, 
fieldset[disabled] .btn-link:focus { color: #777; text-decoration: none; } .btn-lg, .btn-group-lg > .btn { padding: 10px 16px; font-size: 18px; line-height: 1.3333333; border-radius: 6px; } .btn-sm, .btn-group-sm > .btn { padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-xs, .btn-group-xs > .btn { padding: 1px 5px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-block { display: block; width: 100%; } .btn-block + .btn-block { margin-top: 5px; } input[type="submit"].btn-block, input[type="reset"].btn-block, input[type="button"].btn-block { width: 100%; } .fade { opacity: 0; -webkit-transition: opacity .15s linear; -o-transition: opacity .15s linear; transition: opacity .15s linear; } .fade.in { opacity: 1; } .collapse { display: none; visibility: hidden; } .collapse.in { display: block; visibility: visible; } tr.collapse.in { display: table-row; } tbody.collapse.in { display: table-row-group; } .collapsing { position: relative; height: 0; overflow: hidden; -webkit-transition-timing-function: ease; -o-transition-timing-function: ease; transition-timing-function: ease; -webkit-transition-duration: .35s; -o-transition-duration: .35s; transition-duration: .35s; -webkit-transition-property: height, visibility; -o-transition-property: height, visibility; transition-property: height, visibility; } .caret { display: inline-block; width: 0; height: 0; margin-left: 2px; vertical-align: middle; border-top: 4px solid; border-right: 4px solid transparent; border-left: 4px solid transparent; } .dropup, .dropdown { position: relative; } .dropdown-toggle:focus { outline: 0; } .dropdown-menu { position: absolute; top: 100%; left: 0; z-index: 1000; display: none; float: left; min-width: 160px; padding: 5px 0; margin: 2px 0 0; font-size: 14px; text-align: left; list-style: none; background-color: #fff; -webkit-background-clip: padding-box; background-clip: padding-box; border: 1px solid #ccc; border: 1px solid rgba(0, 0, 0, .15); border-radius: 4px; -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175); box-shadow: 0 6px 12px rgba(0, 0, 0, .175); } .dropdown-menu.pull-right { right: 0; left: auto; } .dropdown-menu .divider { height: 1px; margin: 9px 0; overflow: hidden; background-color: #e5e5e5; } .dropdown-menu > li > a { display: block; padding: 3px 20px; clear: both; font-weight: normal; line-height: 1.42857143; color: #333; white-space: nowrap; } .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus { color: #262626; text-decoration: none; background-color: #f5f5f5; } .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > a:focus { color: #fff; text-decoration: none; background-color: #337ab7; outline: 0; } .dropdown-menu > .disabled > a, .dropdown-menu > .disabled > a:hover, .dropdown-menu > .disabled > a:focus { color: #777; } .dropdown-menu > .disabled > a:hover, .dropdown-menu > .disabled > a:focus { text-decoration: none; cursor: not-allowed; background-color: transparent; background-image: none; filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); } .open > .dropdown-menu { display: block; } .open > a { outline: 0; } .dropdown-menu-right { right: 0; left: auto; } .dropdown-menu-left { right: auto; left: 0; } .dropdown-header { display: block; padding: 3px 20px; font-size: 12px; line-height: 1.42857143; color: #777; white-space: nowrap; } .dropdown-backdrop { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 990; } .pull-right > .dropdown-menu { right: 0; left: auto; } .dropup .caret, 
.navbar-fixed-bottom .dropdown .caret { content: ""; border-top: 0; border-bottom: 4px solid; } .dropup .dropdown-menu, .navbar-fixed-bottom .dropdown .dropdown-menu { top: auto; bottom: 100%; margin-bottom: 2px; } @media (min-width: 768px) { .navbar-right .dropdown-menu { right: 0; left: auto; } .navbar-right .dropdown-menu-left { right: auto; left: 0; } } .btn-group, .btn-group-vertical { position: relative; display: inline-block; vertical-align: middle; } .btn-group > .btn, .btn-group-vertical > .btn { position: relative; float: left; } .btn-group > .btn:hover, .btn-group-vertical > .btn:hover, .btn-group > .btn:focus, .btn-group-vertical > .btn:focus, .btn-group > .btn:active, .btn-group-vertical > .btn:active, .btn-group > .btn.active, .btn-group-vertical > .btn.active { z-index: 2; } .btn-group .btn + .btn, .btn-group .btn + .btn-group, .btn-group .btn-group + .btn, .btn-group .btn-group + .btn-group { margin-left: -1px; } .btn-toolbar { margin-left: -5px; } .btn-toolbar .btn-group, .btn-toolbar .input-group { float: left; } .btn-toolbar > .btn, .btn-toolbar > .btn-group, .btn-toolbar > .input-group { margin-left: 5px; } .btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { border-radius: 0; } .btn-group > .btn:first-child { margin-left: 0; } .btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { border-top-right-radius: 0; border-bottom-right-radius: 0; } .btn-group > .btn:last-child:not(:first-child), .btn-group > .dropdown-toggle:not(:first-child) { border-top-left-radius: 0; border-bottom-left-radius: 0; } .btn-group > .btn-group { float: left; } .btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { border-radius: 0; } .btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, .btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { border-top-right-radius: 0; border-bottom-right-radius: 0; } .btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { border-top-left-radius: 0; border-bottom-left-radius: 0; } .btn-group .dropdown-toggle:active, .btn-group.open .dropdown-toggle { outline: 0; } .btn-group > .btn + .dropdown-toggle { padding-right: 8px; padding-left: 8px; } .btn-group > .btn-lg + .dropdown-toggle { padding-right: 12px; padding-left: 12px; } .btn-group.open .dropdown-toggle { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn-group.open .dropdown-toggle.btn-link { -webkit-box-shadow: none; box-shadow: none; } .btn .caret { margin-left: 0; } .btn-lg .caret { border-width: 5px 5px 0; border-bottom-width: 0; } .dropup .btn-lg .caret { border-width: 0 5px 5px; } .btn-group-vertical > .btn, .btn-group-vertical > .btn-group, .btn-group-vertical > .btn-group > .btn { display: block; float: none; width: 100%; max-width: 100%; } .btn-group-vertical > .btn-group > .btn { float: none; } .btn-group-vertical > .btn + .btn, .btn-group-vertical > .btn + .btn-group, .btn-group-vertical > .btn-group + .btn, .btn-group-vertical > .btn-group + .btn-group { margin-top: -1px; margin-left: 0; } .btn-group-vertical > .btn:not(:first-child):not(:last-child) { border-radius: 0; } .btn-group-vertical > .btn:first-child:not(:last-child) { border-top-right-radius: 4px; border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .btn-group-vertical > .btn:last-child:not(:first-child) { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-left-radius: 4px; } .btn-group-vertical > 
.btn-group:not(:first-child):not(:last-child) > .btn { border-radius: 0; } .btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, .btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { border-top-left-radius: 0; border-top-right-radius: 0; } .btn-group-justified { display: table; width: 100%; table-layout: fixed; border-collapse: separate; } .btn-group-justified > .btn, .btn-group-justified > .btn-group { display: table-cell; float: none; width: 1%; } .btn-group-justified > .btn-group .btn { width: 100%; } .btn-group-justified > .btn-group .dropdown-menu { left: auto; } [data-toggle="buttons"] > .btn input[type="radio"], [data-toggle="buttons"] > .btn-group > .btn input[type="radio"], [data-toggle="buttons"] > .btn input[type="checkbox"], [data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] { position: absolute; clip: rect(0, 0, 0, 0); pointer-events: none; } .input-group { position: relative; display: table; border-collapse: separate; } .input-group[class*="col-"] { float: none; padding-right: 0; padding-left: 0; } .input-group .form-control { position: relative; z-index: 2; float: left; width: 100%; margin-bottom: 0; } .input-group-lg > .form-control, .input-group-lg > .input-group-addon, .input-group-lg > .input-group-btn > .btn { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.3333333; border-radius: 6px; } select.input-group-lg > .form-control, select.input-group-lg > .input-group-addon, select.input-group-lg > .input-group-btn > .btn { height: 46px; line-height: 46px; } textarea.input-group-lg > .form-control, textarea.input-group-lg > .input-group-addon, textarea.input-group-lg > .input-group-btn > .btn, select[multiple].input-group-lg > .form-control, select[multiple].input-group-lg > .input-group-addon, select[multiple].input-group-lg > .input-group-btn > .btn { height: auto; } .input-group-sm > .form-control, .input-group-sm > .input-group-addon, .input-group-sm > .input-group-btn > .btn { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } select.input-group-sm > .form-control, select.input-group-sm > .input-group-addon, select.input-group-sm > .input-group-btn > .btn { height: 30px; line-height: 30px; } textarea.input-group-sm > .form-control, textarea.input-group-sm > .input-group-addon, textarea.input-group-sm > .input-group-btn > .btn, select[multiple].input-group-sm > .form-control, select[multiple].input-group-sm > .input-group-addon, select[multiple].input-group-sm > .input-group-btn > .btn { height: auto; } .input-group-addon, .input-group-btn, .input-group .form-control { display: table-cell; } .input-group-addon:not(:first-child):not(:last-child), .input-group-btn:not(:first-child):not(:last-child), .input-group .form-control:not(:first-child):not(:last-child) { border-radius: 0; } .input-group-addon, .input-group-btn { width: 1%; white-space: nowrap; vertical-align: middle; } .input-group-addon { padding: 6px 12px; font-size: 14px; font-weight: normal; line-height: 1; color: #555; text-align: center; background-color: #eee; border: 1px solid #ccc; border-radius: 4px; } .input-group-addon.input-sm { padding: 5px 10px; font-size: 12px; border-radius: 3px; } .input-group-addon.input-lg { padding: 10px 16px; font-size: 18px; border-radius: 6px; } .input-group-addon input[type="radio"], .input-group-addon 
input[type="checkbox"] { margin-top: 0; } .input-group .form-control:first-child, .input-group-addon:first-child, .input-group-btn:first-child > .btn, .input-group-btn:first-child > .btn-group > .btn, .input-group-btn:first-child > .dropdown-toggle, .input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), .input-group-btn:last-child > .btn-group:not(:last-child) > .btn { border-top-right-radius: 0; border-bottom-right-radius: 0; } .input-group-addon:first-child { border-right: 0; } .input-group .form-control:last-child, .input-group-addon:last-child, .input-group-btn:last-child > .btn, .input-group-btn:last-child > .btn-group > .btn, .input-group-btn:last-child > .dropdown-toggle, .input-group-btn:first-child > .btn:not(:first-child), .input-group-btn:first-child > .btn-group:not(:first-child) > .btn { border-top-left-radius: 0; border-bottom-left-radius: 0; } .input-group-addon:last-child { border-left: 0; } .input-group-btn { position: relative; font-size: 0; white-space: nowrap; } .input-group-btn > .btn { position: relative; } .input-group-btn > .btn + .btn { margin-left: -1px; } .input-group-btn > .btn:hover, .input-group-btn > .btn:focus, .input-group-btn > .btn:active { z-index: 2; } .input-group-btn:first-child > .btn, .input-group-btn:first-child > .btn-group { margin-right: -1px; } .input-group-btn:last-child > .btn, .input-group-btn:last-child > .btn-group { margin-left: -1px; } .nav { padding-left: 0; margin-bottom: 0; list-style: none; } .nav > li { position: relative; display: block; } .nav > li > a { position: relative; display: block; padding: 10px 15px; } .nav > li > a:hover, .nav > li > a:focus { text-decoration: none; background-color: #eee; } .nav > li.disabled > a { color: #777; } .nav > li.disabled > a:hover, .nav > li.disabled > a:focus { color: #777; text-decoration: none; cursor: not-allowed; background-color: transparent; } .nav .open > a, .nav .open > a:hover, .nav .open > a:focus { background-color: #eee; border-color: #337ab7; } .nav .nav-divider { height: 1px; margin: 9px 0; overflow: hidden; background-color: #e5e5e5; } .nav > li > a > img { max-width: none; } .nav-tabs { border-bottom: 1px solid #ddd; } .nav-tabs > li { float: left; margin-bottom: -1px; } .nav-tabs > li > a { margin-right: 2px; line-height: 1.42857143; border: 1px solid transparent; border-radius: 4px 4px 0 0; } .nav-tabs > li > a:hover { border-color: #eee #eee #ddd; } .nav-tabs > li.active > a, .nav-tabs > li.active > a:hover, .nav-tabs > li.active > a:focus { color: #555; cursor: default; background-color: #fff; border: 1px solid #ddd; border-bottom-color: transparent; } .nav-tabs.nav-justified { width: 100%; border-bottom: 0; } .nav-tabs.nav-justified > li { float: none; } .nav-tabs.nav-justified > li > a { margin-bottom: 5px; text-align: center; } .nav-tabs.nav-justified > .dropdown .dropdown-menu { top: auto; left: auto; } @media (min-width: 768px) { .nav-tabs.nav-justified > li { display: table-cell; width: 1%; } .nav-tabs.nav-justified > li > a { margin-bottom: 0; } } .nav-tabs.nav-justified > li > a { margin-right: 0; border-radius: 4px; } .nav-tabs.nav-justified > .active > a, .nav-tabs.nav-justified > .active > a:hover, .nav-tabs.nav-justified > .active > a:focus { border: 1px solid #ddd; } @media (min-width: 768px) { .nav-tabs.nav-justified > li > a { border-bottom: 1px solid #ddd; border-radius: 4px 4px 0 0; } .nav-tabs.nav-justified > .active > a, .nav-tabs.nav-justified > .active > a:hover, .nav-tabs.nav-justified > .active > a:focus { border-bottom-color: 
#fff; } } .nav-pills > li { float: left; } .nav-pills > li > a { border-radius: 4px; } .nav-pills > li + li { margin-left: 2px; } .nav-pills > li.active > a, .nav-pills > li.active > a:hover, .nav-pills > li.active > a:focus { color: #fff; background-color: #337ab7; } .nav-stacked > li { float: none; } .nav-stacked > li + li { margin-top: 2px; margin-left: 0; } .nav-justified { width: 100%; } .nav-justified > li { float: none; } .nav-justified > li > a { margin-bottom: 5px; text-align: center; } .nav-justified > .dropdown .dropdown-menu { top: auto; left: auto; } @media (min-width: 768px) { .nav-justified > li { display: table-cell; width: 1%; } .nav-justified > li > a { margin-bottom: 0; } } .nav-tabs-justified { border-bottom: 0; } .nav-tabs-justified > li > a { margin-right: 0; border-radius: 4px; } .nav-tabs-justified > .active > a, .nav-tabs-justified > .active > a:hover, .nav-tabs-justified > .active > a:focus { border: 1px solid #ddd; } @media (min-width: 768px) { .nav-tabs-justified > li > a { border-bottom: 1px solid #ddd; border-radius: 4px 4px 0 0; } .nav-tabs-justified > .active > a, .nav-tabs-justified > .active > a:hover, .nav-tabs-justified > .active > a:focus { border-bottom-color: #fff; } } .tab-content > .tab-pane { display: none; visibility: hidden; } .tab-content > .active { display: block; visibility: visible; } .nav-tabs .dropdown-menu { margin-top: -1px; border-top-left-radius: 0; border-top-right-radius: 0; } .navbar { position: relative; min-height: 50px; margin-bottom: 20px; border: 1px solid transparent; } @media (min-width: 768px) { .navbar { border-radius: 4px; } } @media (min-width: 768px) { .navbar-header { float: left; } } .navbar-collapse { padding-right: 15px; padding-left: 15px; overflow-x: visible; -webkit-overflow-scrolling: touch; border-top: 1px solid transparent; -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); } .navbar-collapse.in { overflow-y: auto; } @media (min-width: 768px) { .navbar-collapse { width: auto; border-top: 0; -webkit-box-shadow: none; box-shadow: none; } .navbar-collapse.collapse { display: block !important; height: auto !important; padding-bottom: 0; overflow: visible !important; visibility: visible !important; } .navbar-collapse.in { overflow-y: visible; } .navbar-fixed-top .navbar-collapse, .navbar-static-top .navbar-collapse, .navbar-fixed-bottom .navbar-collapse { padding-right: 0; padding-left: 0; } } .navbar-fixed-top .navbar-collapse, .navbar-fixed-bottom .navbar-collapse { max-height: 340px; } @media (max-device-width: 480px) and (orientation: landscape) { .navbar-fixed-top .navbar-collapse, .navbar-fixed-bottom .navbar-collapse { max-height: 200px; } } .container > .navbar-header, .container-fluid > .navbar-header, .container > .navbar-collapse, .container-fluid > .navbar-collapse { margin-right: -15px; margin-left: -15px; } @media (min-width: 768px) { .container > .navbar-header, .container-fluid > .navbar-header, .container > .navbar-collapse, .container-fluid > .navbar-collapse { margin-right: 0; margin-left: 0; } } .navbar-static-top { z-index: 1000; border-width: 0 0 1px; } @media (min-width: 768px) { .navbar-static-top { border-radius: 0; } } .navbar-fixed-top, .navbar-fixed-bottom { position: fixed; right: 0; left: 0; z-index: 1030; } @media (min-width: 768px) { .navbar-fixed-top, .navbar-fixed-bottom { border-radius: 0; } } .navbar-fixed-top { top: 0; border-width: 0 0 1px; } .navbar-fixed-bottom { bottom: 0; margin-bottom: 0; border-width: 1px 0 0; } 
.navbar-brand { float: left; height: 50px; padding: 15px 15px; font-size: 18px; line-height: 20px; } .navbar-brand:hover, .navbar-brand:focus { text-decoration: none; } .navbar-brand > img { display: block; } @media (min-width: 768px) { .navbar > .container .navbar-brand, .navbar > .container-fluid .navbar-brand { margin-left: -15px; } } .navbar-toggle { position: relative; float: right; padding: 9px 10px; margin-top: 8px; margin-right: 15px; margin-bottom: 8px; background-color: transparent; background-image: none; border: 1px solid transparent; border-radius: 4px; } .navbar-toggle:focus { outline: 0; } .navbar-toggle .icon-bar { display: block; width: 22px; height: 2px; border-radius: 1px; } .navbar-toggle .icon-bar + .icon-bar { margin-top: 4px; } @media (min-width: 768px) { .navbar-toggle { display: none; } } .navbar-nav { margin: 7.5px -15px; } .navbar-nav > li > a { padding-top: 10px; padding-bottom: 10px; line-height: 20px; } @media (max-width: 767px) { .navbar-nav .open .dropdown-menu { position: static; float: none; width: auto; margin-top: 0; background-color: transparent; border: 0; -webkit-box-shadow: none; box-shadow: none; } .navbar-nav .open .dropdown-menu > li > a, .navbar-nav .open .dropdown-menu .dropdown-header { padding: 5px 15px 5px 25px; } .navbar-nav .open .dropdown-menu > li > a { line-height: 20px; } .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-nav .open .dropdown-menu > li > a:focus { background-image: none; } } @media (min-width: 768px) { .navbar-nav { float: left; margin: 0; } .navbar-nav > li { float: left; } .navbar-nav > li > a { padding-top: 15px; padding-bottom: 15px; } } .navbar-form { padding: 10px 15px; margin-top: 8px; margin-right: -15px; margin-bottom: 8px; margin-left: -15px; border-top: 1px solid transparent; border-bottom: 1px solid transparent; -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); } @media (min-width: 768px) { .navbar-form .form-group { display: inline-block; margin-bottom: 0; vertical-align: middle; } .navbar-form .form-control { display: inline-block; width: auto; vertical-align: middle; } .navbar-form .form-control-static { display: inline-block; } .navbar-form .input-group { display: inline-table; vertical-align: middle; } .navbar-form .input-group .input-group-addon, .navbar-form .input-group .input-group-btn, .navbar-form .input-group .form-control { width: auto; } .navbar-form .input-group > .form-control { width: 100%; } .navbar-form .control-label { margin-bottom: 0; vertical-align: middle; } .navbar-form .radio, .navbar-form .checkbox { display: inline-block; margin-top: 0; margin-bottom: 0; vertical-align: middle; } .navbar-form .radio label, .navbar-form .checkbox label { padding-left: 0; } .navbar-form .radio input[type="radio"], .navbar-form .checkbox input[type="checkbox"] { position: relative; margin-left: 0; } .navbar-form .has-feedback .form-control-feedback { top: 0; } } @media (max-width: 767px) { .navbar-form .form-group { margin-bottom: 5px; } .navbar-form .form-group:last-child { margin-bottom: 0; } } @media (min-width: 768px) { .navbar-form { width: auto; padding-top: 0; padding-bottom: 0; margin-right: 0; margin-left: 0; border: 0; -webkit-box-shadow: none; box-shadow: none; } } .navbar-nav > li > .dropdown-menu { margin-top: 0; border-top-left-radius: 0; border-top-right-radius: 0; } .navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { margin-bottom: 0; 
border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .navbar-btn { margin-top: 8px; margin-bottom: 8px; } .navbar-btn.btn-sm { margin-top: 10px; margin-bottom: 10px; } .navbar-btn.btn-xs { margin-top: 14px; margin-bottom: 14px; } .navbar-text { margin-top: 15px; margin-bottom: 15px; } @media (min-width: 768px) { .navbar-text { float: left; margin-right: 15px; margin-left: 15px; } } @media (min-width: 768px) { .navbar-left { float: left !important; } .navbar-right { float: right !important; margin-right: -15px; } .navbar-right ~ .navbar-right { margin-right: 0; } } .navbar-default { background-color: #f8f8f8; border-color: #e7e7e7; } .navbar-default .navbar-brand { color: #777; } .navbar-default .navbar-brand:hover, .navbar-default .navbar-brand:focus { color: #5e5e5e; background-color: transparent; } .navbar-default .navbar-text { color: #777; } .navbar-default .navbar-nav > li > a { color: #777; } .navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus { color: #333; background-color: transparent; } .navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus { color: #555; background-color: #e7e7e7; } .navbar-default .navbar-nav > .disabled > a, .navbar-default .navbar-nav > .disabled > a:hover, .navbar-default .navbar-nav > .disabled > a:focus { color: #ccc; background-color: transparent; } .navbar-default .navbar-toggle { border-color: #ddd; } .navbar-default .navbar-toggle:hover, .navbar-default .navbar-toggle:focus { background-color: #ddd; } .navbar-default .navbar-toggle .icon-bar { background-color: #888; } .navbar-default .navbar-collapse, .navbar-default .navbar-form { border-color: #e7e7e7; } .navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .open > a:hover, .navbar-default .navbar-nav > .open > a:focus { color: #555; background-color: #e7e7e7; } @media (max-width: 767px) { .navbar-default .navbar-nav .open .dropdown-menu > li > a { color: #777; } .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { color: #333; background-color: transparent; } .navbar-default .navbar-nav .open .dropdown-menu > .active > a, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { color: #555; background-color: #e7e7e7; } .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { color: #ccc; background-color: transparent; } } .navbar-default .navbar-link { color: #777; } .navbar-default .navbar-link:hover { color: #333; } .navbar-default .btn-link { color: #777; } .navbar-default .btn-link:hover, .navbar-default .btn-link:focus { color: #333; } .navbar-default .btn-link[disabled]:hover, fieldset[disabled] .navbar-default .btn-link:hover, .navbar-default .btn-link[disabled]:focus, fieldset[disabled] .navbar-default .btn-link:focus { color: #ccc; } .navbar-inverse { background-color: #222; border-color: #080808; } .navbar-inverse .navbar-brand { color: #9d9d9d; } .navbar-inverse .navbar-brand:hover, .navbar-inverse .navbar-brand:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-text { color: #9d9d9d; } .navbar-inverse .navbar-nav > li > a { color: #9d9d9d; } 
.navbar-inverse .navbar-nav > li > a:hover, .navbar-inverse .navbar-nav > li > a:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-nav > .active > a, .navbar-inverse .navbar-nav > .active > a:hover, .navbar-inverse .navbar-nav > .active > a:focus { color: #fff; background-color: #080808; } .navbar-inverse .navbar-nav > .disabled > a, .navbar-inverse .navbar-nav > .disabled > a:hover, .navbar-inverse .navbar-nav > .disabled > a:focus { color: #444; background-color: transparent; } .navbar-inverse .navbar-toggle { border-color: #333; } .navbar-inverse .navbar-toggle:hover, .navbar-inverse .navbar-toggle:focus { background-color: #333; } .navbar-inverse .navbar-toggle .icon-bar { background-color: #fff; } .navbar-inverse .navbar-collapse, .navbar-inverse .navbar-form { border-color: #101010; } .navbar-inverse .navbar-nav > .open > a, .navbar-inverse .navbar-nav > .open > a:hover, .navbar-inverse .navbar-nav > .open > a:focus { color: #fff; background-color: #080808; } @media (max-width: 767px) { .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { border-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu .divider { background-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { color: #9d9d9d; } .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { color: #fff; background-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a, .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { color: #444; background-color: transparent; } } .navbar-inverse .navbar-link { color: #9d9d9d; } .navbar-inverse .navbar-link:hover { color: #fff; } .navbar-inverse .btn-link { color: #9d9d9d; } .navbar-inverse .btn-link:hover, .navbar-inverse .btn-link:focus { color: #fff; } .navbar-inverse .btn-link[disabled]:hover, fieldset[disabled] .navbar-inverse .btn-link:hover, .navbar-inverse .btn-link[disabled]:focus, fieldset[disabled] .navbar-inverse .btn-link:focus { color: #444; } .breadcrumb { padding: 8px 15px; margin-bottom: 20px; list-style: none; background-color: #f5f5f5; border-radius: 4px; } .breadcrumb > li { display: inline-block; } .breadcrumb > li + li:before { padding: 0 5px; color: #ccc; content: "/\00a0"; } .breadcrumb > .active { color: #777; } .pagination { display: inline-block; padding-left: 0; margin: 20px 0; border-radius: 4px; } .pagination > li { display: inline; } .pagination > li > a, .pagination > li > span { position: relative; float: left; padding: 6px 12px; margin-left: -1px; line-height: 1.42857143; color: #337ab7; text-decoration: none; background-color: #fff; border: 1px solid #ddd; } .pagination > li:first-child > a, .pagination > li:first-child > span { margin-left: 0; border-top-left-radius: 4px; border-bottom-left-radius: 4px; } .pagination > li:last-child > a, .pagination > li:last-child > span { border-top-right-radius: 4px; border-bottom-right-radius: 4px; } .pagination > li > a:hover, .pagination > li > span:hover, .pagination > li > a:focus, .pagination > li > span:focus { color: #23527c; background-color: #eee; border-color: #ddd; } .pagination > .active > 
a, .pagination > .active > span, .pagination > .active > a:hover, .pagination > .active > span:hover, .pagination > .active > a:focus, .pagination > .active > span:focus { z-index: 2; color: #fff; cursor: default; background-color: #337ab7; border-color: #337ab7; } .pagination > .disabled > span, .pagination > .disabled > span:hover, .pagination > .disabled > span:focus, .pagination > .disabled > a, .pagination > .disabled > a:hover, .pagination > .disabled > a:focus { color: #777; cursor: not-allowed; background-color: #fff; border-color: #ddd; } .pagination-lg > li > a, .pagination-lg > li > span { padding: 10px 16px; font-size: 18px; } .pagination-lg > li:first-child > a, .pagination-lg > li:first-child > span { border-top-left-radius: 6px; border-bottom-left-radius: 6px; } .pagination-lg > li:last-child > a, .pagination-lg > li:last-child > span { border-top-right-radius: 6px; border-bottom-right-radius: 6px; } .pagination-sm > li > a, .pagination-sm > li > span { padding: 5px 10px; font-size: 12px; } .pagination-sm > li:first-child > a, .pagination-sm > li:first-child > span { border-top-left-radius: 3px; border-bottom-left-radius: 3px; } .pagination-sm > li:last-child > a, .pagination-sm > li:last-child > span { border-top-right-radius: 3px; border-bottom-right-radius: 3px; } .pager { padding-left: 0; margin: 20px 0; text-align: center; list-style: none; } .pager li { display: inline; } .pager li > a, .pager li > span { display: inline-block; padding: 5px 14px; background-color: #fff; border: 1px solid #ddd; border-radius: 15px; } .pager li > a:hover, .pager li > a:focus { text-decoration: none; background-color: #eee; } .pager .next > a, .pager .next > span { float: right; } .pager .previous > a, .pager .previous > span { float: left; } .pager .disabled > a, .pager .disabled > a:hover, .pager .disabled > a:focus, .pager .disabled > span { color: #777; cursor: not-allowed; background-color: #fff; } .label { display: inline; padding: .2em .6em .3em; font-size: 75%; font-weight: bold; line-height: 1; color: #fff; text-align: center; white-space: nowrap; vertical-align: baseline; border-radius: .25em; } a.label:hover, a.label:focus { color: #fff; text-decoration: none; cursor: pointer; } .label:empty { display: none; } .btn .label { position: relative; top: -1px; } .label-default { background-color: #777; } .label-default[href]:hover, .label-default[href]:focus { background-color: #5e5e5e; } .label-primary { background-color: #337ab7; } .label-primary[href]:hover, .label-primary[href]:focus { background-color: #286090; } .label-success { background-color: #5cb85c; } .label-success[href]:hover, .label-success[href]:focus { background-color: #449d44; } .label-info { background-color: #5bc0de; } .label-info[href]:hover, .label-info[href]:focus { background-color: #31b0d5; } .label-warning { background-color: #f0ad4e; } .label-warning[href]:hover, .label-warning[href]:focus { background-color: #ec971f; } .label-danger { background-color: #d9534f; } .label-danger[href]:hover, .label-danger[href]:focus { background-color: #c9302c; } .badge { display: inline-block; min-width: 10px; padding: 3px 7px; font-size: 12px; font-weight: bold; line-height: 1; color: #fff; text-align: center; white-space: nowrap; vertical-align: baseline; background-color: #777; border-radius: 10px; } .badge:empty { display: none; } .btn .badge { position: relative; top: -1px; } .btn-xs .badge { top: 0; padding: 1px 5px; } a.badge:hover, a.badge:focus { color: #fff; text-decoration: none; cursor: pointer; } 
.list-group-item.active > .badge, .nav-pills > .active > a > .badge { color: #337ab7; background-color: #fff; } .list-group-item > .badge { float: right; } .list-group-item > .badge + .badge { margin-right: 5px; } .nav-pills > li > a > .badge { margin-left: 3px; } .jumbotron { padding: 30px 15px; margin-bottom: 30px; color: inherit; background-color: #eee; } .jumbotron h1, .jumbotron .h1 { color: inherit; } .jumbotron p { margin-bottom: 15px; font-size: 21px; font-weight: 200; } .jumbotron > hr { border-top-color: #d5d5d5; } .container .jumbotron, .container-fluid .jumbotron { border-radius: 6px; } .jumbotron .container { max-width: 100%; } @media screen and (min-width: 768px) { .jumbotron { padding: 48px 0; } .container .jumbotron, .container-fluid .jumbotron { padding-right: 60px; padding-left: 60px; } .jumbotron h1, .jumbotron .h1 { font-size: 63px; } } .thumbnail { display: block; padding: 4px; margin-bottom: 20px; line-height: 1.42857143; background-color: #fff; border: 1px solid #ddd; border-radius: 4px; -webkit-transition: border .2s ease-in-out; -o-transition: border .2s ease-in-out; transition: border .2s ease-in-out; } .thumbnail > img, .thumbnail a > img { margin-right: auto; margin-left: auto; } a.thumbnail:hover, a.thumbnail:focus, a.thumbnail.active { border-color: #337ab7; } .thumbnail .caption { padding: 9px; color: #333; } .alert { padding: 15px; margin-bottom: 20px; border: 1px solid transparent; border-radius: 4px; } .alert h4 { margin-top: 0; color: inherit; } .alert .alert-link { font-weight: bold; } .alert > p, .alert > ul { margin-bottom: 0; } .alert > p + p { margin-top: 5px; } .alert-dismissable, .alert-dismissible { padding-right: 35px; } .alert-dismissable .close, .alert-dismissible .close { position: relative; top: -2px; right: -21px; color: inherit; } .alert-success { color: #3c763d; background-color: #dff0d8; border-color: #d6e9c6; } .alert-success hr { border-top-color: #c9e2b3; } .alert-success .alert-link { color: #2b542c; } .alert-info { color: #31708f; background-color: #d9edf7; border-color: #bce8f1; } .alert-info hr { border-top-color: #a6e1ec; } .alert-info .alert-link { color: #245269; } .alert-warning { color: #8a6d3b; background-color: #fcf8e3; border-color: #faebcc; } .alert-warning hr { border-top-color: #f7e1b5; } .alert-warning .alert-link { color: #66512c; } .alert-danger { color: #a94442; background-color: #f2dede; border-color: #ebccd1; } .alert-danger hr { border-top-color: #e4b9c0; } .alert-danger .alert-link { color: #843534; } @-webkit-keyframes progress-bar-stripes { from { background-position: 40px 0; } to { background-position: 0 0; } } @-o-keyframes progress-bar-stripes { from { background-position: 40px 0; } to { background-position: 0 0; } } @keyframes progress-bar-stripes { from { background-position: 40px 0; } to { background-position: 0 0; } } .progress { height: 20px; margin-bottom: 20px; overflow: hidden; background-color: #f5f5f5; border-radius: 4px; -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); } .progress-bar { float: left; width: 0; height: 100%; font-size: 12px; line-height: 20px; color: #fff; text-align: center; background-color: #337ab7; -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); -webkit-transition: width .6s ease; -o-transition: width .6s ease; transition: width .6s ease; } .progress-striped .progress-bar, .progress-bar-striped { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, 
transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); -webkit-background-size: 40px 40px; background-size: 40px 40px; } .progress.active .progress-bar, .progress-bar.active { -webkit-animation: progress-bar-stripes 2s linear infinite; -o-animation: progress-bar-stripes 2s linear infinite; animation: progress-bar-stripes 2s linear infinite; } .progress-bar-success { background-color: #5cb85c; } .progress-striped .progress-bar-success { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-info { background-color: #5bc0de; } .progress-striped .progress-bar-info { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-warning { background-color: #f0ad4e; } .progress-striped .progress-bar-warning { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-danger { background-color: #d9534f; } .progress-striped .progress-bar-danger { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, 
transparent 75%, transparent); } .media { margin-top: 15px; } .media:first-child { margin-top: 0; } .media, .media-body { overflow: hidden; zoom: 1; } .media-body { width: 10000px; } .media-object { display: block; } .media-right, .media > .pull-right { padding-left: 10px; } .media-left, .media > .pull-left { padding-right: 10px; } .media-left, .media-right, .media-body { display: table-cell; vertical-align: top; } .media-middle { vertical-align: middle; } .media-bottom { vertical-align: bottom; } .media-heading { margin-top: 0; margin-bottom: 5px; } .media-list { padding-left: 0; list-style: none; } .list-group { padding-left: 0; margin-bottom: 20px; } .list-group-item { position: relative; display: block; padding: 10px 15px; margin-bottom: -1px; background-color: #fff; border: 1px solid #ddd; } .list-group-item:first-child { border-top-left-radius: 4px; border-top-right-radius: 4px; } .list-group-item:last-child { margin-bottom: 0; border-bottom-right-radius: 4px; border-bottom-left-radius: 4px; } a.list-group-item { color: #555; } a.list-group-item .list-group-item-heading { color: #333; } a.list-group-item:hover, a.list-group-item:focus { color: #555; text-decoration: none; background-color: #f5f5f5; } .list-group-item.disabled, .list-group-item.disabled:hover, .list-group-item.disabled:focus { color: #777; cursor: not-allowed; background-color: #eee; } .list-group-item.disabled .list-group-item-heading, .list-group-item.disabled:hover .list-group-item-heading, .list-group-item.disabled:focus .list-group-item-heading { color: inherit; } .list-group-item.disabled .list-group-item-text, .list-group-item.disabled:hover .list-group-item-text, .list-group-item.disabled:focus .list-group-item-text { color: #777; } .list-group-item.active, .list-group-item.active:hover, .list-group-item.active:focus { z-index: 2; color: #fff; background-color: #337ab7; border-color: #337ab7; } .list-group-item.active .list-group-item-heading, .list-group-item.active:hover .list-group-item-heading, .list-group-item.active:focus .list-group-item-heading, .list-group-item.active .list-group-item-heading > small, .list-group-item.active:hover .list-group-item-heading > small, .list-group-item.active:focus .list-group-item-heading > small, .list-group-item.active .list-group-item-heading > .small, .list-group-item.active:hover .list-group-item-heading > .small, .list-group-item.active:focus .list-group-item-heading > .small { color: inherit; } .list-group-item.active .list-group-item-text, .list-group-item.active:hover .list-group-item-text, .list-group-item.active:focus .list-group-item-text { color: #c7ddef; } .list-group-item-success { color: #3c763d; background-color: #dff0d8; } a.list-group-item-success { color: #3c763d; } a.list-group-item-success .list-group-item-heading { color: inherit; } a.list-group-item-success:hover, a.list-group-item-success:focus { color: #3c763d; background-color: #d0e9c6; } a.list-group-item-success.active, a.list-group-item-success.active:hover, a.list-group-item-success.active:focus { color: #fff; background-color: #3c763d; border-color: #3c763d; } .list-group-item-info { color: #31708f; background-color: #d9edf7; } a.list-group-item-info { color: #31708f; } a.list-group-item-info .list-group-item-heading { color: inherit; } a.list-group-item-info:hover, a.list-group-item-info:focus { color: #31708f; background-color: #c4e3f3; } a.list-group-item-info.active, a.list-group-item-info.active:hover, a.list-group-item-info.active:focus { color: #fff; background-color: #31708f; 
border-color: #31708f; } .list-group-item-warning { color: #8a6d3b; background-color: #fcf8e3; } a.list-group-item-warning { color: #8a6d3b; } a.list-group-item-warning .list-group-item-heading { color: inherit; } a.list-group-item-warning:hover, a.list-group-item-warning:focus { color: #8a6d3b; background-color: #faf2cc; } a.list-group-item-warning.active, a.list-group-item-warning.active:hover, a.list-group-item-warning.active:focus { color: #fff; background-color: #8a6d3b; border-color: #8a6d3b; } .list-group-item-danger { color: #a94442; background-color: #f2dede; } a.list-group-item-danger { color: #a94442; } a.list-group-item-danger .list-group-item-heading { color: inherit; } a.list-group-item-danger:hover, a.list-group-item-danger:focus { color: #a94442; background-color: #ebcccc; } a.list-group-item-danger.active, a.list-group-item-danger.active:hover, a.list-group-item-danger.active:focus { color: #fff; background-color: #a94442; border-color: #a94442; } .list-group-item-heading { margin-top: 0; margin-bottom: 5px; } .list-group-item-text { margin-bottom: 0; line-height: 1.3; } .panel { margin-bottom: 20px; background-color: #fff; border: 1px solid transparent; border-radius: 4px; -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05); box-shadow: 0 1px 1px rgba(0, 0, 0, .05); } .panel-body { padding: 15px; } .panel-heading { padding: 10px 15px; border-bottom: 1px solid transparent; border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel-heading > .dropdown .dropdown-toggle { color: inherit; } .panel-title { margin-top: 0; margin-bottom: 0; font-size: 16px; color: inherit; } .panel-title > a, .panel-title > small, .panel-title > .small, .panel-title > small > a, .panel-title > .small > a { color: inherit; } .panel-footer { padding: 10px 15px; background-color: #f5f5f5; border-top: 1px solid #ddd; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel > .list-group, .panel > .panel-collapse > .list-group { margin-bottom: 0; } .panel > .list-group .list-group-item, .panel > .panel-collapse > .list-group .list-group-item { border-width: 1px 0; border-radius: 0; } .panel > .list-group:first-child .list-group-item:first-child, .panel > .panel-collapse > .list-group:first-child .list-group-item:first-child { border-top: 0; border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel > .list-group:last-child .list-group-item:last-child, .panel > .panel-collapse > .list-group:last-child .list-group-item:last-child { border-bottom: 0; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel-heading + .list-group .list-group-item:first-child { border-top-width: 0; } .list-group + .panel-footer { border-top-width: 0; } .panel > .table, .panel > .table-responsive > .table, .panel > .panel-collapse > .table { margin-bottom: 0; } .panel > .table caption, .panel > .table-responsive > .table caption, .panel > .panel-collapse > .table caption { padding-right: 15px; padding-left: 15px; } .panel > .table:first-child, .panel > .table-responsive:first-child > .table:first-child { border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel > .table:first-child > thead:first-child > tr:first-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child, .panel > .table:first-child > tbody:first-child > tr:first-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child { border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel > .table:first-child > 
thead:first-child > tr:first-child td:first-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, .panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, .panel > .table:first-child > thead:first-child > tr:first-child th:first-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, .panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { border-top-left-radius: 3px; } .panel > .table:first-child > thead:first-child > tr:first-child td:last-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, .panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, .panel > .table:first-child > thead:first-child > tr:first-child th:last-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, .panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child { border-top-right-radius: 3px; } .panel > .table:last-child, .panel > .table-responsive:last-child > .table:last-child { border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel > .table:last-child > tbody:last-child > tr:last-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child, .panel > .table:last-child > tfoot:last-child > tr:last-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child { border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child, .panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, .panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, .panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { border-bottom-left-radius: 3px; } .panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, .panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, .panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, .panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, .panel > 
.table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { border-bottom-right-radius: 3px; } .panel > .panel-body + .table, .panel > .panel-body + .table-responsive, .panel > .table + .panel-body, .panel > .table-responsive + .panel-body { border-top: 1px solid #ddd; } .panel > .table > tbody:first-child > tr:first-child th, .panel > .table > tbody:first-child > tr:first-child td { border-top: 0; } .panel > .table-bordered, .panel > .table-responsive > .table-bordered { border: 0; } .panel > .table-bordered > thead > tr > th:first-child, .panel > .table-responsive > .table-bordered > thead > tr > th:first-child, .panel > .table-bordered > tbody > tr > th:first-child, .panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, .panel > .table-bordered > tfoot > tr > th:first-child, .panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, .panel > .table-bordered > thead > tr > td:first-child, .panel > .table-responsive > .table-bordered > thead > tr > td:first-child, .panel > .table-bordered > tbody > tr > td:first-child, .panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, .panel > .table-bordered > tfoot > tr > td:first-child, .panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { border-left: 0; } .panel > .table-bordered > thead > tr > th:last-child, .panel > .table-responsive > .table-bordered > thead > tr > th:last-child, .panel > .table-bordered > tbody > tr > th:last-child, .panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, .panel > .table-bordered > tfoot > tr > th:last-child, .panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, .panel > .table-bordered > thead > tr > td:last-child, .panel > .table-responsive > .table-bordered > thead > tr > td:last-child, .panel > .table-bordered > tbody > tr > td:last-child, .panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, .panel > .table-bordered > tfoot > tr > td:last-child, .panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { border-right: 0; } .panel > .table-bordered > thead > tr:first-child > td, .panel > .table-responsive > .table-bordered > thead > tr:first-child > td, .panel > .table-bordered > tbody > tr:first-child > td, .panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, .panel > .table-bordered > thead > tr:first-child > th, .panel > .table-responsive > .table-bordered > thead > tr:first-child > th, .panel > .table-bordered > tbody > tr:first-child > th, .panel > .table-responsive > .table-bordered > tbody > tr:first-child > th { border-bottom: 0; } .panel > .table-bordered > tbody > tr:last-child > td, .panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, .panel > .table-bordered > tfoot > tr:last-child > td, .panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td, .panel > .table-bordered > tbody > tr:last-child > th, .panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, .panel > .table-bordered > tfoot > tr:last-child > th, .panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th { border-bottom: 0; } .panel > .table-responsive { margin-bottom: 0; border: 0; } .panel-group { margin-bottom: 20px; } .panel-group .panel { margin-bottom: 0; border-radius: 4px; } .panel-group .panel + .panel { margin-top: 5px; } .panel-group .panel-heading { border-bottom: 0; } .panel-group .panel-heading + .panel-collapse > .panel-body, 
.panel-group .panel-heading + .panel-collapse > .list-group { border-top: 1px solid #ddd; } .panel-group .panel-footer { border-top: 0; } .panel-group .panel-footer + .panel-collapse .panel-body { border-bottom: 1px solid #ddd; } .panel-default { border-color: #ddd; } .panel-default > .panel-heading { color: #333; background-color: #f5f5f5; border-color: #ddd; } .panel-default > .panel-heading + .panel-collapse > .panel-body { border-top-color: #ddd; } .panel-default > .panel-heading .badge { color: #f5f5f5; background-color: #333; } .panel-default > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #ddd; } .panel-primary { border-color: #337ab7; } .panel-primary > .panel-heading { color: #fff; background-color: #337ab7; border-color: #337ab7; } .panel-primary > .panel-heading + .panel-collapse > .panel-body { border-top-color: #337ab7; } .panel-primary > .panel-heading .badge { color: #337ab7; background-color: #fff; } .panel-primary > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #337ab7; } .panel-success { border-color: #d6e9c6; } .panel-success > .panel-heading { color: #3c763d; background-color: #dff0d8; border-color: #d6e9c6; } .panel-success > .panel-heading + .panel-collapse > .panel-body { border-top-color: #d6e9c6; } .panel-success > .panel-heading .badge { color: #dff0d8; background-color: #3c763d; } .panel-success > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #d6e9c6; } .panel-info { border-color: #bce8f1; } .panel-info > .panel-heading { color: #31708f; background-color: #d9edf7; border-color: #bce8f1; } .panel-info > .panel-heading + .panel-collapse > .panel-body { border-top-color: #bce8f1; } .panel-info > .panel-heading .badge { color: #d9edf7; background-color: #31708f; } .panel-info > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #bce8f1; } .panel-warning { border-color: #faebcc; } .panel-warning > .panel-heading { color: #8a6d3b; background-color: #fcf8e3; border-color: #faebcc; } .panel-warning > .panel-heading + .panel-collapse > .panel-body { border-top-color: #faebcc; } .panel-warning > .panel-heading .badge { color: #fcf8e3; background-color: #8a6d3b; } .panel-warning > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #faebcc; } .panel-danger { border-color: #ebccd1; } .panel-danger > .panel-heading { color: #a94442; background-color: #f2dede; border-color: #ebccd1; } .panel-danger > .panel-heading + .panel-collapse > .panel-body { border-top-color: #ebccd1; } .panel-danger > .panel-heading .badge { color: #f2dede; background-color: #a94442; } .panel-danger > .panel-footer + .panel-collapse > .panel-body { border-bottom-color: #ebccd1; } .embed-responsive { position: relative; display: block; height: 0; padding: 0; overflow: hidden; } .embed-responsive .embed-responsive-item, .embed-responsive iframe, .embed-responsive embed, .embed-responsive object, .embed-responsive video { position: absolute; top: 0; bottom: 0; left: 0; width: 100%; height: 100%; border: 0; } .embed-responsive.embed-responsive-16by9 { padding-bottom: 56.25%; } .embed-responsive.embed-responsive-4by3 { padding-bottom: 75%; } .well { min-height: 20px; padding: 19px; margin-bottom: 20px; background-color: #f5f5f5; border: 1px solid #e3e3e3; border-radius: 4px; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); } .well blockquote { border-color: #ddd; border-color: rgba(0, 0, 0, .15); } .well-lg { padding: 24px; border-radius: 6px; } 
.well-sm { padding: 9px; border-radius: 3px; } .close { float: right; font-size: 21px; font-weight: bold; line-height: 1; color: #000; text-shadow: 0 1px 0 #fff; filter: alpha(opacity=20); opacity: .2; } .close:hover, .close:focus { color: #000; text-decoration: none; cursor: pointer; filter: alpha(opacity=50); opacity: .5; } button.close { -webkit-appearance: none; padding: 0; cursor: pointer; background: transparent; border: 0; } .modal-open { overflow: hidden; } .modal { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 1040; display: none; overflow: hidden; -webkit-overflow-scrolling: touch; outline: 0; } .modal.fade .modal-dialog { -webkit-transition: -webkit-transform .3s ease-out; -o-transition: -o-transform .3s ease-out; transition: transform .3s ease-out; -webkit-transform: translate(0, -25%); -ms-transform: translate(0, -25%); -o-transform: translate(0, -25%); transform: translate(0, -25%); } .modal.in .modal-dialog { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); } .modal-open .modal { overflow-x: hidden; overflow-y: auto; } .modal-dialog { position: relative; width: auto; margin: 10px; } .modal-content { position: relative; background-color: #fff; -webkit-background-clip: padding-box; background-clip: padding-box; border: 1px solid #999; border: 1px solid rgba(0, 0, 0, .2); border-radius: 6px; outline: 0; -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5); box-shadow: 0 3px 9px rgba(0, 0, 0, .5); } .modal-backdrop { position: absolute; top: 0; right: 0; left: 0; background-color: #000; } .modal-backdrop.fade { filter: alpha(opacity=0); opacity: 0; } .modal-backdrop.in { filter: alpha(opacity=50); opacity: .5; } .modal-header { min-height: 16.42857143px; padding: 15px; border-bottom: 1px solid #e5e5e5; } .modal-header .close { margin-top: -2px; } .modal-title { margin: 0; line-height: 1.42857143; } .modal-body { position: relative; padding: 15px; } .modal-footer { padding: 15px; text-align: right; border-top: 1px solid #e5e5e5; } .modal-footer .btn + .btn { margin-bottom: 0; margin-left: 5px; } .modal-footer .btn-group .btn + .btn { margin-left: -1px; } .modal-footer .btn-block + .btn-block { margin-left: 0; } .modal-scrollbar-measure { position: absolute; top: -9999px; width: 50px; height: 50px; overflow: scroll; } @media (min-width: 768px) { .modal-dialog { width: 600px; margin: 30px auto; } .modal-content { -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5); box-shadow: 0 5px 15px rgba(0, 0, 0, .5); } .modal-sm { width: 300px; } } @media (min-width: 992px) { .modal-lg { width: 900px; } } .tooltip { position: absolute; z-index: 1070; display: block; font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 12px; font-weight: normal; line-height: 1.4; visibility: visible; filter: alpha(opacity=0); opacity: 0; } .tooltip.in { filter: alpha(opacity=90); opacity: .9; } .tooltip.top { padding: 5px 0; margin-top: -3px; } .tooltip.right { padding: 0 5px; margin-left: 3px; } .tooltip.bottom { padding: 5px 0; margin-top: 3px; } .tooltip.left { padding: 0 5px; margin-left: -3px; } .tooltip-inner { max-width: 200px; padding: 3px 8px; color: #fff; text-align: center; text-decoration: none; background-color: #000; border-radius: 4px; } .tooltip-arrow { position: absolute; width: 0; height: 0; border-color: transparent; border-style: solid; } .tooltip.top .tooltip-arrow { bottom: 0; left: 50%; margin-left: -5px; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.top-left 
.tooltip-arrow { right: 5px; bottom: 0; margin-bottom: -5px; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.top-right .tooltip-arrow { bottom: 0; left: 5px; margin-bottom: -5px; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.right .tooltip-arrow { top: 50%; left: 0; margin-top: -5px; border-width: 5px 5px 5px 0; border-right-color: #000; } .tooltip.left .tooltip-arrow { top: 50%; right: 0; margin-top: -5px; border-width: 5px 0 5px 5px; border-left-color: #000; } .tooltip.bottom .tooltip-arrow { top: 0; left: 50%; margin-left: -5px; border-width: 0 5px 5px; border-bottom-color: #000; } .tooltip.bottom-left .tooltip-arrow { top: 0; right: 5px; margin-top: -5px; border-width: 0 5px 5px; border-bottom-color: #000; } .tooltip.bottom-right .tooltip-arrow { top: 0; left: 5px; margin-top: -5px; border-width: 0 5px 5px; border-bottom-color: #000; } .popover { position: absolute; top: 0; left: 0; z-index: 1060; display: none; max-width: 276px; padding: 1px; font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 14px; font-weight: normal; line-height: 1.42857143; text-align: left; white-space: normal; background-color: #fff; -webkit-background-clip: padding-box; background-clip: padding-box; border: 1px solid #ccc; border: 1px solid rgba(0, 0, 0, .2); border-radius: 6px; -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2); box-shadow: 0 5px 10px rgba(0, 0, 0, .2); } .popover.top { margin-top: -10px; } .popover.right { margin-left: 10px; } .popover.bottom { margin-top: 10px; } .popover.left { margin-left: -10px; } .popover-title { padding: 8px 14px; margin: 0; font-size: 14px; background-color: #f7f7f7; border-bottom: 1px solid #ebebeb; border-radius: 5px 5px 0 0; } .popover-content { padding: 9px 14px; } .popover > .arrow, .popover > .arrow:after { position: absolute; display: block; width: 0; height: 0; border-color: transparent; border-style: solid; } .popover > .arrow { border-width: 11px; } .popover > .arrow:after { content: ""; border-width: 10px; } .popover.top > .arrow { bottom: -11px; left: 50%; margin-left: -11px; border-top-color: #999; border-top-color: rgba(0, 0, 0, .25); border-bottom-width: 0; } .popover.top > .arrow:after { bottom: 1px; margin-left: -10px; content: " "; border-top-color: #fff; border-bottom-width: 0; } .popover.right > .arrow { top: 50%; left: -11px; margin-top: -11px; border-right-color: #999; border-right-color: rgba(0, 0, 0, .25); border-left-width: 0; } .popover.right > .arrow:after { bottom: -10px; left: 1px; content: " "; border-right-color: #fff; border-left-width: 0; } .popover.bottom > .arrow { top: -11px; left: 50%; margin-left: -11px; border-top-width: 0; border-bottom-color: #999; border-bottom-color: rgba(0, 0, 0, .25); } .popover.bottom > .arrow:after { top: 1px; margin-left: -10px; content: " "; border-top-width: 0; border-bottom-color: #fff; } .popover.left > .arrow { top: 50%; right: -11px; margin-top: -11px; border-right-width: 0; border-left-color: #999; border-left-color: rgba(0, 0, 0, .25); } .popover.left > .arrow:after { right: 1px; bottom: -10px; content: " "; border-right-width: 0; border-left-color: #fff; } .carousel { position: relative; } .carousel-inner { position: relative; width: 100%; overflow: hidden; } .carousel-inner > .item { position: relative; display: none; -webkit-transition: .6s ease-in-out left; -o-transition: .6s ease-in-out left; transition: .6s ease-in-out left; } .carousel-inner > .item > img, .carousel-inner > .item > a > img { line-height: 1; } @media all and (transform-3d), 
(-webkit-transform-3d) { .carousel-inner > .item { -webkit-transition: -webkit-transform .6s ease-in-out; -o-transition: -o-transform .6s ease-in-out; transition: transform .6s ease-in-out; -webkit-backface-visibility: hidden; backface-visibility: hidden; -webkit-perspective: 1000; perspective: 1000; } .carousel-inner > .item.next, .carousel-inner > .item.active.right { left: 0; -webkit-transform: translate3d(100%, 0, 0); transform: translate3d(100%, 0, 0); } .carousel-inner > .item.prev, .carousel-inner > .item.active.left { left: 0; -webkit-transform: translate3d(-100%, 0, 0); transform: translate3d(-100%, 0, 0); } .carousel-inner > .item.next.left, .carousel-inner > .item.prev.right, .carousel-inner > .item.active { left: 0; -webkit-transform: translate3d(0, 0, 0); transform: translate3d(0, 0, 0); } } .carousel-inner > .active, .carousel-inner > .next, .carousel-inner > .prev { display: block; } .carousel-inner > .active { left: 0; } .carousel-inner > .next, .carousel-inner > .prev { position: absolute; top: 0; width: 100%; } .carousel-inner > .next { left: 100%; } .carousel-inner > .prev { left: -100%; } .carousel-inner > .next.left, .carousel-inner > .prev.right { left: 0; } .carousel-inner > .active.left { left: -100%; } .carousel-inner > .active.right { left: 100%; } .carousel-control { position: absolute; top: 0; bottom: 0; left: 0; width: 15%; font-size: 20px; color: #fff; text-align: center; text-shadow: 0 1px 2px rgba(0, 0, 0, .6); filter: alpha(opacity=50); opacity: .5; } .carousel-control.left { background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); background-image: -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001))); background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); background-repeat: repeat-x; } .carousel-control.right { right: 0; left: auto; background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); background-image: -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5))); background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); background-repeat: repeat-x; } .carousel-control:hover, .carousel-control:focus { color: #fff; text-decoration: none; filter: alpha(opacity=90); outline: 0; opacity: .9; } .carousel-control .icon-prev, .carousel-control .icon-next, .carousel-control .glyphicon-chevron-left, .carousel-control .glyphicon-chevron-right { position: absolute; top: 50%; z-index: 5; display: inline-block; } .carousel-control .icon-prev, .carousel-control .glyphicon-chevron-left { left: 50%; margin-left: -10px; } .carousel-control .icon-next, .carousel-control .glyphicon-chevron-right { right: 50%; margin-right: -10px; } .carousel-control .icon-prev, .carousel-control .icon-next { width: 20px; height: 20px; margin-top: -10px; font-family: serif; line-height: 1; } .carousel-control .icon-prev:before { content: '\2039'; } .carousel-control .icon-next:before { content: '\203a'; } .carousel-indicators { position: 
absolute; bottom: 10px; left: 50%; z-index: 15; width: 60%; padding-left: 0; margin-left: -30%; text-align: center; list-style: none; } .carousel-indicators li { display: inline-block; width: 10px; height: 10px; margin: 1px; text-indent: -999px; cursor: pointer; background-color: #000 \9; background-color: rgba(0, 0, 0, 0); border: 1px solid #fff; border-radius: 10px; } .carousel-indicators .active { width: 12px; height: 12px; margin: 0; background-color: #fff; } .carousel-caption { position: absolute; right: 15%; bottom: 20px; left: 15%; z-index: 10; padding-top: 20px; padding-bottom: 20px; color: #fff; text-align: center; text-shadow: 0 1px 2px rgba(0, 0, 0, .6); } .carousel-caption .btn { text-shadow: none; } @media screen and (min-width: 768px) { .carousel-control .glyphicon-chevron-left, .carousel-control .glyphicon-chevron-right, .carousel-control .icon-prev, .carousel-control .icon-next { width: 30px; height: 30px; margin-top: -15px; font-size: 30px; } .carousel-control .glyphicon-chevron-left, .carousel-control .icon-prev { margin-left: -15px; } .carousel-control .glyphicon-chevron-right, .carousel-control .icon-next { margin-right: -15px; } .carousel-caption { right: 20%; left: 20%; padding-bottom: 30px; } .carousel-indicators { bottom: 20px; } } .clearfix:before, .clearfix:after, .dl-horizontal dd:before, .dl-horizontal dd:after, .container:before, .container:after, .container-fluid:before, .container-fluid:after, .row:before, .row:after, .form-horizontal .form-group:before, .form-horizontal .form-group:after, .btn-toolbar:before, .btn-toolbar:after, .btn-group-vertical > .btn-group:before, .btn-group-vertical > .btn-group:after, .nav:before, .nav:after, .navbar:before, .navbar:after, .navbar-header:before, .navbar-header:after, .navbar-collapse:before, .navbar-collapse:after, .pager:before, .pager:after, .panel-body:before, .panel-body:after, .modal-footer:before, .modal-footer:after { display: table; content: " "; } .clearfix:after, .dl-horizontal dd:after, .container:after, .container-fluid:after, .row:after, .form-horizontal .form-group:after, .btn-toolbar:after, .btn-group-vertical > .btn-group:after, .nav:after, .navbar:after, .navbar-header:after, .navbar-collapse:after, .pager:after, .panel-body:after, .modal-footer:after { clear: both; } .center-block { display: block; margin-right: auto; margin-left: auto; } .pull-right { float: right !important; } .pull-left { float: left !important; } .hide { display: none !important; } .show { display: block !important; } .invisible { visibility: hidden; } .text-hide { font: 0/0 a; color: transparent; text-shadow: none; background-color: transparent; border: 0; } .hidden { display: none !important; visibility: hidden !important; } .affix { position: fixed; } @-ms-viewport { width: device-width; } .visible-xs, .visible-sm, .visible-md, .visible-lg { display: none !important; } .visible-xs-block, .visible-xs-inline, .visible-xs-inline-block, .visible-sm-block, .visible-sm-inline, .visible-sm-inline-block, .visible-md-block, .visible-md-inline, .visible-md-inline-block, .visible-lg-block, .visible-lg-inline, .visible-lg-inline-block { display: none !important; } @media (max-width: 767px) { .visible-xs { display: block !important; } table.visible-xs { display: table; } tr.visible-xs { display: table-row !important; } th.visible-xs, td.visible-xs { display: table-cell !important; } } @media (max-width: 767px) { .visible-xs-block { display: block !important; } } @media (max-width: 767px) { .visible-xs-inline { display: inline !important; } 
} @media (max-width: 767px) { .visible-xs-inline-block { display: inline-block !important; } } @media (min-width: 768px) and (max-width: 991px) { .visible-sm { display: block !important; } table.visible-sm { display: table; } tr.visible-sm { display: table-row !important; } th.visible-sm, td.visible-sm { display: table-cell !important; } } @media (min-width: 768px) and (max-width: 991px) { .visible-sm-block { display: block !important; } } @media (min-width: 768px) and (max-width: 991px) { .visible-sm-inline { display: inline !important; } } @media (min-width: 768px) and (max-width: 991px) { .visible-sm-inline-block { display: inline-block !important; } } @media (min-width: 992px) and (max-width: 1199px) { .visible-md { display: block !important; } table.visible-md { display: table; } tr.visible-md { display: table-row !important; } th.visible-md, td.visible-md { display: table-cell !important; } } @media (min-width: 992px) and (max-width: 1199px) { .visible-md-block { display: block !important; } } @media (min-width: 992px) and (max-width: 1199px) { .visible-md-inline { display: inline !important; } } @media (min-width: 992px) and (max-width: 1199px) { .visible-md-inline-block { display: inline-block !important; } } @media (min-width: 1200px) { .visible-lg { display: block !important; } table.visible-lg { display: table; } tr.visible-lg { display: table-row !important; } th.visible-lg, td.visible-lg { display: table-cell !important; } } @media (min-width: 1200px) { .visible-lg-block { display: block !important; } } @media (min-width: 1200px) { .visible-lg-inline { display: inline !important; } } @media (min-width: 1200px) { .visible-lg-inline-block { display: inline-block !important; } } @media (max-width: 767px) { .hidden-xs { display: none !important; } } @media (min-width: 768px) and (max-width: 991px) { .hidden-sm { display: none !important; } } @media (min-width: 992px) and (max-width: 1199px) { .hidden-md { display: none !important; } } @media (min-width: 1200px) { .hidden-lg { display: none !important; } } .visible-print { display: none !important; } @media print { .visible-print { display: block !important; } table.visible-print { display: table; } tr.visible-print { display: table-row !important; } th.visible-print, td.visible-print { display: table-cell !important; } } .visible-print-block { display: none !important; } @media print { .visible-print-block { display: block !important; } } .visible-print-inline { display: none !important; } @media print { .visible-print-inline { display: inline !important; } } .visible-print-inline-block { display: none !important; } @media print { .visible-print-inline-block { display: inline-block !important; } } @media print { .hidden-print { display: none !important; } } /*# sourceMappingURL=bootstrap.css.map */ ================================================ FILE: website/css/main.scss ================================================ --- --- @charset "utf-8"; // Example Defaults // $base-font-family: Helvetica, Arial, sans-serif; // $base-font-size: 16px; // $small-font-size: $base-font-size * 0.875; // $base-line-height: 1.5; // $spacing-unit: 30px; // $text-color: #111; // $background-color: #fdfdfd; // $brand-color: #2a7ae2; // $grey-color: #828282; // $grey-color-light: lighten($grey-color, 40%); // $grey-color-dark: darken($grey-color, 25%); // // Width of the content area // $content-width: 800px; // $on-palm: 600px; // $on-laptop: 800px; // Use media queries like this: // @include media-query($on-palm) { // .wrapper { //
padding-right: $spacing-unit / 2; // padding-left: $spacing-unit / 2; // } // } // variables.less $theme-primary: #F05F40; $theme-dark: #222; // Import partials from `sass_dir` (defaults to `_sass`) @import "mixins", "base" ; ================================================ FILE: website/font-awesome/css/font-awesome.css ================================================ /*! * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) */ /* FONT PATH * -------------------------- */ @font-face { font-family: 'FontAwesome'; src: url('../fonts/fontawesome-webfont.eot?v=4.3.0'); src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.3.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.3.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.3.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.3.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.3.0#fontawesomeregular') format('svg'); font-weight: normal; font-style: normal; } .fa { display: inline-block; font: normal normal normal 14px/1 FontAwesome; font-size: inherit; text-rendering: auto; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; transform: translate(0, 0); } /* makes the font 33% larger relative to the icon container */ .fa-lg { font-size: 1.33333333em; line-height: 0.75em; vertical-align: -15%; } .fa-2x { font-size: 2em; } .fa-3x { font-size: 3em; } .fa-4x { font-size: 4em; } .fa-5x { font-size: 5em; } .fa-fw { width: 1.28571429em; text-align: center; } .fa-ul { padding-left: 0; margin-left: 2.14285714em; list-style-type: none; } .fa-ul > li { position: relative; } .fa-li { position: absolute; left: -2.14285714em; width: 2.14285714em; top: 0.14285714em; text-align: center; } .fa-li.fa-lg { left: -1.85714286em; } .fa-border { padding: .2em .25em .15em; border: solid 0.08em #eeeeee; border-radius: .1em; } .pull-right { float: right; } .pull-left { float: left; } .fa.pull-left { margin-right: .3em; } .fa.pull-right { margin-left: .3em; } .fa-spin { -webkit-animation: fa-spin 2s infinite linear; animation: fa-spin 2s infinite linear; } .fa-pulse { -webkit-animation: fa-spin 1s infinite steps(8); animation: fa-spin 1s infinite steps(8); } @-webkit-keyframes fa-spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(359deg); transform: rotate(359deg); } } @keyframes fa-spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(359deg); transform: rotate(359deg); } } .fa-rotate-90 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1); -webkit-transform: rotate(90deg); -ms-transform: rotate(90deg); transform: rotate(90deg); } .fa-rotate-180 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2); -webkit-transform: rotate(180deg); -ms-transform: rotate(180deg); transform: rotate(180deg); } .fa-rotate-270 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3); -webkit-transform: rotate(270deg); -ms-transform: rotate(270deg); transform: rotate(270deg); } .fa-flip-horizontal { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1); -webkit-transform: scale(-1, 1); -ms-transform: scale(-1, 1); transform: scale(-1, 1); } .fa-flip-vertical { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1); -webkit-transform: scale(1, -1); -ms-transform: scale(1, -1); transform: scale(1, -1); } :root .fa-rotate-90, 
:root .fa-rotate-180, :root .fa-rotate-270, :root .fa-flip-horizontal, :root .fa-flip-vertical { filter: none; } .fa-stack { position: relative; display: inline-block; width: 2em; height: 2em; line-height: 2em; vertical-align: middle; } .fa-stack-1x, .fa-stack-2x { position: absolute; left: 0; width: 100%; text-align: center; } .fa-stack-1x { line-height: inherit; } .fa-stack-2x { font-size: 2em; } .fa-inverse { color: #ffffff; } /* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen readers do not read off random characters that represent icons */ .fa-glass:before { content: "\f000"; } .fa-music:before { content: "\f001"; } .fa-search:before { content: "\f002"; } .fa-envelope-o:before { content: "\f003"; } .fa-heart:before { content: "\f004"; } .fa-star:before { content: "\f005"; } .fa-star-o:before { content: "\f006"; } .fa-user:before { content: "\f007"; } .fa-film:before { content: "\f008"; } .fa-th-large:before { content: "\f009"; } .fa-th:before { content: "\f00a"; } .fa-th-list:before { content: "\f00b"; } .fa-check:before { content: "\f00c"; } .fa-remove:before, .fa-close:before, .fa-times:before { content: "\f00d"; } .fa-search-plus:before { content: "\f00e"; } .fa-search-minus:before { content: "\f010"; } .fa-power-off:before { content: "\f011"; } .fa-signal:before { content: "\f012"; } .fa-gear:before, .fa-cog:before { content: "\f013"; } .fa-trash-o:before { content: "\f014"; } .fa-home:before { content: "\f015"; } .fa-file-o:before { content: "\f016"; } .fa-clock-o:before { content: "\f017"; } .fa-road:before { content: "\f018"; } .fa-download:before { content: "\f019"; } .fa-arrow-circle-o-down:before { content: "\f01a"; } .fa-arrow-circle-o-up:before { content: "\f01b"; } .fa-inbox:before { content: "\f01c"; } .fa-play-circle-o:before { content: "\f01d"; } .fa-rotate-right:before, .fa-repeat:before { content: "\f01e"; } .fa-refresh:before { content: "\f021"; } .fa-list-alt:before { content: "\f022"; } .fa-lock:before { content: "\f023"; } .fa-flag:before { content: "\f024"; } .fa-headphones:before { content: "\f025"; } .fa-volume-off:before { content: "\f026"; } .fa-volume-down:before { content: "\f027"; } .fa-volume-up:before { content: "\f028"; } .fa-qrcode:before { content: "\f029"; } .fa-barcode:before { content: "\f02a"; } .fa-tag:before { content: "\f02b"; } .fa-tags:before { content: "\f02c"; } .fa-book:before { content: "\f02d"; } .fa-bookmark:before { content: "\f02e"; } .fa-print:before { content: "\f02f"; } .fa-camera:before { content: "\f030"; } .fa-font:before { content: "\f031"; } .fa-bold:before { content: "\f032"; } .fa-italic:before { content: "\f033"; } .fa-text-height:before { content: "\f034"; } .fa-text-width:before { content: "\f035"; } .fa-align-left:before { content: "\f036"; } .fa-align-center:before { content: "\f037"; } .fa-align-right:before { content: "\f038"; } .fa-align-justify:before { content: "\f039"; } .fa-list:before { content: "\f03a"; } .fa-dedent:before, .fa-outdent:before { content: "\f03b"; } .fa-indent:before { content: "\f03c"; } .fa-video-camera:before { content: "\f03d"; } .fa-photo:before, .fa-image:before, .fa-picture-o:before { content: "\f03e"; } .fa-pencil:before { content: "\f040"; } .fa-map-marker:before { content: "\f041"; } .fa-adjust:before { content: "\f042"; } .fa-tint:before { content: "\f043"; } .fa-edit:before, .fa-pencil-square-o:before { content: "\f044"; } .fa-share-square-o:before { content: "\f045"; } .fa-check-square-o:before { content: "\f046"; } .fa-arrows:before { content: "\f047"; } 
.fa-step-backward:before { content: "\f048"; } .fa-fast-backward:before { content: "\f049"; } .fa-backward:before { content: "\f04a"; } .fa-play:before { content: "\f04b"; } .fa-pause:before { content: "\f04c"; } .fa-stop:before { content: "\f04d"; } .fa-forward:before { content: "\f04e"; } .fa-fast-forward:before { content: "\f050"; } .fa-step-forward:before { content: "\f051"; } .fa-eject:before { content: "\f052"; } .fa-chevron-left:before { content: "\f053"; } .fa-chevron-right:before { content: "\f054"; } .fa-plus-circle:before { content: "\f055"; } .fa-minus-circle:before { content: "\f056"; } .fa-times-circle:before { content: "\f057"; } .fa-check-circle:before { content: "\f058"; } .fa-question-circle:before { content: "\f059"; } .fa-info-circle:before { content: "\f05a"; } .fa-crosshairs:before { content: "\f05b"; } .fa-times-circle-o:before { content: "\f05c"; } .fa-check-circle-o:before { content: "\f05d"; } .fa-ban:before { content: "\f05e"; } .fa-arrow-left:before { content: "\f060"; } .fa-arrow-right:before { content: "\f061"; } .fa-arrow-up:before { content: "\f062"; } .fa-arrow-down:before { content: "\f063"; } .fa-mail-forward:before, .fa-share:before { content: "\f064"; } .fa-expand:before { content: "\f065"; } .fa-compress:before { content: "\f066"; } .fa-plus:before { content: "\f067"; } .fa-minus:before { content: "\f068"; } .fa-asterisk:before { content: "\f069"; } .fa-exclamation-circle:before { content: "\f06a"; } .fa-gift:before { content: "\f06b"; } .fa-leaf:before { content: "\f06c"; } .fa-fire:before { content: "\f06d"; } .fa-eye:before { content: "\f06e"; } .fa-eye-slash:before { content: "\f070"; } .fa-warning:before, .fa-exclamation-triangle:before { content: "\f071"; } .fa-plane:before { content: "\f072"; } .fa-calendar:before { content: "\f073"; } .fa-random:before { content: "\f074"; } .fa-comment:before { content: "\f075"; } .fa-magnet:before { content: "\f076"; } .fa-chevron-up:before { content: "\f077"; } .fa-chevron-down:before { content: "\f078"; } .fa-retweet:before { content: "\f079"; } .fa-shopping-cart:before { content: "\f07a"; } .fa-folder:before { content: "\f07b"; } .fa-folder-open:before { content: "\f07c"; } .fa-arrows-v:before { content: "\f07d"; } .fa-arrows-h:before { content: "\f07e"; } .fa-bar-chart-o:before, .fa-bar-chart:before { content: "\f080"; } .fa-twitter-square:before { content: "\f081"; } .fa-facebook-square:before { content: "\f082"; } .fa-camera-retro:before { content: "\f083"; } .fa-key:before { content: "\f084"; } .fa-gears:before, .fa-cogs:before { content: "\f085"; } .fa-comments:before { content: "\f086"; } .fa-thumbs-o-up:before { content: "\f087"; } .fa-thumbs-o-down:before { content: "\f088"; } .fa-star-half:before { content: "\f089"; } .fa-heart-o:before { content: "\f08a"; } .fa-sign-out:before { content: "\f08b"; } .fa-linkedin-square:before { content: "\f08c"; } .fa-thumb-tack:before { content: "\f08d"; } .fa-external-link:before { content: "\f08e"; } .fa-sign-in:before { content: "\f090"; } .fa-trophy:before { content: "\f091"; } .fa-github-square:before { content: "\f092"; } .fa-upload:before { content: "\f093"; } .fa-lemon-o:before { content: "\f094"; } .fa-phone:before { content: "\f095"; } .fa-square-o:before { content: "\f096"; } .fa-bookmark-o:before { content: "\f097"; } .fa-phone-square:before { content: "\f098"; } .fa-twitter:before { content: "\f099"; } .fa-facebook-f:before, .fa-facebook:before { content: "\f09a"; } .fa-github:before { content: "\f09b"; } .fa-unlock:before { content: "\f09c"; } 
.fa-credit-card:before { content: "\f09d"; } .fa-rss:before { content: "\f09e"; } .fa-hdd-o:before { content: "\f0a0"; } .fa-bullhorn:before { content: "\f0a1"; } .fa-bell:before { content: "\f0f3"; } .fa-certificate:before { content: "\f0a3"; } .fa-hand-o-right:before { content: "\f0a4"; } .fa-hand-o-left:before { content: "\f0a5"; } .fa-hand-o-up:before { content: "\f0a6"; } .fa-hand-o-down:before { content: "\f0a7"; } .fa-arrow-circle-left:before { content: "\f0a8"; } .fa-arrow-circle-right:before { content: "\f0a9"; } .fa-arrow-circle-up:before { content: "\f0aa"; } .fa-arrow-circle-down:before { content: "\f0ab"; } .fa-globe:before { content: "\f0ac"; } .fa-wrench:before { content: "\f0ad"; } .fa-tasks:before { content: "\f0ae"; } .fa-filter:before { content: "\f0b0"; } .fa-briefcase:before { content: "\f0b1"; } .fa-arrows-alt:before { content: "\f0b2"; } .fa-group:before, .fa-users:before { content: "\f0c0"; } .fa-chain:before, .fa-link:before { content: "\f0c1"; } .fa-cloud:before { content: "\f0c2"; } .fa-flask:before { content: "\f0c3"; } .fa-cut:before, .fa-scissors:before { content: "\f0c4"; } .fa-copy:before, .fa-files-o:before { content: "\f0c5"; } .fa-paperclip:before { content: "\f0c6"; } .fa-save:before, .fa-floppy-o:before { content: "\f0c7"; } .fa-square:before { content: "\f0c8"; } .fa-navicon:before, .fa-reorder:before, .fa-bars:before { content: "\f0c9"; } .fa-list-ul:before { content: "\f0ca"; } .fa-list-ol:before { content: "\f0cb"; } .fa-strikethrough:before { content: "\f0cc"; } .fa-underline:before { content: "\f0cd"; } .fa-table:before { content: "\f0ce"; } .fa-magic:before { content: "\f0d0"; } .fa-truck:before { content: "\f0d1"; } .fa-pinterest:before { content: "\f0d2"; } .fa-pinterest-square:before { content: "\f0d3"; } .fa-google-plus-square:before { content: "\f0d4"; } .fa-google-plus:before { content: "\f0d5"; } .fa-money:before { content: "\f0d6"; } .fa-caret-down:before { content: "\f0d7"; } .fa-caret-up:before { content: "\f0d8"; } .fa-caret-left:before { content: "\f0d9"; } .fa-caret-right:before { content: "\f0da"; } .fa-columns:before { content: "\f0db"; } .fa-unsorted:before, .fa-sort:before { content: "\f0dc"; } .fa-sort-down:before, .fa-sort-desc:before { content: "\f0dd"; } .fa-sort-up:before, .fa-sort-asc:before { content: "\f0de"; } .fa-envelope:before { content: "\f0e0"; } .fa-linkedin:before { content: "\f0e1"; } .fa-rotate-left:before, .fa-undo:before { content: "\f0e2"; } .fa-legal:before, .fa-gavel:before { content: "\f0e3"; } .fa-dashboard:before, .fa-tachometer:before { content: "\f0e4"; } .fa-comment-o:before { content: "\f0e5"; } .fa-comments-o:before { content: "\f0e6"; } .fa-flash:before, .fa-bolt:before { content: "\f0e7"; } .fa-sitemap:before { content: "\f0e8"; } .fa-umbrella:before { content: "\f0e9"; } .fa-paste:before, .fa-clipboard:before { content: "\f0ea"; } .fa-lightbulb-o:before { content: "\f0eb"; } .fa-exchange:before { content: "\f0ec"; } .fa-cloud-download:before { content: "\f0ed"; } .fa-cloud-upload:before { content: "\f0ee"; } .fa-user-md:before { content: "\f0f0"; } .fa-stethoscope:before { content: "\f0f1"; } .fa-suitcase:before { content: "\f0f2"; } .fa-bell-o:before { content: "\f0a2"; } .fa-coffee:before { content: "\f0f4"; } .fa-cutlery:before { content: "\f0f5"; } .fa-file-text-o:before { content: "\f0f6"; } .fa-building-o:before { content: "\f0f7"; } .fa-hospital-o:before { content: "\f0f8"; } .fa-ambulance:before { content: "\f0f9"; } .fa-medkit:before { content: "\f0fa"; } .fa-fighter-jet:before { 
content: "\f0fb"; } .fa-beer:before { content: "\f0fc"; } .fa-h-square:before { content: "\f0fd"; } .fa-plus-square:before { content: "\f0fe"; } .fa-angle-double-left:before { content: "\f100"; } .fa-angle-double-right:before { content: "\f101"; } .fa-angle-double-up:before { content: "\f102"; } .fa-angle-double-down:before { content: "\f103"; } .fa-angle-left:before { content: "\f104"; } .fa-angle-right:before { content: "\f105"; } .fa-angle-up:before { content: "\f106"; } .fa-angle-down:before { content: "\f107"; } .fa-desktop:before { content: "\f108"; } .fa-laptop:before { content: "\f109"; } .fa-tablet:before { content: "\f10a"; } .fa-mobile-phone:before, .fa-mobile:before { content: "\f10b"; } .fa-circle-o:before { content: "\f10c"; } .fa-quote-left:before { content: "\f10d"; } .fa-quote-right:before { content: "\f10e"; } .fa-spinner:before { content: "\f110"; } .fa-circle:before { content: "\f111"; } .fa-mail-reply:before, .fa-reply:before { content: "\f112"; } .fa-github-alt:before { content: "\f113"; } .fa-folder-o:before { content: "\f114"; } .fa-folder-open-o:before { content: "\f115"; } .fa-smile-o:before { content: "\f118"; } .fa-frown-o:before { content: "\f119"; } .fa-meh-o:before { content: "\f11a"; } .fa-gamepad:before { content: "\f11b"; } .fa-keyboard-o:before { content: "\f11c"; } .fa-flag-o:before { content: "\f11d"; } .fa-flag-checkered:before { content: "\f11e"; } .fa-terminal:before { content: "\f120"; } .fa-code:before { content: "\f121"; } .fa-mail-reply-all:before, .fa-reply-all:before { content: "\f122"; } .fa-star-half-empty:before, .fa-star-half-full:before, .fa-star-half-o:before { content: "\f123"; } .fa-location-arrow:before { content: "\f124"; } .fa-crop:before { content: "\f125"; } .fa-code-fork:before { content: "\f126"; } .fa-unlink:before, .fa-chain-broken:before { content: "\f127"; } .fa-question:before { content: "\f128"; } .fa-info:before { content: "\f129"; } .fa-exclamation:before { content: "\f12a"; } .fa-superscript:before { content: "\f12b"; } .fa-subscript:before { content: "\f12c"; } .fa-eraser:before { content: "\f12d"; } .fa-puzzle-piece:before { content: "\f12e"; } .fa-microphone:before { content: "\f130"; } .fa-microphone-slash:before { content: "\f131"; } .fa-shield:before { content: "\f132"; } .fa-calendar-o:before { content: "\f133"; } .fa-fire-extinguisher:before { content: "\f134"; } .fa-rocket:before { content: "\f135"; } .fa-maxcdn:before { content: "\f136"; } .fa-chevron-circle-left:before { content: "\f137"; } .fa-chevron-circle-right:before { content: "\f138"; } .fa-chevron-circle-up:before { content: "\f139"; } .fa-chevron-circle-down:before { content: "\f13a"; } .fa-html5:before { content: "\f13b"; } .fa-css3:before { content: "\f13c"; } .fa-anchor:before { content: "\f13d"; } .fa-unlock-alt:before { content: "\f13e"; } .fa-bullseye:before { content: "\f140"; } .fa-ellipsis-h:before { content: "\f141"; } .fa-ellipsis-v:before { content: "\f142"; } .fa-rss-square:before { content: "\f143"; } .fa-play-circle:before { content: "\f144"; } .fa-ticket:before { content: "\f145"; } .fa-minus-square:before { content: "\f146"; } .fa-minus-square-o:before { content: "\f147"; } .fa-level-up:before { content: "\f148"; } .fa-level-down:before { content: "\f149"; } .fa-check-square:before { content: "\f14a"; } .fa-pencil-square:before { content: "\f14b"; } .fa-external-link-square:before { content: "\f14c"; } .fa-share-square:before { content: "\f14d"; } .fa-compass:before { content: "\f14e"; } .fa-toggle-down:before, 
.fa-caret-square-o-down:before { content: "\f150"; } .fa-toggle-up:before, .fa-caret-square-o-up:before { content: "\f151"; } .fa-toggle-right:before, .fa-caret-square-o-right:before { content: "\f152"; } .fa-euro:before, .fa-eur:before { content: "\f153"; } .fa-gbp:before { content: "\f154"; } .fa-dollar:before, .fa-usd:before { content: "\f155"; } .fa-rupee:before, .fa-inr:before { content: "\f156"; } .fa-cny:before, .fa-rmb:before, .fa-yen:before, .fa-jpy:before { content: "\f157"; } .fa-ruble:before, .fa-rouble:before, .fa-rub:before { content: "\f158"; } .fa-won:before, .fa-krw:before { content: "\f159"; } .fa-bitcoin:before, .fa-btc:before { content: "\f15a"; } .fa-file:before { content: "\f15b"; } .fa-file-text:before { content: "\f15c"; } .fa-sort-alpha-asc:before { content: "\f15d"; } .fa-sort-alpha-desc:before { content: "\f15e"; } .fa-sort-amount-asc:before { content: "\f160"; } .fa-sort-amount-desc:before { content: "\f161"; } .fa-sort-numeric-asc:before { content: "\f162"; } .fa-sort-numeric-desc:before { content: "\f163"; } .fa-thumbs-up:before { content: "\f164"; } .fa-thumbs-down:before { content: "\f165"; } .fa-youtube-square:before { content: "\f166"; } .fa-youtube:before { content: "\f167"; } .fa-xing:before { content: "\f168"; } .fa-xing-square:before { content: "\f169"; } .fa-youtube-play:before { content: "\f16a"; } .fa-dropbox:before { content: "\f16b"; } .fa-stack-overflow:before { content: "\f16c"; } .fa-instagram:before { content: "\f16d"; } .fa-flickr:before { content: "\f16e"; } .fa-adn:before { content: "\f170"; } .fa-bitbucket:before { content: "\f171"; } .fa-bitbucket-square:before { content: "\f172"; } .fa-tumblr:before { content: "\f173"; } .fa-tumblr-square:before { content: "\f174"; } .fa-long-arrow-down:before { content: "\f175"; } .fa-long-arrow-up:before { content: "\f176"; } .fa-long-arrow-left:before { content: "\f177"; } .fa-long-arrow-right:before { content: "\f178"; } .fa-apple:before { content: "\f179"; } .fa-windows:before { content: "\f17a"; } .fa-android:before { content: "\f17b"; } .fa-linux:before { content: "\f17c"; } .fa-dribbble:before { content: "\f17d"; } .fa-skype:before { content: "\f17e"; } .fa-foursquare:before { content: "\f180"; } .fa-trello:before { content: "\f181"; } .fa-female:before { content: "\f182"; } .fa-male:before { content: "\f183"; } .fa-gittip:before, .fa-gratipay:before { content: "\f184"; } .fa-sun-o:before { content: "\f185"; } .fa-moon-o:before { content: "\f186"; } .fa-archive:before { content: "\f187"; } .fa-bug:before { content: "\f188"; } .fa-vk:before { content: "\f189"; } .fa-weibo:before { content: "\f18a"; } .fa-renren:before { content: "\f18b"; } .fa-pagelines:before { content: "\f18c"; } .fa-stack-exchange:before { content: "\f18d"; } .fa-arrow-circle-o-right:before { content: "\f18e"; } .fa-arrow-circle-o-left:before { content: "\f190"; } .fa-toggle-left:before, .fa-caret-square-o-left:before { content: "\f191"; } .fa-dot-circle-o:before { content: "\f192"; } .fa-wheelchair:before { content: "\f193"; } .fa-vimeo-square:before { content: "\f194"; } .fa-turkish-lira:before, .fa-try:before { content: "\f195"; } .fa-plus-square-o:before { content: "\f196"; } .fa-space-shuttle:before { content: "\f197"; } .fa-slack:before { content: "\f198"; } .fa-envelope-square:before { content: "\f199"; } .fa-wordpress:before { content: "\f19a"; } .fa-openid:before { content: "\f19b"; } .fa-institution:before, .fa-bank:before, .fa-university:before { content: "\f19c"; } .fa-mortar-board:before, .fa-graduation-cap:before 
{ content: "\f19d"; } .fa-yahoo:before { content: "\f19e"; } .fa-google:before { content: "\f1a0"; } .fa-reddit:before { content: "\f1a1"; } .fa-reddit-square:before { content: "\f1a2"; } .fa-stumbleupon-circle:before { content: "\f1a3"; } .fa-stumbleupon:before { content: "\f1a4"; } .fa-delicious:before { content: "\f1a5"; } .fa-digg:before { content: "\f1a6"; } .fa-pied-piper:before { content: "\f1a7"; } .fa-pied-piper-alt:before { content: "\f1a8"; } .fa-drupal:before { content: "\f1a9"; } .fa-joomla:before { content: "\f1aa"; } .fa-language:before { content: "\f1ab"; } .fa-fax:before { content: "\f1ac"; } .fa-building:before { content: "\f1ad"; } .fa-child:before { content: "\f1ae"; } .fa-paw:before { content: "\f1b0"; } .fa-spoon:before { content: "\f1b1"; } .fa-cube:before { content: "\f1b2"; } .fa-cubes:before { content: "\f1b3"; } .fa-behance:before { content: "\f1b4"; } .fa-behance-square:before { content: "\f1b5"; } .fa-steam:before { content: "\f1b6"; } .fa-steam-square:before { content: "\f1b7"; } .fa-recycle:before { content: "\f1b8"; } .fa-automobile:before, .fa-car:before { content: "\f1b9"; } .fa-cab:before, .fa-taxi:before { content: "\f1ba"; } .fa-tree:before { content: "\f1bb"; } .fa-spotify:before { content: "\f1bc"; } .fa-deviantart:before { content: "\f1bd"; } .fa-soundcloud:before { content: "\f1be"; } .fa-database:before { content: "\f1c0"; } .fa-file-pdf-o:before { content: "\f1c1"; } .fa-file-word-o:before { content: "\f1c2"; } .fa-file-excel-o:before { content: "\f1c3"; } .fa-file-powerpoint-o:before { content: "\f1c4"; } .fa-file-photo-o:before, .fa-file-picture-o:before, .fa-file-image-o:before { content: "\f1c5"; } .fa-file-zip-o:before, .fa-file-archive-o:before { content: "\f1c6"; } .fa-file-sound-o:before, .fa-file-audio-o:before { content: "\f1c7"; } .fa-file-movie-o:before, .fa-file-video-o:before { content: "\f1c8"; } .fa-file-code-o:before { content: "\f1c9"; } .fa-vine:before { content: "\f1ca"; } .fa-codepen:before { content: "\f1cb"; } .fa-jsfiddle:before { content: "\f1cc"; } .fa-life-bouy:before, .fa-life-buoy:before, .fa-life-saver:before, .fa-support:before, .fa-life-ring:before { content: "\f1cd"; } .fa-circle-o-notch:before { content: "\f1ce"; } .fa-ra:before, .fa-rebel:before { content: "\f1d0"; } .fa-ge:before, .fa-empire:before { content: "\f1d1"; } .fa-git-square:before { content: "\f1d2"; } .fa-git:before { content: "\f1d3"; } .fa-hacker-news:before { content: "\f1d4"; } .fa-tencent-weibo:before { content: "\f1d5"; } .fa-qq:before { content: "\f1d6"; } .fa-wechat:before, .fa-weixin:before { content: "\f1d7"; } .fa-send:before, .fa-paper-plane:before { content: "\f1d8"; } .fa-send-o:before, .fa-paper-plane-o:before { content: "\f1d9"; } .fa-history:before { content: "\f1da"; } .fa-genderless:before, .fa-circle-thin:before { content: "\f1db"; } .fa-header:before { content: "\f1dc"; } .fa-paragraph:before { content: "\f1dd"; } .fa-sliders:before { content: "\f1de"; } .fa-share-alt:before { content: "\f1e0"; } .fa-share-alt-square:before { content: "\f1e1"; } .fa-bomb:before { content: "\f1e2"; } .fa-soccer-ball-o:before, .fa-futbol-o:before { content: "\f1e3"; } .fa-tty:before { content: "\f1e4"; } .fa-binoculars:before { content: "\f1e5"; } .fa-plug:before { content: "\f1e6"; } .fa-slideshare:before { content: "\f1e7"; } .fa-twitch:before { content: "\f1e8"; } .fa-yelp:before { content: "\f1e9"; } .fa-newspaper-o:before { content: "\f1ea"; } .fa-wifi:before { content: "\f1eb"; } .fa-calculator:before { content: "\f1ec"; } .fa-paypal:before { 
content: "\f1ed"; } .fa-google-wallet:before { content: "\f1ee"; } .fa-cc-visa:before { content: "\f1f0"; } .fa-cc-mastercard:before { content: "\f1f1"; } .fa-cc-discover:before { content: "\f1f2"; } .fa-cc-amex:before { content: "\f1f3"; } .fa-cc-paypal:before { content: "\f1f4"; } .fa-cc-stripe:before { content: "\f1f5"; } .fa-bell-slash:before { content: "\f1f6"; } .fa-bell-slash-o:before { content: "\f1f7"; } .fa-trash:before { content: "\f1f8"; } .fa-copyright:before { content: "\f1f9"; } .fa-at:before { content: "\f1fa"; } .fa-eyedropper:before { content: "\f1fb"; } .fa-paint-brush:before { content: "\f1fc"; } .fa-birthday-cake:before { content: "\f1fd"; } .fa-area-chart:before { content: "\f1fe"; } .fa-pie-chart:before { content: "\f200"; } .fa-line-chart:before { content: "\f201"; } .fa-lastfm:before { content: "\f202"; } .fa-lastfm-square:before { content: "\f203"; } .fa-toggle-off:before { content: "\f204"; } .fa-toggle-on:before { content: "\f205"; } .fa-bicycle:before { content: "\f206"; } .fa-bus:before { content: "\f207"; } .fa-ioxhost:before { content: "\f208"; } .fa-angellist:before { content: "\f209"; } .fa-cc:before { content: "\f20a"; } .fa-shekel:before, .fa-sheqel:before, .fa-ils:before { content: "\f20b"; } .fa-meanpath:before { content: "\f20c"; } .fa-buysellads:before { content: "\f20d"; } .fa-connectdevelop:before { content: "\f20e"; } .fa-dashcube:before { content: "\f210"; } .fa-forumbee:before { content: "\f211"; } .fa-leanpub:before { content: "\f212"; } .fa-sellsy:before { content: "\f213"; } .fa-shirtsinbulk:before { content: "\f214"; } .fa-simplybuilt:before { content: "\f215"; } .fa-skyatlas:before { content: "\f216"; } .fa-cart-plus:before { content: "\f217"; } .fa-cart-arrow-down:before { content: "\f218"; } .fa-diamond:before { content: "\f219"; } .fa-ship:before { content: "\f21a"; } .fa-user-secret:before { content: "\f21b"; } .fa-motorcycle:before { content: "\f21c"; } .fa-street-view:before { content: "\f21d"; } .fa-heartbeat:before { content: "\f21e"; } .fa-venus:before { content: "\f221"; } .fa-mars:before { content: "\f222"; } .fa-mercury:before { content: "\f223"; } .fa-transgender:before { content: "\f224"; } .fa-transgender-alt:before { content: "\f225"; } .fa-venus-double:before { content: "\f226"; } .fa-mars-double:before { content: "\f227"; } .fa-venus-mars:before { content: "\f228"; } .fa-mars-stroke:before { content: "\f229"; } .fa-mars-stroke-v:before { content: "\f22a"; } .fa-mars-stroke-h:before { content: "\f22b"; } .fa-neuter:before { content: "\f22c"; } .fa-facebook-official:before { content: "\f230"; } .fa-pinterest-p:before { content: "\f231"; } .fa-whatsapp:before { content: "\f232"; } .fa-server:before { content: "\f233"; } .fa-user-plus:before { content: "\f234"; } .fa-user-times:before { content: "\f235"; } .fa-hotel:before, .fa-bed:before { content: "\f236"; } .fa-viacoin:before { content: "\f237"; } .fa-train:before { content: "\f238"; } .fa-subway:before { content: "\f239"; } .fa-medium:before { content: "\f23a"; } ================================================ FILE: website/font-awesome/less/animated.less ================================================ // Animated Icons // -------------------------- .@{fa-css-prefix}-spin { -webkit-animation: fa-spin 2s infinite linear; animation: fa-spin 2s infinite linear; } .@{fa-css-prefix}-pulse { -webkit-animation: fa-spin 1s infinite steps(8); animation: fa-spin 1s infinite steps(8); } @-webkit-keyframes fa-spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 
100% { -webkit-transform: rotate(359deg); transform: rotate(359deg); } } @keyframes fa-spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(359deg); transform: rotate(359deg); } } ================================================ FILE: website/font-awesome/less/bordered-pulled.less ================================================ // Bordered & Pulled // ------------------------- .@{fa-css-prefix}-border { padding: .2em .25em .15em; border: solid .08em @fa-border-color; border-radius: .1em; } .pull-right { float: right; } .pull-left { float: left; } .@{fa-css-prefix} { &.pull-left { margin-right: .3em; } &.pull-right { margin-left: .3em; } } ================================================ FILE: website/font-awesome/less/core.less ================================================ // Base Class Definition // ------------------------- .@{fa-css-prefix} { display: inline-block; font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration font-size: inherit; // can't have font-size inherit on line above, so need to override text-rendering: auto; // optimizelegibility throws things off #1094 -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; transform: translate(0, 0); // ensures no half-pixel rendering in firefox } ================================================ FILE: website/font-awesome/less/fixed-width.less ================================================ // Fixed Width Icons // ------------------------- .@{fa-css-prefix}-fw { width: (18em / 14); text-align: center; } ================================================ FILE: website/font-awesome/less/font-awesome.less ================================================ /*! * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) */ @import "variables.less"; @import "mixins.less"; @import "path.less"; @import "core.less"; @import "larger.less"; @import "fixed-width.less"; @import "list.less"; @import "bordered-pulled.less"; @import "animated.less"; @import "rotated-flipped.less"; @import "stacked.less"; @import "icons.less"; ================================================ FILE: website/font-awesome/less/icons.less ================================================ /* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen readers do not read off random characters that represent icons */ .@{fa-css-prefix}-glass:before { content: @fa-var-glass; } .@{fa-css-prefix}-music:before { content: @fa-var-music; } .@{fa-css-prefix}-search:before { content: @fa-var-search; } .@{fa-css-prefix}-envelope-o:before { content: @fa-var-envelope-o; } .@{fa-css-prefix}-heart:before { content: @fa-var-heart; } .@{fa-css-prefix}-star:before { content: @fa-var-star; } .@{fa-css-prefix}-star-o:before { content: @fa-var-star-o; } .@{fa-css-prefix}-user:before { content: @fa-var-user; } .@{fa-css-prefix}-film:before { content: @fa-var-film; } .@{fa-css-prefix}-th-large:before { content: @fa-var-th-large; } .@{fa-css-prefix}-th:before { content: @fa-var-th; } .@{fa-css-prefix}-th-list:before { content: @fa-var-th-list; } .@{fa-css-prefix}-check:before { content: @fa-var-check; } .@{fa-css-prefix}-remove:before, .@{fa-css-prefix}-close:before, .@{fa-css-prefix}-times:before { content: @fa-var-times; } .@{fa-css-prefix}-search-plus:before { content: @fa-var-search-plus; } .@{fa-css-prefix}-search-minus:before { content: @fa-var-search-minus; } 

================================================
FILE: website/font-awesome/less/icons.less
================================================
/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
   readers do not read off random characters that represent icons */

.@{fa-css-prefix}-glass:before { content: @fa-var-glass; }
.@{fa-css-prefix}-music:before { content: @fa-var-music; }
.@{fa-css-prefix}-search:before { content: @fa-var-search; }
.@{fa-css-prefix}-envelope-o:before { content: @fa-var-envelope-o; }
.@{fa-css-prefix}-heart:before { content: @fa-var-heart; }
.@{fa-css-prefix}-star:before { content: @fa-var-star; }
.@{fa-css-prefix}-star-o:before { content: @fa-var-star-o; }
.@{fa-css-prefix}-user:before { content: @fa-var-user; }
.@{fa-css-prefix}-film:before { content: @fa-var-film; }
.@{fa-css-prefix}-th-large:before { content: @fa-var-th-large; }
.@{fa-css-prefix}-th:before { content: @fa-var-th; }
.@{fa-css-prefix}-th-list:before { content: @fa-var-th-list; }
.@{fa-css-prefix}-check:before { content: @fa-var-check; }
.@{fa-css-prefix}-remove:before,
.@{fa-css-prefix}-close:before,
.@{fa-css-prefix}-times:before { content: @fa-var-times; }
.@{fa-css-prefix}-search-plus:before { content: @fa-var-search-plus; }
.@{fa-css-prefix}-search-minus:before { content: @fa-var-search-minus; }
.@{fa-css-prefix}-power-off:before { content: @fa-var-power-off; }
.@{fa-css-prefix}-signal:before { content: @fa-var-signal; }
.@{fa-css-prefix}-gear:before,
.@{fa-css-prefix}-cog:before { content: @fa-var-cog; }
.@{fa-css-prefix}-trash-o:before { content: @fa-var-trash-o; }
.@{fa-css-prefix}-home:before { content: @fa-var-home; }
.@{fa-css-prefix}-file-o:before { content: @fa-var-file-o; }
.@{fa-css-prefix}-clock-o:before { content: @fa-var-clock-o; }
.@{fa-css-prefix}-road:before { content: @fa-var-road; }
.@{fa-css-prefix}-download:before { content: @fa-var-download; }
.@{fa-css-prefix}-arrow-circle-o-down:before { content: @fa-var-arrow-circle-o-down; }
.@{fa-css-prefix}-arrow-circle-o-up:before { content: @fa-var-arrow-circle-o-up; }
.@{fa-css-prefix}-inbox:before { content: @fa-var-inbox; }
.@{fa-css-prefix}-play-circle-o:before { content: @fa-var-play-circle-o; }
.@{fa-css-prefix}-rotate-right:before,
.@{fa-css-prefix}-repeat:before { content: @fa-var-repeat; }
.@{fa-css-prefix}-refresh:before { content: @fa-var-refresh; }
.@{fa-css-prefix}-list-alt:before { content: @fa-var-list-alt; }
.@{fa-css-prefix}-lock:before { content: @fa-var-lock; }
.@{fa-css-prefix}-flag:before { content: @fa-var-flag; }
.@{fa-css-prefix}-headphones:before { content: @fa-var-headphones; }
.@{fa-css-prefix}-volume-off:before { content: @fa-var-volume-off; }
.@{fa-css-prefix}-volume-down:before { content: @fa-var-volume-down; }
.@{fa-css-prefix}-volume-up:before { content: @fa-var-volume-up; }
.@{fa-css-prefix}-qrcode:before { content: @fa-var-qrcode; }
.@{fa-css-prefix}-barcode:before { content: @fa-var-barcode; }
.@{fa-css-prefix}-tag:before { content: @fa-var-tag; }
.@{fa-css-prefix}-tags:before { content: @fa-var-tags; }
.@{fa-css-prefix}-book:before { content: @fa-var-book; }
.@{fa-css-prefix}-bookmark:before { content: @fa-var-bookmark; }
.@{fa-css-prefix}-print:before { content: @fa-var-print; }
.@{fa-css-prefix}-camera:before { content: @fa-var-camera; }
.@{fa-css-prefix}-font:before { content: @fa-var-font; }
.@{fa-css-prefix}-bold:before { content: @fa-var-bold; }
.@{fa-css-prefix}-italic:before { content: @fa-var-italic; }
.@{fa-css-prefix}-text-height:before { content: @fa-var-text-height; }
.@{fa-css-prefix}-text-width:before { content: @fa-var-text-width; }
.@{fa-css-prefix}-align-left:before { content: @fa-var-align-left; }
.@{fa-css-prefix}-align-center:before { content: @fa-var-align-center; }
.@{fa-css-prefix}-align-right:before { content: @fa-var-align-right; }
.@{fa-css-prefix}-align-justify:before { content: @fa-var-align-justify; }
.@{fa-css-prefix}-list:before { content: @fa-var-list; }
.@{fa-css-prefix}-dedent:before,
.@{fa-css-prefix}-outdent:before { content: @fa-var-outdent; }
.@{fa-css-prefix}-indent:before { content: @fa-var-indent; }
.@{fa-css-prefix}-video-camera:before { content: @fa-var-video-camera; }
.@{fa-css-prefix}-photo:before,
.@{fa-css-prefix}-image:before,
.@{fa-css-prefix}-picture-o:before { content: @fa-var-picture-o; }
.@{fa-css-prefix}-pencil:before { content: @fa-var-pencil; }
.@{fa-css-prefix}-map-marker:before { content: @fa-var-map-marker; }
.@{fa-css-prefix}-adjust:before { content: @fa-var-adjust; }
.@{fa-css-prefix}-tint:before { content: @fa-var-tint; }
.@{fa-css-prefix}-edit:before,
.@{fa-css-prefix}-pencil-square-o:before { content: @fa-var-pencil-square-o; }
.@{fa-css-prefix}-share-square-o:before { content: @fa-var-share-square-o; }
.@{fa-css-prefix}-check-square-o:before { content: @fa-var-check-square-o; }
.@{fa-css-prefix}-arrows:before { content: @fa-var-arrows; }
.@{fa-css-prefix}-step-backward:before { content: @fa-var-step-backward; }
.@{fa-css-prefix}-fast-backward:before { content: @fa-var-fast-backward; }
.@{fa-css-prefix}-backward:before { content: @fa-var-backward; }
.@{fa-css-prefix}-play:before { content: @fa-var-play; }
.@{fa-css-prefix}-pause:before { content: @fa-var-pause; }
.@{fa-css-prefix}-stop:before { content: @fa-var-stop; }
.@{fa-css-prefix}-forward:before { content: @fa-var-forward; }
.@{fa-css-prefix}-fast-forward:before { content: @fa-var-fast-forward; }
.@{fa-css-prefix}-step-forward:before { content: @fa-var-step-forward; }
.@{fa-css-prefix}-eject:before { content: @fa-var-eject; }
.@{fa-css-prefix}-chevron-left:before { content: @fa-var-chevron-left; }
.@{fa-css-prefix}-chevron-right:before { content: @fa-var-chevron-right; }
.@{fa-css-prefix}-plus-circle:before { content: @fa-var-plus-circle; }
.@{fa-css-prefix}-minus-circle:before { content: @fa-var-minus-circle; }
.@{fa-css-prefix}-times-circle:before { content: @fa-var-times-circle; }
.@{fa-css-prefix}-check-circle:before { content: @fa-var-check-circle; }
.@{fa-css-prefix}-question-circle:before { content: @fa-var-question-circle; }
.@{fa-css-prefix}-info-circle:before { content: @fa-var-info-circle; }
.@{fa-css-prefix}-crosshairs:before { content: @fa-var-crosshairs; }
.@{fa-css-prefix}-times-circle-o:before { content: @fa-var-times-circle-o; }
.@{fa-css-prefix}-check-circle-o:before { content: @fa-var-check-circle-o; }
.@{fa-css-prefix}-ban:before { content: @fa-var-ban; }
.@{fa-css-prefix}-arrow-left:before { content: @fa-var-arrow-left; }
.@{fa-css-prefix}-arrow-right:before { content: @fa-var-arrow-right; }
.@{fa-css-prefix}-arrow-up:before { content: @fa-var-arrow-up; }
.@{fa-css-prefix}-arrow-down:before { content: @fa-var-arrow-down; }
.@{fa-css-prefix}-mail-forward:before,
.@{fa-css-prefix}-share:before { content: @fa-var-share; }
.@{fa-css-prefix}-expand:before { content: @fa-var-expand; }
.@{fa-css-prefix}-compress:before { content: @fa-var-compress; }
.@{fa-css-prefix}-plus:before { content: @fa-var-plus; }
.@{fa-css-prefix}-minus:before { content: @fa-var-minus; }
.@{fa-css-prefix}-asterisk:before { content: @fa-var-asterisk; }
.@{fa-css-prefix}-exclamation-circle:before { content: @fa-var-exclamation-circle; }
.@{fa-css-prefix}-gift:before { content: @fa-var-gift; }
.@{fa-css-prefix}-leaf:before { content: @fa-var-leaf; }
.@{fa-css-prefix}-fire:before { content: @fa-var-fire; }
.@{fa-css-prefix}-eye:before { content: @fa-var-eye; }
.@{fa-css-prefix}-eye-slash:before { content: @fa-var-eye-slash; }
.@{fa-css-prefix}-warning:before,
.@{fa-css-prefix}-exclamation-triangle:before { content: @fa-var-exclamation-triangle; }
.@{fa-css-prefix}-plane:before { content: @fa-var-plane; }
.@{fa-css-prefix}-calendar:before { content: @fa-var-calendar; }
.@{fa-css-prefix}-random:before { content: @fa-var-random; }
.@{fa-css-prefix}-comment:before { content: @fa-var-comment; }
.@{fa-css-prefix}-magnet:before { content: @fa-var-magnet; }
.@{fa-css-prefix}-chevron-up:before { content: @fa-var-chevron-up; }
.@{fa-css-prefix}-chevron-down:before { content: @fa-var-chevron-down; }
.@{fa-css-prefix}-retweet:before { content: @fa-var-retweet; }
.@{fa-css-prefix}-shopping-cart:before { content: @fa-var-shopping-cart; }
.@{fa-css-prefix}-folder:before { content: @fa-var-folder; }
.@{fa-css-prefix}-folder-open:before { content: @fa-var-folder-open; }
.@{fa-css-prefix}-arrows-v:before { content: @fa-var-arrows-v; }
.@{fa-css-prefix}-arrows-h:before { content: @fa-var-arrows-h; }
.@{fa-css-prefix}-bar-chart-o:before,
.@{fa-css-prefix}-bar-chart:before { content: @fa-var-bar-chart; }
.@{fa-css-prefix}-twitter-square:before { content: @fa-var-twitter-square; }
.@{fa-css-prefix}-facebook-square:before { content: @fa-var-facebook-square; }
.@{fa-css-prefix}-camera-retro:before { content: @fa-var-camera-retro; }
.@{fa-css-prefix}-key:before { content: @fa-var-key; }
.@{fa-css-prefix}-gears:before,
.@{fa-css-prefix}-cogs:before { content: @fa-var-cogs; }
.@{fa-css-prefix}-comments:before { content: @fa-var-comments; }
.@{fa-css-prefix}-thumbs-o-up:before { content: @fa-var-thumbs-o-up; }
.@{fa-css-prefix}-thumbs-o-down:before { content: @fa-var-thumbs-o-down; }
.@{fa-css-prefix}-star-half:before { content: @fa-var-star-half; }
.@{fa-css-prefix}-heart-o:before { content: @fa-var-heart-o; }
.@{fa-css-prefix}-sign-out:before { content: @fa-var-sign-out; }
.@{fa-css-prefix}-linkedin-square:before { content: @fa-var-linkedin-square; }
.@{fa-css-prefix}-thumb-tack:before { content: @fa-var-thumb-tack; }
.@{fa-css-prefix}-external-link:before { content: @fa-var-external-link; }
.@{fa-css-prefix}-sign-in:before { content: @fa-var-sign-in; }
.@{fa-css-prefix}-trophy:before { content: @fa-var-trophy; }
.@{fa-css-prefix}-github-square:before { content: @fa-var-github-square; }
.@{fa-css-prefix}-upload:before { content: @fa-var-upload; }
.@{fa-css-prefix}-lemon-o:before { content: @fa-var-lemon-o; }
.@{fa-css-prefix}-phone:before { content: @fa-var-phone; }
.@{fa-css-prefix}-square-o:before { content: @fa-var-square-o; }
.@{fa-css-prefix}-bookmark-o:before { content: @fa-var-bookmark-o; }
.@{fa-css-prefix}-phone-square:before { content: @fa-var-phone-square; }
.@{fa-css-prefix}-twitter:before { content: @fa-var-twitter; }
.@{fa-css-prefix}-facebook-f:before,
.@{fa-css-prefix}-facebook:before { content: @fa-var-facebook; }
.@{fa-css-prefix}-github:before { content: @fa-var-github; }
.@{fa-css-prefix}-unlock:before { content: @fa-var-unlock; }
.@{fa-css-prefix}-credit-card:before { content: @fa-var-credit-card; }
.@{fa-css-prefix}-rss:before { content: @fa-var-rss; }
.@{fa-css-prefix}-hdd-o:before { content: @fa-var-hdd-o; }
.@{fa-css-prefix}-bullhorn:before { content: @fa-var-bullhorn; }
.@{fa-css-prefix}-bell:before { content: @fa-var-bell; }
.@{fa-css-prefix}-certificate:before { content: @fa-var-certificate; }
.@{fa-css-prefix}-hand-o-right:before { content: @fa-var-hand-o-right; }
.@{fa-css-prefix}-hand-o-left:before { content: @fa-var-hand-o-left; }
.@{fa-css-prefix}-hand-o-up:before { content: @fa-var-hand-o-up; }
.@{fa-css-prefix}-hand-o-down:before { content: @fa-var-hand-o-down; }
.@{fa-css-prefix}-arrow-circle-left:before { content: @fa-var-arrow-circle-left; }
.@{fa-css-prefix}-arrow-circle-right:before { content: @fa-var-arrow-circle-right; }
.@{fa-css-prefix}-arrow-circle-up:before { content: @fa-var-arrow-circle-up; }
.@{fa-css-prefix}-arrow-circle-down:before { content: @fa-var-arrow-circle-down; }
.@{fa-css-prefix}-globe:before { content: @fa-var-globe; }
.@{fa-css-prefix}-wrench:before { content: @fa-var-wrench; }
.@{fa-css-prefix}-tasks:before { content: @fa-var-tasks; }
.@{fa-css-prefix}-filter:before { content: @fa-var-filter; }
.@{fa-css-prefix}-briefcase:before { content: @fa-var-briefcase; }
.@{fa-css-prefix}-arrows-alt:before { content: @fa-var-arrows-alt; }
.@{fa-css-prefix}-group:before,
.@{fa-css-prefix}-users:before { content: @fa-var-users; }
.@{fa-css-prefix}-chain:before,
.@{fa-css-prefix}-link:before { content: @fa-var-link; }
.@{fa-css-prefix}-cloud:before { content: @fa-var-cloud; }
.@{fa-css-prefix}-flask:before { content: @fa-var-flask; }
.@{fa-css-prefix}-cut:before,
.@{fa-css-prefix}-scissors:before { content: @fa-var-scissors; }
.@{fa-css-prefix}-copy:before,
.@{fa-css-prefix}-files-o:before { content: @fa-var-files-o; }
.@{fa-css-prefix}-paperclip:before { content: @fa-var-paperclip; }
.@{fa-css-prefix}-save:before,
.@{fa-css-prefix}-floppy-o:before { content: @fa-var-floppy-o; }
.@{fa-css-prefix}-square:before { content: @fa-var-square; }
.@{fa-css-prefix}-navicon:before,
.@{fa-css-prefix}-reorder:before,
.@{fa-css-prefix}-bars:before { content: @fa-var-bars; }
.@{fa-css-prefix}-list-ul:before { content: @fa-var-list-ul; }
.@{fa-css-prefix}-list-ol:before { content: @fa-var-list-ol; }
.@{fa-css-prefix}-strikethrough:before { content: @fa-var-strikethrough; }
.@{fa-css-prefix}-underline:before { content: @fa-var-underline; }
.@{fa-css-prefix}-table:before { content: @fa-var-table; }
.@{fa-css-prefix}-magic:before { content: @fa-var-magic; }
.@{fa-css-prefix}-truck:before { content: @fa-var-truck; }
.@{fa-css-prefix}-pinterest:before { content: @fa-var-pinterest; }
.@{fa-css-prefix}-pinterest-square:before { content: @fa-var-pinterest-square; }
.@{fa-css-prefix}-google-plus-square:before { content: @fa-var-google-plus-square; }
.@{fa-css-prefix}-google-plus:before { content: @fa-var-google-plus; }
.@{fa-css-prefix}-money:before { content: @fa-var-money; }
.@{fa-css-prefix}-caret-down:before { content: @fa-var-caret-down; }
.@{fa-css-prefix}-caret-up:before { content: @fa-var-caret-up; }
.@{fa-css-prefix}-caret-left:before { content: @fa-var-caret-left; }
.@{fa-css-prefix}-caret-right:before { content: @fa-var-caret-right; }
.@{fa-css-prefix}-columns:before { content: @fa-var-columns; }
.@{fa-css-prefix}-unsorted:before,
.@{fa-css-prefix}-sort:before { content: @fa-var-sort; }
.@{fa-css-prefix}-sort-down:before,
.@{fa-css-prefix}-sort-desc:before { content: @fa-var-sort-desc; }
.@{fa-css-prefix}-sort-up:before,
.@{fa-css-prefix}-sort-asc:before { content: @fa-var-sort-asc; }
.@{fa-css-prefix}-envelope:before { content: @fa-var-envelope; }
.@{fa-css-prefix}-linkedin:before { content: @fa-var-linkedin; }
.@{fa-css-prefix}-rotate-left:before,
.@{fa-css-prefix}-undo:before { content: @fa-var-undo; }
.@{fa-css-prefix}-legal:before,
.@{fa-css-prefix}-gavel:before { content: @fa-var-gavel; }
.@{fa-css-prefix}-dashboard:before,
.@{fa-css-prefix}-tachometer:before { content: @fa-var-tachometer; }
.@{fa-css-prefix}-comment-o:before { content: @fa-var-comment-o; }
.@{fa-css-prefix}-comments-o:before { content: @fa-var-comments-o; }
.@{fa-css-prefix}-flash:before,
.@{fa-css-prefix}-bolt:before { content: @fa-var-bolt; }
.@{fa-css-prefix}-sitemap:before { content: @fa-var-sitemap; }
.@{fa-css-prefix}-umbrella:before { content: @fa-var-umbrella; }
.@{fa-css-prefix}-paste:before,
.@{fa-css-prefix}-clipboard:before { content: @fa-var-clipboard; }
.@{fa-css-prefix}-lightbulb-o:before { content: @fa-var-lightbulb-o; }
.@{fa-css-prefix}-exchange:before { content: @fa-var-exchange; }
.@{fa-css-prefix}-cloud-download:before { content: @fa-var-cloud-download; }
.@{fa-css-prefix}-cloud-upload:before { content: @fa-var-cloud-upload; }
.@{fa-css-prefix}-user-md:before { content: @fa-var-user-md; }
.@{fa-css-prefix}-stethoscope:before { content: @fa-var-stethoscope; }
.@{fa-css-prefix}-suitcase:before { content: @fa-var-suitcase; }
.@{fa-css-prefix}-bell-o:before { content: @fa-var-bell-o; }
.@{fa-css-prefix}-coffee:before { content: @fa-var-coffee; }
.@{fa-css-prefix}-cutlery:before { content: @fa-var-cutlery; }
.@{fa-css-prefix}-file-text-o:before { content: @fa-var-file-text-o; }
.@{fa-css-prefix}-building-o:before { content: @fa-var-building-o; }
.@{fa-css-prefix}-hospital-o:before { content: @fa-var-hospital-o; }
.@{fa-css-prefix}-ambulance:before { content: @fa-var-ambulance; }
.@{fa-css-prefix}-medkit:before { content: @fa-var-medkit; }
.@{fa-css-prefix}-fighter-jet:before { content: @fa-var-fighter-jet; }
.@{fa-css-prefix}-beer:before { content: @fa-var-beer; }
.@{fa-css-prefix}-h-square:before { content: @fa-var-h-square; }
.@{fa-css-prefix}-plus-square:before { content: @fa-var-plus-square; }
.@{fa-css-prefix}-angle-double-left:before { content: @fa-var-angle-double-left; }
.@{fa-css-prefix}-angle-double-right:before { content: @fa-var-angle-double-right; }
.@{fa-css-prefix}-angle-double-up:before { content: @fa-var-angle-double-up; }
.@{fa-css-prefix}-angle-double-down:before { content: @fa-var-angle-double-down; }
.@{fa-css-prefix}-angle-left:before { content: @fa-var-angle-left; }
.@{fa-css-prefix}-angle-right:before { content: @fa-var-angle-right; }
.@{fa-css-prefix}-angle-up:before { content: @fa-var-angle-up; }
.@{fa-css-prefix}-angle-down:before { content: @fa-var-angle-down; }
.@{fa-css-prefix}-desktop:before { content: @fa-var-desktop; }
.@{fa-css-prefix}-laptop:before { content: @fa-var-laptop; }
.@{fa-css-prefix}-tablet:before { content: @fa-var-tablet; }
.@{fa-css-prefix}-mobile-phone:before,
.@{fa-css-prefix}-mobile:before { content: @fa-var-mobile; }
.@{fa-css-prefix}-circle-o:before { content: @fa-var-circle-o; }
.@{fa-css-prefix}-quote-left:before { content: @fa-var-quote-left; }
.@{fa-css-prefix}-quote-right:before { content: @fa-var-quote-right; }
.@{fa-css-prefix}-spinner:before { content: @fa-var-spinner; }
.@{fa-css-prefix}-circle:before { content: @fa-var-circle; }
.@{fa-css-prefix}-mail-reply:before,
.@{fa-css-prefix}-reply:before { content: @fa-var-reply; }
.@{fa-css-prefix}-github-alt:before { content: @fa-var-github-alt; }
.@{fa-css-prefix}-folder-o:before { content: @fa-var-folder-o; }
.@{fa-css-prefix}-folder-open-o:before { content: @fa-var-folder-open-o; }
.@{fa-css-prefix}-smile-o:before { content: @fa-var-smile-o; }
.@{fa-css-prefix}-frown-o:before { content: @fa-var-frown-o; }
.@{fa-css-prefix}-meh-o:before { content: @fa-var-meh-o; }
.@{fa-css-prefix}-gamepad:before { content: @fa-var-gamepad; }
.@{fa-css-prefix}-keyboard-o:before { content: @fa-var-keyboard-o; }
.@{fa-css-prefix}-flag-o:before { content: @fa-var-flag-o; }
.@{fa-css-prefix}-flag-checkered:before { content: @fa-var-flag-checkered; }
.@{fa-css-prefix}-terminal:before { content: @fa-var-terminal; }
.@{fa-css-prefix}-code:before { content: @fa-var-code; }
.@{fa-css-prefix}-mail-reply-all:before,
.@{fa-css-prefix}-reply-all:before { content: @fa-var-reply-all; }
.@{fa-css-prefix}-star-half-empty:before,
.@{fa-css-prefix}-star-half-full:before,
.@{fa-css-prefix}-star-half-o:before { content: @fa-var-star-half-o; }
.@{fa-css-prefix}-location-arrow:before { content: @fa-var-location-arrow; }
.@{fa-css-prefix}-crop:before { content: @fa-var-crop; }
.@{fa-css-prefix}-code-fork:before { content: @fa-var-code-fork; }
.@{fa-css-prefix}-unlink:before,
.@{fa-css-prefix}-chain-broken:before { content: @fa-var-chain-broken; }
.@{fa-css-prefix}-question:before { content: @fa-var-question; }
.@{fa-css-prefix}-info:before { content: @fa-var-info; }
.@{fa-css-prefix}-exclamation:before { content: @fa-var-exclamation; }
.@{fa-css-prefix}-superscript:before { content: @fa-var-superscript; }
.@{fa-css-prefix}-subscript:before { content: @fa-var-subscript; }
.@{fa-css-prefix}-eraser:before { content: @fa-var-eraser; }
.@{fa-css-prefix}-puzzle-piece:before { content: @fa-var-puzzle-piece; }
.@{fa-css-prefix}-microphone:before { content: @fa-var-microphone; }
.@{fa-css-prefix}-microphone-slash:before { content: @fa-var-microphone-slash; }
.@{fa-css-prefix}-shield:before { content: @fa-var-shield; }
.@{fa-css-prefix}-calendar-o:before { content: @fa-var-calendar-o; }
.@{fa-css-prefix}-fire-extinguisher:before { content: @fa-var-fire-extinguisher; }
.@{fa-css-prefix}-rocket:before { content: @fa-var-rocket; }
.@{fa-css-prefix}-maxcdn:before { content: @fa-var-maxcdn; }
.@{fa-css-prefix}-chevron-circle-left:before { content: @fa-var-chevron-circle-left; }
.@{fa-css-prefix}-chevron-circle-right:before { content: @fa-var-chevron-circle-right; }
.@{fa-css-prefix}-chevron-circle-up:before { content: @fa-var-chevron-circle-up; }
.@{fa-css-prefix}-chevron-circle-down:before { content: @fa-var-chevron-circle-down; }
.@{fa-css-prefix}-html5:before { content: @fa-var-html5; }
.@{fa-css-prefix}-css3:before { content: @fa-var-css3; }
.@{fa-css-prefix}-anchor:before { content: @fa-var-anchor; }
.@{fa-css-prefix}-unlock-alt:before { content: @fa-var-unlock-alt; }
.@{fa-css-prefix}-bullseye:before { content: @fa-var-bullseye; }
.@{fa-css-prefix}-ellipsis-h:before { content: @fa-var-ellipsis-h; }
.@{fa-css-prefix}-ellipsis-v:before { content: @fa-var-ellipsis-v; }
.@{fa-css-prefix}-rss-square:before { content: @fa-var-rss-square; }
.@{fa-css-prefix}-play-circle:before { content: @fa-var-play-circle; }
.@{fa-css-prefix}-ticket:before { content: @fa-var-ticket; }
.@{fa-css-prefix}-minus-square:before { content: @fa-var-minus-square; }
.@{fa-css-prefix}-minus-square-o:before { content: @fa-var-minus-square-o; }
.@{fa-css-prefix}-level-up:before { content: @fa-var-level-up; }
.@{fa-css-prefix}-level-down:before { content: @fa-var-level-down; }
.@{fa-css-prefix}-check-square:before { content: @fa-var-check-square; }
.@{fa-css-prefix}-pencil-square:before { content: @fa-var-pencil-square; }
.@{fa-css-prefix}-external-link-square:before { content: @fa-var-external-link-square; }
.@{fa-css-prefix}-share-square:before { content: @fa-var-share-square; }
.@{fa-css-prefix}-compass:before { content: @fa-var-compass; }
.@{fa-css-prefix}-toggle-down:before,
.@{fa-css-prefix}-caret-square-o-down:before { content: @fa-var-caret-square-o-down; }
.@{fa-css-prefix}-toggle-up:before,
.@{fa-css-prefix}-caret-square-o-up:before { content: @fa-var-caret-square-o-up; }
.@{fa-css-prefix}-toggle-right:before,
.@{fa-css-prefix}-caret-square-o-right:before { content: @fa-var-caret-square-o-right; }
.@{fa-css-prefix}-euro:before,
.@{fa-css-prefix}-eur:before { content: @fa-var-eur; }
.@{fa-css-prefix}-gbp:before { content: @fa-var-gbp; }
.@{fa-css-prefix}-dollar:before,
.@{fa-css-prefix}-usd:before { content: @fa-var-usd; }
.@{fa-css-prefix}-rupee:before,
.@{fa-css-prefix}-inr:before { content: @fa-var-inr; }
.@{fa-css-prefix}-cny:before,
.@{fa-css-prefix}-rmb:before,
.@{fa-css-prefix}-yen:before,
.@{fa-css-prefix}-jpy:before { content: @fa-var-jpy; }
.@{fa-css-prefix}-ruble:before,
.@{fa-css-prefix}-rouble:before,
.@{fa-css-prefix}-rub:before { content: @fa-var-rub; }
.@{fa-css-prefix}-won:before,
.@{fa-css-prefix}-krw:before { content: @fa-var-krw; }
.@{fa-css-prefix}-bitcoin:before,
.@{fa-css-prefix}-btc:before { content: @fa-var-btc; }
.@{fa-css-prefix}-file:before { content: @fa-var-file; }
.@{fa-css-prefix}-file-text:before { content: @fa-var-file-text; }
.@{fa-css-prefix}-sort-alpha-asc:before { content: @fa-var-sort-alpha-asc; }
.@{fa-css-prefix}-sort-alpha-desc:before { content: @fa-var-sort-alpha-desc; }
.@{fa-css-prefix}-sort-amount-asc:before { content: @fa-var-sort-amount-asc; }
.@{fa-css-prefix}-sort-amount-desc:before { content: @fa-var-sort-amount-desc; }
.@{fa-css-prefix}-sort-numeric-asc:before { content: @fa-var-sort-numeric-asc; }
.@{fa-css-prefix}-sort-numeric-desc:before { content: @fa-var-sort-numeric-desc; }
.@{fa-css-prefix}-thumbs-up:before { content: @fa-var-thumbs-up; }
.@{fa-css-prefix}-thumbs-down:before { content: @fa-var-thumbs-down; }
.@{fa-css-prefix}-youtube-square:before { content: @fa-var-youtube-square; }
.@{fa-css-prefix}-youtube:before { content: @fa-var-youtube; }
.@{fa-css-prefix}-xing:before { content: @fa-var-xing; }
.@{fa-css-prefix}-xing-square:before { content: @fa-var-xing-square; }
.@{fa-css-prefix}-youtube-play:before { content: @fa-var-youtube-play; }
.@{fa-css-prefix}-dropbox:before { content: @fa-var-dropbox; }
.@{fa-css-prefix}-stack-overflow:before { content: @fa-var-stack-overflow; }
.@{fa-css-prefix}-instagram:before { content: @fa-var-instagram; }
.@{fa-css-prefix}-flickr:before { content: @fa-var-flickr; }
.@{fa-css-prefix}-adn:before { content: @fa-var-adn; }
.@{fa-css-prefix}-bitbucket:before { content: @fa-var-bitbucket; }
.@{fa-css-prefix}-bitbucket-square:before { content: @fa-var-bitbucket-square; }
.@{fa-css-prefix}-tumblr:before { content: @fa-var-tumblr; }
.@{fa-css-prefix}-tumblr-square:before { content: @fa-var-tumblr-square; }
.@{fa-css-prefix}-long-arrow-down:before { content: @fa-var-long-arrow-down; }
.@{fa-css-prefix}-long-arrow-up:before { content: @fa-var-long-arrow-up; }
.@{fa-css-prefix}-long-arrow-left:before { content: @fa-var-long-arrow-left; }
.@{fa-css-prefix}-long-arrow-right:before { content: @fa-var-long-arrow-right; }
.@{fa-css-prefix}-apple:before { content: @fa-var-apple; }
.@{fa-css-prefix}-windows:before { content: @fa-var-windows; }
.@{fa-css-prefix}-android:before { content: @fa-var-android; }
.@{fa-css-prefix}-linux:before { content: @fa-var-linux; }
.@{fa-css-prefix}-dribbble:before { content: @fa-var-dribbble; }
.@{fa-css-prefix}-skype:before { content: @fa-var-skype; }
.@{fa-css-prefix}-foursquare:before { content: @fa-var-foursquare; }
.@{fa-css-prefix}-trello:before { content: @fa-var-trello; }
.@{fa-css-prefix}-female:before { content: @fa-var-female; }
.@{fa-css-prefix}-male:before { content: @fa-var-male; }
.@{fa-css-prefix}-gittip:before,
.@{fa-css-prefix}-gratipay:before { content: @fa-var-gratipay; }
.@{fa-css-prefix}-sun-o:before { content: @fa-var-sun-o; }
.@{fa-css-prefix}-moon-o:before { content: @fa-var-moon-o; }
.@{fa-css-prefix}-archive:before { content: @fa-var-archive; }
.@{fa-css-prefix}-bug:before { content: @fa-var-bug; }
.@{fa-css-prefix}-vk:before { content: @fa-var-vk; }
.@{fa-css-prefix}-weibo:before { content: @fa-var-weibo; }
.@{fa-css-prefix}-renren:before { content: @fa-var-renren; }
.@{fa-css-prefix}-pagelines:before { content: @fa-var-pagelines; }
.@{fa-css-prefix}-stack-exchange:before { content: @fa-var-stack-exchange; }
.@{fa-css-prefix}-arrow-circle-o-right:before { content: @fa-var-arrow-circle-o-right; }
.@{fa-css-prefix}-arrow-circle-o-left:before { content: @fa-var-arrow-circle-o-left; }
.@{fa-css-prefix}-toggle-left:before,
.@{fa-css-prefix}-caret-square-o-left:before { content: @fa-var-caret-square-o-left; }
.@{fa-css-prefix}-dot-circle-o:before { content: @fa-var-dot-circle-o; }
.@{fa-css-prefix}-wheelchair:before { content: @fa-var-wheelchair; }
.@{fa-css-prefix}-vimeo-square:before { content: @fa-var-vimeo-square; }
.@{fa-css-prefix}-turkish-lira:before,
.@{fa-css-prefix}-try:before { content: @fa-var-try; }
.@{fa-css-prefix}-plus-square-o:before { content: @fa-var-plus-square-o; }
.@{fa-css-prefix}-space-shuttle:before { content: @fa-var-space-shuttle; }
.@{fa-css-prefix}-slack:before { content: @fa-var-slack; }
.@{fa-css-prefix}-envelope-square:before { content: @fa-var-envelope-square; }
.@{fa-css-prefix}-wordpress:before { content: @fa-var-wordpress; }
.@{fa-css-prefix}-openid:before { content: @fa-var-openid; }
.@{fa-css-prefix}-institution:before,
.@{fa-css-prefix}-bank:before,
.@{fa-css-prefix}-university:before { content: @fa-var-university; }
.@{fa-css-prefix}-mortar-board:before,
.@{fa-css-prefix}-graduation-cap:before { content: @fa-var-graduation-cap; }
.@{fa-css-prefix}-yahoo:before { content: @fa-var-yahoo; }
.@{fa-css-prefix}-google:before { content: @fa-var-google; }
.@{fa-css-prefix}-reddit:before { content: @fa-var-reddit; }
.@{fa-css-prefix}-reddit-square:before { content: @fa-var-reddit-square; }
.@{fa-css-prefix}-stumbleupon-circle:before { content: @fa-var-stumbleupon-circle; }
.@{fa-css-prefix}-stumbleupon:before { content: @fa-var-stumbleupon; }
.@{fa-css-prefix}-delicious:before { content: @fa-var-delicious; }
.@{fa-css-prefix}-digg:before { content: @fa-var-digg; }
.@{fa-css-prefix}-pied-piper:before { content: @fa-var-pied-piper; }
.@{fa-css-prefix}-pied-piper-alt:before { content: @fa-var-pied-piper-alt; }
.@{fa-css-prefix}-drupal:before { content: @fa-var-drupal; }
.@{fa-css-prefix}-joomla:before { content: @fa-var-joomla; }
.@{fa-css-prefix}-language:before { content: @fa-var-language; }
.@{fa-css-prefix}-fax:before { content: @fa-var-fax; }
.@{fa-css-prefix}-building:before { content: @fa-var-building; }
.@{fa-css-prefix}-child:before { content: @fa-var-child; }
.@{fa-css-prefix}-paw:before { content: @fa-var-paw; }
.@{fa-css-prefix}-spoon:before { content: @fa-var-spoon; }
.@{fa-css-prefix}-cube:before { content: @fa-var-cube; }
.@{fa-css-prefix}-cubes:before { content: @fa-var-cubes; }
.@{fa-css-prefix}-behance:before { content: @fa-var-behance; }
.@{fa-css-prefix}-behance-square:before { content: @fa-var-behance-square; }
.@{fa-css-prefix}-steam:before { content: @fa-var-steam; }
.@{fa-css-prefix}-steam-square:before { content: @fa-var-steam-square; }
.@{fa-css-prefix}-recycle:before { content: @fa-var-recycle; }
.@{fa-css-prefix}-automobile:before,
.@{fa-css-prefix}-car:before { content: @fa-var-car; }
.@{fa-css-prefix}-cab:before,
.@{fa-css-prefix}-taxi:before { content: @fa-var-taxi; }
.@{fa-css-prefix}-tree:before { content: @fa-var-tree; }
.@{fa-css-prefix}-spotify:before { content: @fa-var-spotify; }
.@{fa-css-prefix}-deviantart:before { content: @fa-var-deviantart; }
.@{fa-css-prefix}-soundcloud:before { content: @fa-var-soundcloud; }
.@{fa-css-prefix}-database:before { content: @fa-var-database; }
.@{fa-css-prefix}-file-pdf-o:before { content: @fa-var-file-pdf-o; }
.@{fa-css-prefix}-file-word-o:before { content: @fa-var-file-word-o; }
.@{fa-css-prefix}-file-excel-o:before { content: @fa-var-file-excel-o; }
.@{fa-css-prefix}-file-powerpoint-o:before { content: @fa-var-file-powerpoint-o; }
.@{fa-css-prefix}-file-photo-o:before,
.@{fa-css-prefix}-file-picture-o:before,
.@{fa-css-prefix}-file-image-o:before { content: @fa-var-file-image-o; }
.@{fa-css-prefix}-file-zip-o:before,
.@{fa-css-prefix}-file-archive-o:before { content: @fa-var-file-archive-o; }
.@{fa-css-prefix}-file-sound-o:before,
.@{fa-css-prefix}-file-audio-o:before { content: @fa-var-file-audio-o; }
.@{fa-css-prefix}-file-movie-o:before,
.@{fa-css-prefix}-file-video-o:before { content: @fa-var-file-video-o; }
.@{fa-css-prefix}-file-code-o:before { content: @fa-var-file-code-o; }
.@{fa-css-prefix}-vine:before { content: @fa-var-vine; }
.@{fa-css-prefix}-codepen:before { content: @fa-var-codepen; }
.@{fa-css-prefix}-jsfiddle:before { content: @fa-var-jsfiddle; }
.@{fa-css-prefix}-life-bouy:before,
.@{fa-css-prefix}-life-buoy:before,
.@{fa-css-prefix}-life-saver:before,
.@{fa-css-prefix}-support:before,
.@{fa-css-prefix}-life-ring:before { content: @fa-var-life-ring; }
.@{fa-css-prefix}-circle-o-notch:before { content: @fa-var-circle-o-notch; }
.@{fa-css-prefix}-ra:before,
.@{fa-css-prefix}-rebel:before { content: @fa-var-rebel; }
.@{fa-css-prefix}-ge:before,
.@{fa-css-prefix}-empire:before { content: @fa-var-empire; }
.@{fa-css-prefix}-git-square:before { content: @fa-var-git-square; }
.@{fa-css-prefix}-git:before { content: @fa-var-git; }
.@{fa-css-prefix}-hacker-news:before { content: @fa-var-hacker-news; }
.@{fa-css-prefix}-tencent-weibo:before { content: @fa-var-tencent-weibo; }
.@{fa-css-prefix}-qq:before { content: @fa-var-qq; }
.@{fa-css-prefix}-wechat:before,
.@{fa-css-prefix}-weixin:before { content: @fa-var-weixin; }
.@{fa-css-prefix}-send:before,
.@{fa-css-prefix}-paper-plane:before { content: @fa-var-paper-plane; }
.@{fa-css-prefix}-send-o:before,
.@{fa-css-prefix}-paper-plane-o:before { content: @fa-var-paper-plane-o; }
.@{fa-css-prefix}-history:before { content: @fa-var-history; }
.@{fa-css-prefix}-genderless:before,
.@{fa-css-prefix}-circle-thin:before { content: @fa-var-circle-thin; }
.@{fa-css-prefix}-header:before { content: @fa-var-header; }
.@{fa-css-prefix}-paragraph:before { content: @fa-var-paragraph; }
.@{fa-css-prefix}-sliders:before { content: @fa-var-sliders; }
.@{fa-css-prefix}-share-alt:before { content: @fa-var-share-alt; }
.@{fa-css-prefix}-share-alt-square:before { content: @fa-var-share-alt-square; }
.@{fa-css-prefix}-bomb:before { content: @fa-var-bomb; }
.@{fa-css-prefix}-soccer-ball-o:before,
.@{fa-css-prefix}-futbol-o:before { content: @fa-var-futbol-o; }
.@{fa-css-prefix}-tty:before { content: @fa-var-tty; }
.@{fa-css-prefix}-binoculars:before { content: @fa-var-binoculars; }
.@{fa-css-prefix}-plug:before { content: @fa-var-plug; }
.@{fa-css-prefix}-slideshare:before { content: @fa-var-slideshare; }
.@{fa-css-prefix}-twitch:before { content: @fa-var-twitch; }
.@{fa-css-prefix}-yelp:before { content: @fa-var-yelp; }
.@{fa-css-prefix}-newspaper-o:before { content: @fa-var-newspaper-o; }
.@{fa-css-prefix}-wifi:before { content: @fa-var-wifi; }
.@{fa-css-prefix}-calculator:before { content: @fa-var-calculator; }
.@{fa-css-prefix}-paypal:before { content: @fa-var-paypal; }
.@{fa-css-prefix}-google-wallet:before { content: @fa-var-google-wallet; }
.@{fa-css-prefix}-cc-visa:before { content: @fa-var-cc-visa; }
.@{fa-css-prefix}-cc-mastercard:before { content: @fa-var-cc-mastercard; }
.@{fa-css-prefix}-cc-discover:before { content: @fa-var-cc-discover; }
.@{fa-css-prefix}-cc-amex:before { content: @fa-var-cc-amex; }
.@{fa-css-prefix}-cc-paypal:before { content: @fa-var-cc-paypal; }
.@{fa-css-prefix}-cc-stripe:before { content: @fa-var-cc-stripe; }
.@{fa-css-prefix}-bell-slash:before { content: @fa-var-bell-slash; }
.@{fa-css-prefix}-bell-slash-o:before { content: @fa-var-bell-slash-o; }
.@{fa-css-prefix}-trash:before { content: @fa-var-trash; }
.@{fa-css-prefix}-copyright:before { content: @fa-var-copyright; }
.@{fa-css-prefix}-at:before { content: @fa-var-at; }
.@{fa-css-prefix}-eyedropper:before { content: @fa-var-eyedropper; }
.@{fa-css-prefix}-paint-brush:before { content: @fa-var-paint-brush; }
.@{fa-css-prefix}-birthday-cake:before { content: @fa-var-birthday-cake; }
.@{fa-css-prefix}-area-chart:before { content: @fa-var-area-chart; }
.@{fa-css-prefix}-pie-chart:before { content: @fa-var-pie-chart; }
.@{fa-css-prefix}-line-chart:before { content: @fa-var-line-chart; }
.@{fa-css-prefix}-lastfm:before { content: @fa-var-lastfm; }
.@{fa-css-prefix}-lastfm-square:before { content: @fa-var-lastfm-square; }
.@{fa-css-prefix}-toggle-off:before { content: @fa-var-toggle-off; }
.@{fa-css-prefix}-toggle-on:before { content: @fa-var-toggle-on; }
.@{fa-css-prefix}-bicycle:before { content: @fa-var-bicycle; }
.@{fa-css-prefix}-bus:before { content: @fa-var-bus; }
.@{fa-css-prefix}-ioxhost:before { content: @fa-var-ioxhost; }
.@{fa-css-prefix}-angellist:before { content: @fa-var-angellist; }
.@{fa-css-prefix}-cc:before { content: @fa-var-cc; }
.@{fa-css-prefix}-shekel:before,
.@{fa-css-prefix}-sheqel:before,
.@{fa-css-prefix}-ils:before { content: @fa-var-ils; }
.@{fa-css-prefix}-meanpath:before { content: @fa-var-meanpath; }
.@{fa-css-prefix}-buysellads:before { content: @fa-var-buysellads; }
.@{fa-css-prefix}-connectdevelop:before { content: @fa-var-connectdevelop; }
.@{fa-css-prefix}-dashcube:before { content: @fa-var-dashcube; }
.@{fa-css-prefix}-forumbee:before { content: @fa-var-forumbee; }
.@{fa-css-prefix}-leanpub:before { content: @fa-var-leanpub; }
.@{fa-css-prefix}-sellsy:before { content: @fa-var-sellsy; }
.@{fa-css-prefix}-shirtsinbulk:before { content: @fa-var-shirtsinbulk; }
.@{fa-css-prefix}-simplybuilt:before { content: @fa-var-simplybuilt; }
.@{fa-css-prefix}-skyatlas:before { content: @fa-var-skyatlas; }
.@{fa-css-prefix}-cart-plus:before { content: @fa-var-cart-plus; }
.@{fa-css-prefix}-cart-arrow-down:before { content: @fa-var-cart-arrow-down; }
.@{fa-css-prefix}-diamond:before { content: @fa-var-diamond; }
.@{fa-css-prefix}-ship:before { content: @fa-var-ship; }
.@{fa-css-prefix}-user-secret:before { content: @fa-var-user-secret; }
.@{fa-css-prefix}-motorcycle:before { content: @fa-var-motorcycle; }
.@{fa-css-prefix}-street-view:before { content: @fa-var-street-view; }
.@{fa-css-prefix}-heartbeat:before { content: @fa-var-heartbeat; }
.@{fa-css-prefix}-venus:before { content: @fa-var-venus; }
.@{fa-css-prefix}-mars:before { content: @fa-var-mars; }
.@{fa-css-prefix}-mercury:before { content: @fa-var-mercury; }
.@{fa-css-prefix}-transgender:before { content: @fa-var-transgender; }
.@{fa-css-prefix}-transgender-alt:before { content: @fa-var-transgender-alt; }
.@{fa-css-prefix}-venus-double:before { content: @fa-var-venus-double; }
.@{fa-css-prefix}-mars-double:before { content: @fa-var-mars-double; }
.@{fa-css-prefix}-venus-mars:before { content: @fa-var-venus-mars; }
.@{fa-css-prefix}-mars-stroke:before { content: @fa-var-mars-stroke; }
.@{fa-css-prefix}-mars-stroke-v:before { content: @fa-var-mars-stroke-v; }
.@{fa-css-prefix}-mars-stroke-h:before { content: @fa-var-mars-stroke-h; }
.@{fa-css-prefix}-neuter:before { content: @fa-var-neuter; }
.@{fa-css-prefix}-facebook-official:before { content: @fa-var-facebook-official; }
.@{fa-css-prefix}-pinterest-p:before { content: @fa-var-pinterest-p; }
.@{fa-css-prefix}-whatsapp:before { content: @fa-var-whatsapp; }
.@{fa-css-prefix}-server:before { content: @fa-var-server; }
.@{fa-css-prefix}-user-plus:before { content: @fa-var-user-plus; }
.@{fa-css-prefix}-user-times:before { content: @fa-var-user-times; }
.@{fa-css-prefix}-hotel:before,
.@{fa-css-prefix}-bed:before { content: @fa-var-bed; }
.@{fa-css-prefix}-viacoin:before { content: @fa-var-viacoin; }
.@{fa-css-prefix}-train:before { content: @fa-var-train; }
.@{fa-css-prefix}-subway:before { content: @fa-var-subway; }
.@{fa-css-prefix}-medium:before { content: @fa-var-medium; }


================================================
FILE: website/font-awesome/less/larger.less
================================================
// Icon Sizes
// -------------------------

/* makes the font 33% larger relative to the icon container */
.@{fa-css-prefix}-lg {
  font-size: (4em / 3);
  line-height: (3em / 4);
  vertical-align: -15%;
}
.@{fa-css-prefix}-2x { font-size: 2em; }
.@{fa-css-prefix}-3x { font-size: 3em; }
.@{fa-css-prefix}-4x { font-size: 4em; }
.@{fa-css-prefix}-5x { font-size: 5em; }


================================================
FILE: website/font-awesome/less/list.less
================================================
// List Icons
// -------------------------

.@{fa-css-prefix}-ul {
  padding-left: 0;
  margin-left: @fa-li-width;
  list-style-type: none;
  > li { position: relative; }
}
.@{fa-css-prefix}-li {
  position: absolute;
  left: -@fa-li-width;
  width: @fa-li-width;
  top: (2em / 14);
  text-align: center;
  &.@{fa-css-prefix}-lg {
    left: (-@fa-li-width + (4em / 14));
  }
}


================================================
FILE: website/font-awesome/less/mixins.less
================================================
// Mixins
// --------------------------

.fa-icon() {
  display: inline-block;
  font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration
  font-size: inherit; // can't have font-size inherit on line above, so need to override
  text-rendering: auto; // optimizelegibility throws things off #1094
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  transform: translate(0, 0); // ensures no half-pixel rendering in firefox
}

.fa-icon-rotate(@degrees, @rotation) {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation);
  -webkit-transform: rotate(@degrees);
  -ms-transform: rotate(@degrees);
  transform: rotate(@degrees);
}

.fa-icon-flip(@horiz, @vert, @rotation) {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation, mirror=1);
  -webkit-transform: scale(@horiz, @vert);
  -ms-transform: scale(@horiz, @vert);
  transform: scale(@horiz, @vert);
}
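
The mixins above let downstream stylesheets mint their own icon classes instead of decorating markup with .fa-* classes; rotated-flipped.less below is itself just a thin wrapper over .fa-icon-rotate() and .fa-icon-flip(). A minimal sketch under the assumption that variables.less and mixins.less are already imported (the .app-refresh class name is invented for illustration):

// editor's sketch, not part of the repo: a custom icon class built from the mixins
.app-refresh {
  .fa-icon();                             // base font/rendering setup from mixins.less
  &:before { content: @fa-var-refresh; }  // "\f021" from variables.less
}
.app-refresh-upside-down {
  .fa-icon();
  .fa-icon-rotate(180deg, 2);             // second argument feeds the IE8 BasicImage filter
  &:before { content: @fa-var-refresh; }
}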

================================================
FILE: website/font-awesome/less/path.less
================================================
/* FONT PATH
 * -------------------------- */

@font-face {
  font-family: 'FontAwesome';
  src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}');
  src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'),
    url('@{fa-font-path}/fontawesome-webfont.woff2?v=@{fa-version}') format('woff2'),
    url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'),
    url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'),
    url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg');
  // src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
  font-weight: normal;
  font-style: normal;
}


================================================
FILE: website/font-awesome/less/rotated-flipped.less
================================================
// Rotated & Flipped Icons
// -------------------------

.@{fa-css-prefix}-rotate-90 { .fa-icon-rotate(90deg, 1); }
.@{fa-css-prefix}-rotate-180 { .fa-icon-rotate(180deg, 2); }
.@{fa-css-prefix}-rotate-270 { .fa-icon-rotate(270deg, 3); }

.@{fa-css-prefix}-flip-horizontal { .fa-icon-flip(-1, 1, 0); }
.@{fa-css-prefix}-flip-vertical { .fa-icon-flip(1, -1, 2); }

// Hook for IE8-9
// -------------------------

:root .@{fa-css-prefix}-rotate-90,
:root .@{fa-css-prefix}-rotate-180,
:root .@{fa-css-prefix}-rotate-270,
:root .@{fa-css-prefix}-flip-horizontal,
:root .@{fa-css-prefix}-flip-vertical {
  filter: none;
}


================================================
FILE: website/font-awesome/less/stacked.less
================================================
// Stacked Icons
// -------------------------

.@{fa-css-prefix}-stack {
  position: relative;
  display: inline-block;
  width: 2em;
  height: 2em;
  line-height: 2em;
  vertical-align: middle;
}
.@{fa-css-prefix}-stack-1x,
.@{fa-css-prefix}-stack-2x {
  position: absolute;
  left: 0;
  width: 100%;
  text-align: center;
}
.@{fa-css-prefix}-stack-1x { line-height: inherit; }
.@{fa-css-prefix}-stack-2x { font-size: 2em; }
.@{fa-css-prefix}-inverse { color: @fa-inverse; }
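
Note how path.less above builds every webfont URL from @fa-font-path, which variables.less (next file) defaults to "../fonts". Because LESS variables are lazily evaluated and the last definition wins, a consuming stylesheet can relocate the fonts without editing the library. A small sketch under that assumption (/assets/fonts is an invented location):

// editor's sketch, not part of the repo: serve the webfonts from another directory
@import "font-awesome.less";
@fa-font-path: "/assets/fonts"; // overrides the default "../fonts" even though it
                                // appears after the import, thanks to lazy evaluation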

================================================
FILE: website/font-awesome/less/variables.less
================================================
// Variables
// --------------------------

@fa-font-path: "../fonts";
@fa-font-size-base: 14px;
//@fa-font-path: "//netdna.bootstrapcdn.com/font-awesome/4.3.0/fonts"; // for referencing Bootstrap CDN font files directly
@fa-css-prefix: fa;
@fa-version: "4.3.0";
@fa-border-color: #eee;
@fa-inverse: #fff;
@fa-li-width: (30em / 14);

@fa-var-adjust: "\f042";
@fa-var-adn: "\f170";
@fa-var-align-center: "\f037";
@fa-var-align-justify: "\f039";
@fa-var-align-left: "\f036";
@fa-var-align-right: "\f038";
@fa-var-ambulance: "\f0f9";
@fa-var-anchor: "\f13d";
@fa-var-android: "\f17b";
@fa-var-angellist: "\f209";
@fa-var-angle-double-down: "\f103";
@fa-var-angle-double-left: "\f100";
@fa-var-angle-double-right: "\f101";
@fa-var-angle-double-up: "\f102";
@fa-var-angle-down: "\f107";
@fa-var-angle-left: "\f104";
@fa-var-angle-right: "\f105";
@fa-var-angle-up: "\f106";
@fa-var-apple: "\f179";
@fa-var-archive: "\f187";
@fa-var-area-chart: "\f1fe";
@fa-var-arrow-circle-down: "\f0ab";
@fa-var-arrow-circle-left: "\f0a8";
@fa-var-arrow-circle-o-down: "\f01a";
@fa-var-arrow-circle-o-left: "\f190";
@fa-var-arrow-circle-o-right: "\f18e";
@fa-var-arrow-circle-o-up: "\f01b";
@fa-var-arrow-circle-right: "\f0a9";
@fa-var-arrow-circle-up: "\f0aa";
@fa-var-arrow-down: "\f063";
@fa-var-arrow-left: "\f060";
@fa-var-arrow-right: "\f061";
@fa-var-arrow-up: "\f062";
@fa-var-arrows: "\f047";
@fa-var-arrows-alt: "\f0b2";
@fa-var-arrows-h: "\f07e";
@fa-var-arrows-v: "\f07d";
@fa-var-asterisk: "\f069";
@fa-var-at: "\f1fa";
@fa-var-automobile: "\f1b9";
@fa-var-backward: "\f04a";
@fa-var-ban: "\f05e";
@fa-var-bank: "\f19c";
@fa-var-bar-chart: "\f080";
@fa-var-bar-chart-o: "\f080";
@fa-var-barcode: "\f02a";
@fa-var-bars: "\f0c9";
@fa-var-bed: "\f236";
@fa-var-beer: "\f0fc";
@fa-var-behance: "\f1b4";
@fa-var-behance-square: "\f1b5";
@fa-var-bell: "\f0f3";
@fa-var-bell-o: "\f0a2";
@fa-var-bell-slash: "\f1f6";
@fa-var-bell-slash-o: "\f1f7";
@fa-var-bicycle: "\f206";
@fa-var-binoculars: "\f1e5";
@fa-var-birthday-cake: "\f1fd";
@fa-var-bitbucket: "\f171";
@fa-var-bitbucket-square: "\f172";
@fa-var-bitcoin: "\f15a";
@fa-var-bold: "\f032";
@fa-var-bolt: "\f0e7";
@fa-var-bomb: "\f1e2";
@fa-var-book: "\f02d";
@fa-var-bookmark: "\f02e";
@fa-var-bookmark-o: "\f097";
@fa-var-briefcase: "\f0b1";
@fa-var-btc: "\f15a";
@fa-var-bug: "\f188";
@fa-var-building: "\f1ad";
@fa-var-building-o: "\f0f7";
@fa-var-bullhorn: "\f0a1";
@fa-var-bullseye: "\f140";
@fa-var-bus: "\f207";
@fa-var-buysellads: "\f20d";
@fa-var-cab: "\f1ba";
@fa-var-calculator: "\f1ec";
@fa-var-calendar: "\f073";
@fa-var-calendar-o: "\f133";
@fa-var-camera: "\f030";
@fa-var-camera-retro: "\f083";
@fa-var-car: "\f1b9";
@fa-var-caret-down: "\f0d7";
@fa-var-caret-left: "\f0d9";
@fa-var-caret-right: "\f0da";
@fa-var-caret-square-o-down: "\f150";
@fa-var-caret-square-o-left: "\f191";
@fa-var-caret-square-o-right: "\f152";
@fa-var-caret-square-o-up: "\f151";
@fa-var-caret-up: "\f0d8";
@fa-var-cart-arrow-down: "\f218";
@fa-var-cart-plus: "\f217";
@fa-var-cc: "\f20a";
@fa-var-cc-amex: "\f1f3";
@fa-var-cc-discover: "\f1f2";
@fa-var-cc-mastercard: "\f1f1";
@fa-var-cc-paypal: "\f1f4";
@fa-var-cc-stripe: "\f1f5";
@fa-var-cc-visa: "\f1f0";
@fa-var-certificate: "\f0a3";
@fa-var-chain: "\f0c1";
@fa-var-chain-broken: "\f127";
@fa-var-check: "\f00c";
@fa-var-check-circle: "\f058";
@fa-var-check-circle-o: "\f05d";
@fa-var-check-square: "\f14a";
@fa-var-check-square-o: "\f046";
@fa-var-chevron-circle-down: "\f13a";
@fa-var-chevron-circle-left: "\f137";
@fa-var-chevron-circle-right: "\f138";
@fa-var-chevron-circle-up: "\f139";
@fa-var-chevron-down: "\f078";
@fa-var-chevron-left: "\f053";
@fa-var-chevron-right: "\f054";
@fa-var-chevron-up: "\f077";
@fa-var-child: "\f1ae";
@fa-var-circle: "\f111";
@fa-var-circle-o: "\f10c";
@fa-var-circle-o-notch: "\f1ce";
@fa-var-circle-thin: "\f1db";
@fa-var-clipboard: "\f0ea";
@fa-var-clock-o: "\f017";
@fa-var-close: "\f00d";
@fa-var-cloud: "\f0c2";
@fa-var-cloud-download: "\f0ed";
@fa-var-cloud-upload: "\f0ee";
@fa-var-cny: "\f157";
@fa-var-code: "\f121";
@fa-var-code-fork: "\f126";
@fa-var-codepen: "\f1cb";
@fa-var-coffee: "\f0f4";
@fa-var-cog: "\f013";
@fa-var-cogs: "\f085";
@fa-var-columns: "\f0db";
@fa-var-comment: "\f075";
@fa-var-comment-o: "\f0e5";
@fa-var-comments: "\f086";
@fa-var-comments-o: "\f0e6";
@fa-var-compass: "\f14e";
@fa-var-compress: "\f066";
@fa-var-connectdevelop: "\f20e";
@fa-var-copy: "\f0c5";
@fa-var-copyright: "\f1f9";
@fa-var-credit-card: "\f09d";
@fa-var-crop: "\f125";
@fa-var-crosshairs: "\f05b";
@fa-var-css3: "\f13c";
@fa-var-cube: "\f1b2";
@fa-var-cubes: "\f1b3";
@fa-var-cut: "\f0c4";
@fa-var-cutlery: "\f0f5";
@fa-var-dashboard: "\f0e4";
@fa-var-dashcube: "\f210";
@fa-var-database: "\f1c0";
@fa-var-dedent: "\f03b";
@fa-var-delicious: "\f1a5";
@fa-var-desktop: "\f108";
@fa-var-deviantart: "\f1bd";
@fa-var-diamond: "\f219";
@fa-var-digg: "\f1a6";
@fa-var-dollar: "\f155";
@fa-var-dot-circle-o: "\f192";
@fa-var-download: "\f019";
@fa-var-dribbble: "\f17d";
@fa-var-dropbox: "\f16b";
@fa-var-drupal: "\f1a9";
@fa-var-edit: "\f044";
@fa-var-eject: "\f052";
@fa-var-ellipsis-h: "\f141";
@fa-var-ellipsis-v: "\f142";
@fa-var-empire: "\f1d1";
@fa-var-envelope: "\f0e0";
@fa-var-envelope-o: "\f003";
@fa-var-envelope-square: "\f199";
@fa-var-eraser: "\f12d";
@fa-var-eur: "\f153";
@fa-var-euro: "\f153";
@fa-var-exchange: "\f0ec";
@fa-var-exclamation: "\f12a";
@fa-var-exclamation-circle: "\f06a";
@fa-var-exclamation-triangle: "\f071";
@fa-var-expand: "\f065";
@fa-var-external-link: "\f08e";
@fa-var-external-link-square: "\f14c";
@fa-var-eye: "\f06e";
@fa-var-eye-slash: "\f070";
@fa-var-eyedropper: "\f1fb";
@fa-var-facebook: "\f09a";
@fa-var-facebook-f: "\f09a";
@fa-var-facebook-official: "\f230";
@fa-var-facebook-square: "\f082";
@fa-var-fast-backward: "\f049";
@fa-var-fast-forward: "\f050";
@fa-var-fax: "\f1ac";
@fa-var-female: "\f182";
@fa-var-fighter-jet: "\f0fb";
@fa-var-file: "\f15b";
@fa-var-file-archive-o: "\f1c6";
@fa-var-file-audio-o: "\f1c7";
@fa-var-file-code-o: "\f1c9";
@fa-var-file-excel-o: "\f1c3";
@fa-var-file-image-o: "\f1c5";
@fa-var-file-movie-o: "\f1c8";
@fa-var-file-o: "\f016";
@fa-var-file-pdf-o: "\f1c1";
@fa-var-file-photo-o: "\f1c5";
@fa-var-file-picture-o: "\f1c5";
@fa-var-file-powerpoint-o: "\f1c4";
@fa-var-file-sound-o: "\f1c7";
@fa-var-file-text: "\f15c";
@fa-var-file-text-o: "\f0f6";
@fa-var-file-video-o: "\f1c8";
@fa-var-file-word-o: "\f1c2";
@fa-var-file-zip-o: "\f1c6";
@fa-var-files-o: "\f0c5";
@fa-var-film: "\f008";
@fa-var-filter: "\f0b0";
@fa-var-fire: "\f06d";
@fa-var-fire-extinguisher: "\f134";
@fa-var-flag: "\f024";
@fa-var-flag-checkered: "\f11e";
@fa-var-flag-o: "\f11d";
@fa-var-flash: "\f0e7";
@fa-var-flask: "\f0c3";
@fa-var-flickr: "\f16e";
@fa-var-floppy-o: "\f0c7";
@fa-var-folder: "\f07b";
@fa-var-folder-o: "\f114";
@fa-var-folder-open: "\f07c";
@fa-var-folder-open-o: "\f115";
@fa-var-font: "\f031";
@fa-var-forumbee: "\f211";
@fa-var-forward: "\f04e";
@fa-var-foursquare: "\f180";
@fa-var-frown-o: "\f119";
@fa-var-futbol-o: "\f1e3";
@fa-var-gamepad: "\f11b";
@fa-var-gavel: "\f0e3";
@fa-var-gbp: "\f154";
@fa-var-ge: "\f1d1";
@fa-var-gear: "\f013";
@fa-var-gears: "\f085";
@fa-var-genderless: "\f1db";
@fa-var-gift: "\f06b";
@fa-var-git: "\f1d3";
@fa-var-git-square: "\f1d2";
@fa-var-github: "\f09b";
@fa-var-github-alt: "\f113";
@fa-var-github-square: "\f092";
@fa-var-gittip: "\f184";
@fa-var-glass: "\f000";
@fa-var-globe: "\f0ac";
@fa-var-google: "\f1a0";
@fa-var-google-plus: "\f0d5";
@fa-var-google-plus-square: "\f0d4";
@fa-var-google-wallet: "\f1ee";
@fa-var-graduation-cap: "\f19d";
@fa-var-gratipay: "\f184";
@fa-var-group: "\f0c0";
@fa-var-h-square: "\f0fd";
@fa-var-hacker-news: "\f1d4";
@fa-var-hand-o-down: "\f0a7";
@fa-var-hand-o-left: "\f0a5";
@fa-var-hand-o-right: "\f0a4";
@fa-var-hand-o-up: "\f0a6";
@fa-var-hdd-o: "\f0a0";
@fa-var-header: "\f1dc";
@fa-var-headphones: "\f025";
@fa-var-heart: "\f004";
@fa-var-heart-o: "\f08a";
@fa-var-heartbeat: "\f21e";
@fa-var-history: "\f1da";
@fa-var-home: "\f015";
@fa-var-hospital-o: "\f0f8";
@fa-var-hotel: "\f236";
@fa-var-html5: "\f13b";
@fa-var-ils: "\f20b";
@fa-var-image: "\f03e";
@fa-var-inbox: "\f01c";
@fa-var-indent: "\f03c";
@fa-var-info: "\f129";
@fa-var-info-circle: "\f05a";
@fa-var-inr: "\f156";
@fa-var-instagram: "\f16d";
@fa-var-institution: "\f19c";
@fa-var-ioxhost: "\f208";
@fa-var-italic: "\f033";
@fa-var-joomla: "\f1aa";
@fa-var-jpy: "\f157";
@fa-var-jsfiddle: "\f1cc";
@fa-var-key: "\f084";
@fa-var-keyboard-o: "\f11c";
@fa-var-krw: "\f159";
@fa-var-language: "\f1ab";
@fa-var-laptop: "\f109";
@fa-var-lastfm: "\f202";
@fa-var-lastfm-square: "\f203";
@fa-var-leaf: "\f06c";
@fa-var-leanpub: "\f212";
@fa-var-legal: "\f0e3";
@fa-var-lemon-o: "\f094";
@fa-var-level-down: "\f149";
@fa-var-level-up: "\f148";
@fa-var-life-bouy: "\f1cd";
@fa-var-life-buoy: "\f1cd";
@fa-var-life-ring: "\f1cd";
@fa-var-life-saver: "\f1cd";
@fa-var-lightbulb-o: "\f0eb";
@fa-var-line-chart: "\f201";
@fa-var-link: "\f0c1";
@fa-var-linkedin: "\f0e1";
@fa-var-linkedin-square: "\f08c";
@fa-var-linux: "\f17c";
@fa-var-list: "\f03a";
@fa-var-list-alt: "\f022";
@fa-var-list-ol: "\f0cb";
@fa-var-list-ul: "\f0ca";
@fa-var-location-arrow: "\f124";
@fa-var-lock: "\f023";
@fa-var-long-arrow-down: "\f175";
@fa-var-long-arrow-left: "\f177";
@fa-var-long-arrow-right: "\f178";
@fa-var-long-arrow-up: "\f176";
@fa-var-magic: "\f0d0";
@fa-var-magnet: "\f076";
@fa-var-mail-forward: "\f064";
@fa-var-mail-reply: "\f112";
@fa-var-mail-reply-all: "\f122";
@fa-var-male: "\f183";
@fa-var-map-marker: "\f041";
@fa-var-mars: "\f222";
@fa-var-mars-double: "\f227";
@fa-var-mars-stroke: "\f229";
@fa-var-mars-stroke-h: "\f22b";
@fa-var-mars-stroke-v: "\f22a";
@fa-var-maxcdn: "\f136";
@fa-var-meanpath: "\f20c";
@fa-var-medium: "\f23a";
@fa-var-medkit: "\f0fa";
@fa-var-meh-o: "\f11a";
@fa-var-mercury: "\f223";
@fa-var-microphone: "\f130";
@fa-var-microphone-slash: "\f131";
@fa-var-minus: "\f068";
@fa-var-minus-circle: "\f056";
@fa-var-minus-square: "\f146";
@fa-var-minus-square-o: "\f147";
@fa-var-mobile: "\f10b";
@fa-var-mobile-phone: "\f10b";
@fa-var-money: "\f0d6";
@fa-var-moon-o: "\f186";
@fa-var-mortar-board: "\f19d";
@fa-var-motorcycle: "\f21c";
@fa-var-music: "\f001";
@fa-var-navicon: "\f0c9";
@fa-var-neuter: "\f22c";
@fa-var-newspaper-o: "\f1ea";
@fa-var-openid: "\f19b";
@fa-var-outdent: "\f03b";
@fa-var-pagelines: "\f18c";
@fa-var-paint-brush: "\f1fc";
@fa-var-paper-plane: "\f1d8";
@fa-var-paper-plane-o: "\f1d9";
@fa-var-paperclip: "\f0c6";
@fa-var-paragraph: "\f1dd";
@fa-var-paste: "\f0ea";
@fa-var-pause: "\f04c";
@fa-var-paw: "\f1b0";
@fa-var-paypal: "\f1ed";
@fa-var-pencil: "\f040";
@fa-var-pencil-square: "\f14b";
@fa-var-pencil-square-o: "\f044";
@fa-var-phone: "\f095";
@fa-var-phone-square: "\f098";
@fa-var-photo: "\f03e";
@fa-var-picture-o: "\f03e";
@fa-var-pie-chart: "\f200";
@fa-var-pied-piper: "\f1a7";
@fa-var-pied-piper-alt: "\f1a8";
@fa-var-pinterest: "\f0d2";
@fa-var-pinterest-p: "\f231";
@fa-var-pinterest-square: "\f0d3";
@fa-var-plane: "\f072";
@fa-var-play: "\f04b";
@fa-var-play-circle: "\f144";
@fa-var-play-circle-o: "\f01d";
@fa-var-plug: "\f1e6";
@fa-var-plus: "\f067";
@fa-var-plus-circle: "\f055";
@fa-var-plus-square: "\f0fe";
@fa-var-plus-square-o: "\f196";
@fa-var-power-off: "\f011";
@fa-var-print: "\f02f";
@fa-var-puzzle-piece: "\f12e";
@fa-var-qq: "\f1d6";
@fa-var-qrcode: "\f029";
@fa-var-question: "\f128";
@fa-var-question-circle: "\f059";
@fa-var-quote-left: "\f10d";
@fa-var-quote-right: "\f10e";
@fa-var-ra: "\f1d0";
@fa-var-random: "\f074";
@fa-var-rebel: "\f1d0";
@fa-var-recycle: "\f1b8";
@fa-var-reddit: "\f1a1";
@fa-var-reddit-square: "\f1a2";
@fa-var-refresh: "\f021";
@fa-var-remove: "\f00d";
@fa-var-renren: "\f18b";
@fa-var-reorder: "\f0c9";
@fa-var-repeat: "\f01e";
@fa-var-reply: "\f112";
@fa-var-reply-all: "\f122";
@fa-var-retweet: "\f079";
@fa-var-rmb: "\f157";
@fa-var-road: "\f018";
@fa-var-rocket: "\f135";
@fa-var-rotate-left: "\f0e2";
@fa-var-rotate-right: "\f01e";
@fa-var-rouble: "\f158";
@fa-var-rss: "\f09e";
@fa-var-rss-square: "\f143";
@fa-var-rub: "\f158";
@fa-var-ruble: "\f158";
@fa-var-rupee: "\f156";
@fa-var-save: "\f0c7";
@fa-var-scissors: "\f0c4";
@fa-var-search: "\f002";
@fa-var-search-minus: "\f010";
@fa-var-search-plus: "\f00e";
@fa-var-sellsy: "\f213";
@fa-var-send: "\f1d8";
@fa-var-send-o: "\f1d9";
@fa-var-server: "\f233";
@fa-var-share: "\f064";
@fa-var-share-alt: "\f1e0";
@fa-var-share-alt-square: "\f1e1";
@fa-var-share-square: "\f14d";
@fa-var-share-square-o: "\f045";
@fa-var-shekel: "\f20b";
@fa-var-sheqel: "\f20b";
@fa-var-shield: "\f132";
@fa-var-ship: "\f21a";
@fa-var-shirtsinbulk: "\f214";
@fa-var-shopping-cart: "\f07a";
@fa-var-sign-in: "\f090";
@fa-var-sign-out: "\f08b";
@fa-var-signal: "\f012";
@fa-var-simplybuilt: "\f215";
@fa-var-sitemap: "\f0e8";
@fa-var-skyatlas: "\f216";
@fa-var-skype: "\f17e";
@fa-var-slack: "\f198";
@fa-var-sliders: "\f1de";
@fa-var-slideshare: "\f1e7";
@fa-var-smile-o: "\f118";
@fa-var-soccer-ball-o: "\f1e3";
@fa-var-sort: "\f0dc";
@fa-var-sort-alpha-asc: "\f15d";
@fa-var-sort-alpha-desc: "\f15e";
@fa-var-sort-amount-asc: "\f160";
@fa-var-sort-amount-desc: "\f161";
@fa-var-sort-asc: "\f0de";
@fa-var-sort-desc: "\f0dd";
@fa-var-sort-down: "\f0dd";
@fa-var-sort-numeric-asc: "\f162";
@fa-var-sort-numeric-desc: "\f163";
@fa-var-sort-up: "\f0de";
@fa-var-soundcloud: "\f1be";
@fa-var-space-shuttle: "\f197";
@fa-var-spinner: "\f110";
@fa-var-spoon: "\f1b1";
@fa-var-spotify: "\f1bc";
@fa-var-square: "\f0c8";
@fa-var-square-o: "\f096";
@fa-var-stack-exchange: "\f18d";
@fa-var-stack-overflow: "\f16c";
@fa-var-star: "\f005";
@fa-var-star-half: "\f089";
@fa-var-star-half-empty: "\f123";
@fa-var-star-half-full: "\f123";
@fa-var-star-half-o: "\f123";
@fa-var-star-o: "\f006";
@fa-var-steam: "\f1b6";
@fa-var-steam-square: "\f1b7";
@fa-var-step-backward: "\f048";
@fa-var-step-forward: "\f051";
@fa-var-stethoscope: "\f0f1";
@fa-var-stop: "\f04d";
@fa-var-street-view: "\f21d";
@fa-var-strikethrough: "\f0cc";
@fa-var-stumbleupon: "\f1a4";
@fa-var-stumbleupon-circle: "\f1a3";
@fa-var-subscript: "\f12c";
@fa-var-subway: "\f239";
@fa-var-suitcase: "\f0f2";
@fa-var-sun-o: "\f185";
@fa-var-superscript: "\f12b";
@fa-var-support: "\f1cd";
@fa-var-table: "\f0ce";
@fa-var-tablet: "\f10a";
@fa-var-tachometer: "\f0e4";
@fa-var-tag: "\f02b";
@fa-var-tags: "\f02c";
@fa-var-tasks: "\f0ae";
@fa-var-taxi: "\f1ba";
@fa-var-tencent-weibo: "\f1d5";
@fa-var-terminal: "\f120";
@fa-var-text-height: "\f034";
@fa-var-text-width: "\f035";
@fa-var-th: "\f00a";
@fa-var-th-large: "\f009";
@fa-var-th-list: "\f00b";
@fa-var-thumb-tack: "\f08d";
@fa-var-thumbs-down: "\f165";
@fa-var-thumbs-o-down: "\f088";
@fa-var-thumbs-o-up: "\f087";
@fa-var-thumbs-up: "\f164";
@fa-var-ticket: "\f145";
@fa-var-times: "\f00d";
@fa-var-times-circle: "\f057";
@fa-var-times-circle-o: "\f05c";
@fa-var-tint: "\f043";
@fa-var-toggle-down: "\f150";
@fa-var-toggle-left: "\f191";
@fa-var-toggle-off: "\f204";
@fa-var-toggle-on: "\f205";
@fa-var-toggle-right: "\f152";
@fa-var-toggle-up: "\f151";
@fa-var-train: "\f238";
@fa-var-transgender: "\f224";
@fa-var-transgender-alt: "\f225";
@fa-var-trash: "\f1f8";
@fa-var-trash-o: "\f014";
@fa-var-tree: "\f1bb";
@fa-var-trello: "\f181";
@fa-var-trophy: "\f091";
@fa-var-truck: "\f0d1";
@fa-var-try: "\f195";
@fa-var-tty: "\f1e4";
@fa-var-tumblr: "\f173";
@fa-var-tumblr-square: "\f174";
@fa-var-turkish-lira: "\f195";
@fa-var-twitch: "\f1e8";
@fa-var-twitter: "\f099";
@fa-var-twitter-square: "\f081";
@fa-var-umbrella: "\f0e9";
@fa-var-underline: "\f0cd";
@fa-var-undo: "\f0e2";
@fa-var-university: "\f19c";
@fa-var-unlink: "\f127";
@fa-var-unlock: "\f09c";
@fa-var-unlock-alt: "\f13e";
@fa-var-unsorted: "\f0dc";
@fa-var-upload: "\f093";
@fa-var-usd: "\f155";
@fa-var-user: "\f007";
@fa-var-user-md: "\f0f0";
@fa-var-user-plus: "\f234";
@fa-var-user-secret: "\f21b";
@fa-var-user-times: "\f235";
@fa-var-users: "\f0c0";
@fa-var-venus: "\f221";
@fa-var-venus-double: "\f226";
@fa-var-venus-mars: "\f228";
@fa-var-viacoin: "\f237";
@fa-var-video-camera: "\f03d";
@fa-var-vimeo-square: "\f194";
@fa-var-vine: "\f1ca";
@fa-var-vk: "\f189";
@fa-var-volume-down: "\f027";
@fa-var-volume-off: "\f026";
@fa-var-volume-up: "\f028";
@fa-var-warning: "\f071";
@fa-var-wechat: "\f1d7";
@fa-var-weibo: "\f18a";
@fa-var-weixin: "\f1d7";
@fa-var-whatsapp: "\f232";
@fa-var-wheelchair: "\f193";
@fa-var-wifi: "\f1eb";
@fa-var-windows: "\f17a";
@fa-var-won: "\f159";
@fa-var-wordpress: "\f19a";
@fa-var-wrench: "\f0ad";
@fa-var-xing: "\f168";
@fa-var-xing-square: "\f169";
@fa-var-yahoo: "\f19e";
@fa-var-yelp: "\f1e9";
@fa-var-yen: "\f157";
@fa-var-youtube: "\f167";
@fa-var-youtube-play: "\f16a";
@fa-var-youtube-square: "\f166";
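
Because every selector in the library is interpolated from @fa-css-prefix, the whole class namespace can be renamed by overriding that one variable; a minimal sketch, assuming a project that wants icon-* classes instead of fa-* (the prefix choice is illustrative):

// editor's sketch, not part of the repo: rename the generated class namespace
@import "font-awesome.less";
@fa-css-prefix: icon; // emits .icon, .icon-lg, .icon-spin, .icon-user, ... instead of .fa-*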

================================================
FILE: website/font-awesome/scss/_animated.scss
================================================
// Spinning Icons
// --------------------------

.#{$fa-css-prefix}-spin {
  -webkit-animation: fa-spin 2s infinite linear;
  animation: fa-spin 2s infinite linear;
}

.#{$fa-css-prefix}-pulse {
  -webkit-animation: fa-spin 1s infinite steps(8);
  animation: fa-spin 1s infinite steps(8);
}

@-webkit-keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(359deg);
    transform: rotate(359deg);
  }
}

@keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg);
  }
  100% {
    -webkit-transform: rotate(359deg);
    transform: rotate(359deg);
  }
}


================================================
FILE: website/font-awesome/scss/_bordered-pulled.scss
================================================
// Bordered & Pulled
// -------------------------

.#{$fa-css-prefix}-border {
  padding: .2em .25em .15em;
  border: solid .08em $fa-border-color;
  border-radius: .1em;
}

.pull-right { float: right; }
.pull-left { float: left; }

.#{$fa-css-prefix} {
  &.pull-left { margin-right: .3em; }
  &.pull-right { margin-left: .3em; }
}


================================================
FILE: website/font-awesome/scss/_core.scss
================================================
// Base Class Definition
// -------------------------

.#{$fa-css-prefix} {
  display: inline-block;
  font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration
  font-size: inherit; // can't have font-size inherit on line above, so need to override
  text-rendering: auto; // optimizelegibility throws things off #1094
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  transform: translate(0, 0); // ensures no half-pixel rendering in firefox
}


================================================
FILE: website/font-awesome/scss/_fixed-width.scss
================================================
// Fixed Width Icons
// -------------------------
.#{$fa-css-prefix}-fw {
  width: (18em / 14);
  text-align: center;
}


================================================
FILE: website/font-awesome/scss/_icons.scss
================================================
/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
   readers do not read off random characters that represent icons */

.#{$fa-css-prefix}-glass:before { content: $fa-var-glass; }
.#{$fa-css-prefix}-music:before { content: $fa-var-music; }
.#{$fa-css-prefix}-search:before { content: $fa-var-search; }
.#{$fa-css-prefix}-envelope-o:before { content: $fa-var-envelope-o; }
.#{$fa-css-prefix}-heart:before { content: $fa-var-heart; }
.#{$fa-css-prefix}-star:before { content: $fa-var-star; }
.#{$fa-css-prefix}-star-o:before { content: $fa-var-star-o; }
.#{$fa-css-prefix}-user:before { content: $fa-var-user; }
.#{$fa-css-prefix}-film:before { content: $fa-var-film; }
.#{$fa-css-prefix}-th-large:before { content: $fa-var-th-large; }
.#{$fa-css-prefix}-th:before { content: $fa-var-th; }
.#{$fa-css-prefix}-th-list:before { content: $fa-var-th-list; }
.#{$fa-css-prefix}-check:before { content: $fa-var-check; }
.#{$fa-css-prefix}-remove:before,
.#{$fa-css-prefix}-close:before,
.#{$fa-css-prefix}-times:before { content: $fa-var-times; }
.#{$fa-css-prefix}-search-plus:before { content: $fa-var-search-plus; }
.#{$fa-css-prefix}-search-minus:before { content: $fa-var-search-minus; }
.#{$fa-css-prefix}-power-off:before { content: $fa-var-power-off; }
.#{$fa-css-prefix}-signal:before { content: $fa-var-signal; }
.#{$fa-css-prefix}-gear:before,
.#{$fa-css-prefix}-cog:before { content: $fa-var-cog; }
.#{$fa-css-prefix}-trash-o:before { content: $fa-var-trash-o; }
.#{$fa-css-prefix}-home:before { content: $fa-var-home; }
.#{$fa-css-prefix}-file-o:before { content: $fa-var-file-o; }
.#{$fa-css-prefix}-clock-o:before { content: $fa-var-clock-o; }
.#{$fa-css-prefix}-road:before { content: $fa-var-road; }
.#{$fa-css-prefix}-download:before { content: $fa-var-download; }
.#{$fa-css-prefix}-arrow-circle-o-down:before { content: $fa-var-arrow-circle-o-down; }
.#{$fa-css-prefix}-arrow-circle-o-up:before { content: $fa-var-arrow-circle-o-up; }
.#{$fa-css-prefix}-inbox:before { content: $fa-var-inbox; }
.#{$fa-css-prefix}-play-circle-o:before { content: $fa-var-play-circle-o; }
.#{$fa-css-prefix}-rotate-right:before,
.#{$fa-css-prefix}-repeat:before { content: $fa-var-repeat; }
.#{$fa-css-prefix}-refresh:before { content: $fa-var-refresh; }
.#{$fa-css-prefix}-list-alt:before { content: $fa-var-list-alt; }
.#{$fa-css-prefix}-lock:before { content: $fa-var-lock; }
.#{$fa-css-prefix}-flag:before { content: $fa-var-flag; }
.#{$fa-css-prefix}-headphones:before { content: $fa-var-headphones; }
.#{$fa-css-prefix}-volume-off:before { content: $fa-var-volume-off; }
.#{$fa-css-prefix}-volume-down:before { content: $fa-var-volume-down; }
.#{$fa-css-prefix}-volume-up:before { content: $fa-var-volume-up; }
.#{$fa-css-prefix}-qrcode:before { content: $fa-var-qrcode; }
.#{$fa-css-prefix}-barcode:before { content: $fa-var-barcode; }
.#{$fa-css-prefix}-tag:before { content: $fa-var-tag; }
.#{$fa-css-prefix}-tags:before { content: $fa-var-tags; }
.#{$fa-css-prefix}-book:before { content: $fa-var-book; }
.#{$fa-css-prefix}-bookmark:before { content: $fa-var-bookmark; }
.#{$fa-css-prefix}-print:before { content: $fa-var-print; }
.#{$fa-css-prefix}-camera:before { content: $fa-var-camera; }
.#{$fa-css-prefix}-font:before { content: $fa-var-font; }
.#{$fa-css-prefix}-bold:before { content: $fa-var-bold; }
.#{$fa-css-prefix}-italic:before { content: $fa-var-italic; }
.#{$fa-css-prefix}-text-height:before { content: $fa-var-text-height; }
.#{$fa-css-prefix}-text-width:before { content: $fa-var-text-width; }
.#{$fa-css-prefix}-align-left:before { content: $fa-var-align-left; }
.#{$fa-css-prefix}-align-center:before { content: $fa-var-align-center; }
.#{$fa-css-prefix}-align-right:before { content: $fa-var-align-right; }
.#{$fa-css-prefix}-align-justify:before { content: $fa-var-align-justify; }
.#{$fa-css-prefix}-list:before { content: $fa-var-list; }
.#{$fa-css-prefix}-dedent:before,
.#{$fa-css-prefix}-outdent:before { content: $fa-var-outdent; }
.#{$fa-css-prefix}-indent:before { content: $fa-var-indent; }
.#{$fa-css-prefix}-video-camera:before { content: $fa-var-video-camera; }
.#{$fa-css-prefix}-photo:before,
.#{$fa-css-prefix}-image:before,
.#{$fa-css-prefix}-picture-o:before { content: $fa-var-picture-o; }
.#{$fa-css-prefix}-pencil:before { content: $fa-var-pencil; }
.#{$fa-css-prefix}-map-marker:before { content: $fa-var-map-marker; }
.#{$fa-css-prefix}-adjust:before { content: $fa-var-adjust; }
.#{$fa-css-prefix}-tint:before { content: $fa-var-tint; }
.#{$fa-css-prefix}-edit:before,
.#{$fa-css-prefix}-pencil-square-o:before { content: $fa-var-pencil-square-o; }
.#{$fa-css-prefix}-share-square-o:before { content: $fa-var-share-square-o; }
.#{$fa-css-prefix}-check-square-o:before { content: $fa-var-check-square-o; }
.#{$fa-css-prefix}-arrows:before { content: $fa-var-arrows; }
.#{$fa-css-prefix}-step-backward:before { content: $fa-var-step-backward; }
.#{$fa-css-prefix}-fast-backward:before { content: $fa-var-fast-backward; }
.#{$fa-css-prefix}-backward:before { content: $fa-var-backward; }
.#{$fa-css-prefix}-play:before { content: $fa-var-play; }
.#{$fa-css-prefix}-pause:before { content: $fa-var-pause; }
.#{$fa-css-prefix}-stop:before { content: $fa-var-stop; }
.#{$fa-css-prefix}-forward:before { content: $fa-var-forward; }
.#{$fa-css-prefix}-fast-forward:before { content: $fa-var-fast-forward; }
.#{$fa-css-prefix}-step-forward:before { content: $fa-var-step-forward; }
.#{$fa-css-prefix}-eject:before { content: $fa-var-eject; }
.#{$fa-css-prefix}-chevron-left:before { content: $fa-var-chevron-left; }
.#{$fa-css-prefix}-chevron-right:before { content: $fa-var-chevron-right; }
.#{$fa-css-prefix}-plus-circle:before { content: $fa-var-plus-circle; }
.#{$fa-css-prefix}-minus-circle:before { content: $fa-var-minus-circle; }
.#{$fa-css-prefix}-times-circle:before { content: $fa-var-times-circle; }
.#{$fa-css-prefix}-check-circle:before { content: $fa-var-check-circle; }
.#{$fa-css-prefix}-question-circle:before { content: $fa-var-question-circle; }
.#{$fa-css-prefix}-info-circle:before { content: $fa-var-info-circle; }
.#{$fa-css-prefix}-crosshairs:before { content: $fa-var-crosshairs; }
.#{$fa-css-prefix}-times-circle-o:before { content: $fa-var-times-circle-o; }
.#{$fa-css-prefix}-check-circle-o:before { content: $fa-var-check-circle-o; }
.#{$fa-css-prefix}-ban:before { content: $fa-var-ban; }
.#{$fa-css-prefix}-arrow-left:before { content: $fa-var-arrow-left; }
.#{$fa-css-prefix}-arrow-right:before { content: $fa-var-arrow-right; }
.#{$fa-css-prefix}-arrow-up:before { content: $fa-var-arrow-up; }
.#{$fa-css-prefix}-arrow-down:before { content: $fa-var-arrow-down; }
.#{$fa-css-prefix}-mail-forward:before,
.#{$fa-css-prefix}-share:before { content: $fa-var-share; }
.#{$fa-css-prefix}-expand:before { content: $fa-var-expand; }
.#{$fa-css-prefix}-compress:before { content: $fa-var-compress; }
.#{$fa-css-prefix}-plus:before { content: $fa-var-plus; }
.#{$fa-css-prefix}-minus:before { content: $fa-var-minus; }
.#{$fa-css-prefix}-asterisk:before { content: $fa-var-asterisk; }
.#{$fa-css-prefix}-exclamation-circle:before { content: $fa-var-exclamation-circle; }
.#{$fa-css-prefix}-gift:before { content: $fa-var-gift; }
.#{$fa-css-prefix}-leaf:before { content: $fa-var-leaf; }
.#{$fa-css-prefix}-fire:before { content: $fa-var-fire; }
.#{$fa-css-prefix}-eye:before { content: $fa-var-eye; }
.#{$fa-css-prefix}-eye-slash:before { content: $fa-var-eye-slash; }
.#{$fa-css-prefix}-warning:before,
.#{$fa-css-prefix}-exclamation-triangle:before { content: $fa-var-exclamation-triangle; }
.#{$fa-css-prefix}-plane:before { content: $fa-var-plane; }
.#{$fa-css-prefix}-calendar:before { content: $fa-var-calendar; }
.#{$fa-css-prefix}-random:before { content: $fa-var-random; }
.#{$fa-css-prefix}-comment:before { content: $fa-var-comment; }
.#{$fa-css-prefix}-magnet:before { content: $fa-var-magnet; }
.#{$fa-css-prefix}-chevron-up:before { content: $fa-var-chevron-up; }
.#{$fa-css-prefix}-chevron-down:before { content: $fa-var-chevron-down; }
.#{$fa-css-prefix}-retweet:before { content: $fa-var-retweet; }
.#{$fa-css-prefix}-shopping-cart:before { content: $fa-var-shopping-cart; }
.#{$fa-css-prefix}-folder:before { content: $fa-var-folder; }
.#{$fa-css-prefix}-folder-open:before { content: $fa-var-folder-open; }
.#{$fa-css-prefix}-arrows-v:before { content: $fa-var-arrows-v; }
.#{$fa-css-prefix}-arrows-h:before { content: $fa-var-arrows-h; }
.#{$fa-css-prefix}-bar-chart-o:before,
.#{$fa-css-prefix}-bar-chart:before { content: $fa-var-bar-chart; }
.#{$fa-css-prefix}-twitter-square:before { content: $fa-var-twitter-square; }
.#{$fa-css-prefix}-facebook-square:before { content: $fa-var-facebook-square; }
.#{$fa-css-prefix}-camera-retro:before { content: $fa-var-camera-retro; }
.#{$fa-css-prefix}-key:before { content: $fa-var-key; }
.#{$fa-css-prefix}-gears:before,
.#{$fa-css-prefix}-cogs:before { content: $fa-var-cogs; }
.#{$fa-css-prefix}-comments:before { content: $fa-var-comments; }
.#{$fa-css-prefix}-thumbs-o-up:before { content: $fa-var-thumbs-o-up; }
.#{$fa-css-prefix}-thumbs-o-down:before { content: $fa-var-thumbs-o-down; }
.#{$fa-css-prefix}-star-half:before { content: $fa-var-star-half; }
.#{$fa-css-prefix}-heart-o:before { content: $fa-var-heart-o; }
.#{$fa-css-prefix}-sign-out:before { content: $fa-var-sign-out; }
.#{$fa-css-prefix}-linkedin-square:before { content: $fa-var-linkedin-square; }
.#{$fa-css-prefix}-thumb-tack:before { content: $fa-var-thumb-tack; }
.#{$fa-css-prefix}-external-link:before { content: $fa-var-external-link; }
.#{$fa-css-prefix}-sign-in:before { content: $fa-var-sign-in; }
.#{$fa-css-prefix}-trophy:before { content: $fa-var-trophy; }
.#{$fa-css-prefix}-github-square:before { content: $fa-var-github-square; }
.#{$fa-css-prefix}-upload:before { content: $fa-var-upload; }
.#{$fa-css-prefix}-lemon-o:before { content: $fa-var-lemon-o; }
.#{$fa-css-prefix}-phone:before { content: $fa-var-phone; }
.#{$fa-css-prefix}-square-o:before { content: $fa-var-square-o; }
.#{$fa-css-prefix}-bookmark-o:before { content: $fa-var-bookmark-o; }
.#{$fa-css-prefix}-phone-square:before { content: $fa-var-phone-square; }
.#{$fa-css-prefix}-twitter:before { content: $fa-var-twitter; }
.#{$fa-css-prefix}-facebook-f:before,
.#{$fa-css-prefix}-facebook:before { content: $fa-var-facebook; }
.#{$fa-css-prefix}-github:before { content: $fa-var-github; }
.#{$fa-css-prefix}-unlock:before { content: $fa-var-unlock; }
.#{$fa-css-prefix}-credit-card:before { content: $fa-var-credit-card; }
.#{$fa-css-prefix}-rss:before { content: $fa-var-rss; }
.#{$fa-css-prefix}-hdd-o:before { content: $fa-var-hdd-o; }
.#{$fa-css-prefix}-bullhorn:before { content: $fa-var-bullhorn; }
.#{$fa-css-prefix}-bell:before { content: $fa-var-bell; }
.#{$fa-css-prefix}-certificate:before { content: $fa-var-certificate; }
.#{$fa-css-prefix}-hand-o-right:before { content: $fa-var-hand-o-right; }
.#{$fa-css-prefix}-hand-o-left:before { content: $fa-var-hand-o-left; }
.#{$fa-css-prefix}-hand-o-up:before { content: $fa-var-hand-o-up; }
.#{$fa-css-prefix}-hand-o-down:before { content: $fa-var-hand-o-down; }
.#{$fa-css-prefix}-arrow-circle-left:before { content: $fa-var-arrow-circle-left; }
.#{$fa-css-prefix}-arrow-circle-right:before { content: $fa-var-arrow-circle-right; }
.#{$fa-css-prefix}-arrow-circle-up:before { content: $fa-var-arrow-circle-up; }
.#{$fa-css-prefix}-arrow-circle-down:before { content: $fa-var-arrow-circle-down; } .#{$fa-css-prefix}-globe:before { content: $fa-var-globe; } .#{$fa-css-prefix}-wrench:before { content: $fa-var-wrench; } .#{$fa-css-prefix}-tasks:before { content: $fa-var-tasks; } .#{$fa-css-prefix}-filter:before { content: $fa-var-filter; } .#{$fa-css-prefix}-briefcase:before { content: $fa-var-briefcase; } .#{$fa-css-prefix}-arrows-alt:before { content: $fa-var-arrows-alt; } .#{$fa-css-prefix}-group:before, .#{$fa-css-prefix}-users:before { content: $fa-var-users; } .#{$fa-css-prefix}-chain:before, .#{$fa-css-prefix}-link:before { content: $fa-var-link; } .#{$fa-css-prefix}-cloud:before { content: $fa-var-cloud; } .#{$fa-css-prefix}-flask:before { content: $fa-var-flask; } .#{$fa-css-prefix}-cut:before, .#{$fa-css-prefix}-scissors:before { content: $fa-var-scissors; } .#{$fa-css-prefix}-copy:before, .#{$fa-css-prefix}-files-o:before { content: $fa-var-files-o; } .#{$fa-css-prefix}-paperclip:before { content: $fa-var-paperclip; } .#{$fa-css-prefix}-save:before, .#{$fa-css-prefix}-floppy-o:before { content: $fa-var-floppy-o; } .#{$fa-css-prefix}-square:before { content: $fa-var-square; } .#{$fa-css-prefix}-navicon:before, .#{$fa-css-prefix}-reorder:before, .#{$fa-css-prefix}-bars:before { content: $fa-var-bars; } .#{$fa-css-prefix}-list-ul:before { content: $fa-var-list-ul; } .#{$fa-css-prefix}-list-ol:before { content: $fa-var-list-ol; } .#{$fa-css-prefix}-strikethrough:before { content: $fa-var-strikethrough; } .#{$fa-css-prefix}-underline:before { content: $fa-var-underline; } .#{$fa-css-prefix}-table:before { content: $fa-var-table; } .#{$fa-css-prefix}-magic:before { content: $fa-var-magic; } .#{$fa-css-prefix}-truck:before { content: $fa-var-truck; } .#{$fa-css-prefix}-pinterest:before { content: $fa-var-pinterest; } .#{$fa-css-prefix}-pinterest-square:before { content: $fa-var-pinterest-square; } .#{$fa-css-prefix}-google-plus-square:before { content: $fa-var-google-plus-square; } .#{$fa-css-prefix}-google-plus:before { content: $fa-var-google-plus; } .#{$fa-css-prefix}-money:before { content: $fa-var-money; } .#{$fa-css-prefix}-caret-down:before { content: $fa-var-caret-down; } .#{$fa-css-prefix}-caret-up:before { content: $fa-var-caret-up; } .#{$fa-css-prefix}-caret-left:before { content: $fa-var-caret-left; } .#{$fa-css-prefix}-caret-right:before { content: $fa-var-caret-right; } .#{$fa-css-prefix}-columns:before { content: $fa-var-columns; } .#{$fa-css-prefix}-unsorted:before, .#{$fa-css-prefix}-sort:before { content: $fa-var-sort; } .#{$fa-css-prefix}-sort-down:before, .#{$fa-css-prefix}-sort-desc:before { content: $fa-var-sort-desc; } .#{$fa-css-prefix}-sort-up:before, .#{$fa-css-prefix}-sort-asc:before { content: $fa-var-sort-asc; } .#{$fa-css-prefix}-envelope:before { content: $fa-var-envelope; } .#{$fa-css-prefix}-linkedin:before { content: $fa-var-linkedin; } .#{$fa-css-prefix}-rotate-left:before, .#{$fa-css-prefix}-undo:before { content: $fa-var-undo; } .#{$fa-css-prefix}-legal:before, .#{$fa-css-prefix}-gavel:before { content: $fa-var-gavel; } .#{$fa-css-prefix}-dashboard:before, .#{$fa-css-prefix}-tachometer:before { content: $fa-var-tachometer; } .#{$fa-css-prefix}-comment-o:before { content: $fa-var-comment-o; } .#{$fa-css-prefix}-comments-o:before { content: $fa-var-comments-o; } .#{$fa-css-prefix}-flash:before, .#{$fa-css-prefix}-bolt:before { content: $fa-var-bolt; } .#{$fa-css-prefix}-sitemap:before { content: $fa-var-sitemap; } .#{$fa-css-prefix}-umbrella:before { content: 
$fa-var-umbrella; } .#{$fa-css-prefix}-paste:before, .#{$fa-css-prefix}-clipboard:before { content: $fa-var-clipboard; } .#{$fa-css-prefix}-lightbulb-o:before { content: $fa-var-lightbulb-o; } .#{$fa-css-prefix}-exchange:before { content: $fa-var-exchange; } .#{$fa-css-prefix}-cloud-download:before { content: $fa-var-cloud-download; } .#{$fa-css-prefix}-cloud-upload:before { content: $fa-var-cloud-upload; } .#{$fa-css-prefix}-user-md:before { content: $fa-var-user-md; } .#{$fa-css-prefix}-stethoscope:before { content: $fa-var-stethoscope; } .#{$fa-css-prefix}-suitcase:before { content: $fa-var-suitcase; } .#{$fa-css-prefix}-bell-o:before { content: $fa-var-bell-o; } .#{$fa-css-prefix}-coffee:before { content: $fa-var-coffee; } .#{$fa-css-prefix}-cutlery:before { content: $fa-var-cutlery; } .#{$fa-css-prefix}-file-text-o:before { content: $fa-var-file-text-o; } .#{$fa-css-prefix}-building-o:before { content: $fa-var-building-o; } .#{$fa-css-prefix}-hospital-o:before { content: $fa-var-hospital-o; } .#{$fa-css-prefix}-ambulance:before { content: $fa-var-ambulance; } .#{$fa-css-prefix}-medkit:before { content: $fa-var-medkit; } .#{$fa-css-prefix}-fighter-jet:before { content: $fa-var-fighter-jet; } .#{$fa-css-prefix}-beer:before { content: $fa-var-beer; } .#{$fa-css-prefix}-h-square:before { content: $fa-var-h-square; } .#{$fa-css-prefix}-plus-square:before { content: $fa-var-plus-square; } .#{$fa-css-prefix}-angle-double-left:before { content: $fa-var-angle-double-left; } .#{$fa-css-prefix}-angle-double-right:before { content: $fa-var-angle-double-right; } .#{$fa-css-prefix}-angle-double-up:before { content: $fa-var-angle-double-up; } .#{$fa-css-prefix}-angle-double-down:before { content: $fa-var-angle-double-down; } .#{$fa-css-prefix}-angle-left:before { content: $fa-var-angle-left; } .#{$fa-css-prefix}-angle-right:before { content: $fa-var-angle-right; } .#{$fa-css-prefix}-angle-up:before { content: $fa-var-angle-up; } .#{$fa-css-prefix}-angle-down:before { content: $fa-var-angle-down; } .#{$fa-css-prefix}-desktop:before { content: $fa-var-desktop; } .#{$fa-css-prefix}-laptop:before { content: $fa-var-laptop; } .#{$fa-css-prefix}-tablet:before { content: $fa-var-tablet; } .#{$fa-css-prefix}-mobile-phone:before, .#{$fa-css-prefix}-mobile:before { content: $fa-var-mobile; } .#{$fa-css-prefix}-circle-o:before { content: $fa-var-circle-o; } .#{$fa-css-prefix}-quote-left:before { content: $fa-var-quote-left; } .#{$fa-css-prefix}-quote-right:before { content: $fa-var-quote-right; } .#{$fa-css-prefix}-spinner:before { content: $fa-var-spinner; } .#{$fa-css-prefix}-circle:before { content: $fa-var-circle; } .#{$fa-css-prefix}-mail-reply:before, .#{$fa-css-prefix}-reply:before { content: $fa-var-reply; } .#{$fa-css-prefix}-github-alt:before { content: $fa-var-github-alt; } .#{$fa-css-prefix}-folder-o:before { content: $fa-var-folder-o; } .#{$fa-css-prefix}-folder-open-o:before { content: $fa-var-folder-open-o; } .#{$fa-css-prefix}-smile-o:before { content: $fa-var-smile-o; } .#{$fa-css-prefix}-frown-o:before { content: $fa-var-frown-o; } .#{$fa-css-prefix}-meh-o:before { content: $fa-var-meh-o; } .#{$fa-css-prefix}-gamepad:before { content: $fa-var-gamepad; } .#{$fa-css-prefix}-keyboard-o:before { content: $fa-var-keyboard-o; } .#{$fa-css-prefix}-flag-o:before { content: $fa-var-flag-o; } .#{$fa-css-prefix}-flag-checkered:before { content: $fa-var-flag-checkered; } .#{$fa-css-prefix}-terminal:before { content: $fa-var-terminal; } .#{$fa-css-prefix}-code:before { content: $fa-var-code; } 
.#{$fa-css-prefix}-mail-reply-all:before, .#{$fa-css-prefix}-reply-all:before { content: $fa-var-reply-all; } .#{$fa-css-prefix}-star-half-empty:before, .#{$fa-css-prefix}-star-half-full:before, .#{$fa-css-prefix}-star-half-o:before { content: $fa-var-star-half-o; } .#{$fa-css-prefix}-location-arrow:before { content: $fa-var-location-arrow; } .#{$fa-css-prefix}-crop:before { content: $fa-var-crop; } .#{$fa-css-prefix}-code-fork:before { content: $fa-var-code-fork; } .#{$fa-css-prefix}-unlink:before, .#{$fa-css-prefix}-chain-broken:before { content: $fa-var-chain-broken; } .#{$fa-css-prefix}-question:before { content: $fa-var-question; } .#{$fa-css-prefix}-info:before { content: $fa-var-info; } .#{$fa-css-prefix}-exclamation:before { content: $fa-var-exclamation; } .#{$fa-css-prefix}-superscript:before { content: $fa-var-superscript; } .#{$fa-css-prefix}-subscript:before { content: $fa-var-subscript; } .#{$fa-css-prefix}-eraser:before { content: $fa-var-eraser; } .#{$fa-css-prefix}-puzzle-piece:before { content: $fa-var-puzzle-piece; } .#{$fa-css-prefix}-microphone:before { content: $fa-var-microphone; } .#{$fa-css-prefix}-microphone-slash:before { content: $fa-var-microphone-slash; } .#{$fa-css-prefix}-shield:before { content: $fa-var-shield; } .#{$fa-css-prefix}-calendar-o:before { content: $fa-var-calendar-o; } .#{$fa-css-prefix}-fire-extinguisher:before { content: $fa-var-fire-extinguisher; } .#{$fa-css-prefix}-rocket:before { content: $fa-var-rocket; } .#{$fa-css-prefix}-maxcdn:before { content: $fa-var-maxcdn; } .#{$fa-css-prefix}-chevron-circle-left:before { content: $fa-var-chevron-circle-left; } .#{$fa-css-prefix}-chevron-circle-right:before { content: $fa-var-chevron-circle-right; } .#{$fa-css-prefix}-chevron-circle-up:before { content: $fa-var-chevron-circle-up; } .#{$fa-css-prefix}-chevron-circle-down:before { content: $fa-var-chevron-circle-down; } .#{$fa-css-prefix}-html5:before { content: $fa-var-html5; } .#{$fa-css-prefix}-css3:before { content: $fa-var-css3; } .#{$fa-css-prefix}-anchor:before { content: $fa-var-anchor; } .#{$fa-css-prefix}-unlock-alt:before { content: $fa-var-unlock-alt; } .#{$fa-css-prefix}-bullseye:before { content: $fa-var-bullseye; } .#{$fa-css-prefix}-ellipsis-h:before { content: $fa-var-ellipsis-h; } .#{$fa-css-prefix}-ellipsis-v:before { content: $fa-var-ellipsis-v; } .#{$fa-css-prefix}-rss-square:before { content: $fa-var-rss-square; } .#{$fa-css-prefix}-play-circle:before { content: $fa-var-play-circle; } .#{$fa-css-prefix}-ticket:before { content: $fa-var-ticket; } .#{$fa-css-prefix}-minus-square:before { content: $fa-var-minus-square; } .#{$fa-css-prefix}-minus-square-o:before { content: $fa-var-minus-square-o; } .#{$fa-css-prefix}-level-up:before { content: $fa-var-level-up; } .#{$fa-css-prefix}-level-down:before { content: $fa-var-level-down; } .#{$fa-css-prefix}-check-square:before { content: $fa-var-check-square; } .#{$fa-css-prefix}-pencil-square:before { content: $fa-var-pencil-square; } .#{$fa-css-prefix}-external-link-square:before { content: $fa-var-external-link-square; } .#{$fa-css-prefix}-share-square:before { content: $fa-var-share-square; } .#{$fa-css-prefix}-compass:before { content: $fa-var-compass; } .#{$fa-css-prefix}-toggle-down:before, .#{$fa-css-prefix}-caret-square-o-down:before { content: $fa-var-caret-square-o-down; } .#{$fa-css-prefix}-toggle-up:before, .#{$fa-css-prefix}-caret-square-o-up:before { content: $fa-var-caret-square-o-up; } .#{$fa-css-prefix}-toggle-right:before, .#{$fa-css-prefix}-caret-square-o-right:before 
{ content: $fa-var-caret-square-o-right; } .#{$fa-css-prefix}-euro:before, .#{$fa-css-prefix}-eur:before { content: $fa-var-eur; } .#{$fa-css-prefix}-gbp:before { content: $fa-var-gbp; } .#{$fa-css-prefix}-dollar:before, .#{$fa-css-prefix}-usd:before { content: $fa-var-usd; } .#{$fa-css-prefix}-rupee:before, .#{$fa-css-prefix}-inr:before { content: $fa-var-inr; } .#{$fa-css-prefix}-cny:before, .#{$fa-css-prefix}-rmb:before, .#{$fa-css-prefix}-yen:before, .#{$fa-css-prefix}-jpy:before { content: $fa-var-jpy; } .#{$fa-css-prefix}-ruble:before, .#{$fa-css-prefix}-rouble:before, .#{$fa-css-prefix}-rub:before { content: $fa-var-rub; } .#{$fa-css-prefix}-won:before, .#{$fa-css-prefix}-krw:before { content: $fa-var-krw; } .#{$fa-css-prefix}-bitcoin:before, .#{$fa-css-prefix}-btc:before { content: $fa-var-btc; } .#{$fa-css-prefix}-file:before { content: $fa-var-file; } .#{$fa-css-prefix}-file-text:before { content: $fa-var-file-text; } .#{$fa-css-prefix}-sort-alpha-asc:before { content: $fa-var-sort-alpha-asc; } .#{$fa-css-prefix}-sort-alpha-desc:before { content: $fa-var-sort-alpha-desc; } .#{$fa-css-prefix}-sort-amount-asc:before { content: $fa-var-sort-amount-asc; } .#{$fa-css-prefix}-sort-amount-desc:before { content: $fa-var-sort-amount-desc; } .#{$fa-css-prefix}-sort-numeric-asc:before { content: $fa-var-sort-numeric-asc; } .#{$fa-css-prefix}-sort-numeric-desc:before { content: $fa-var-sort-numeric-desc; } .#{$fa-css-prefix}-thumbs-up:before { content: $fa-var-thumbs-up; } .#{$fa-css-prefix}-thumbs-down:before { content: $fa-var-thumbs-down; } .#{$fa-css-prefix}-youtube-square:before { content: $fa-var-youtube-square; } .#{$fa-css-prefix}-youtube:before { content: $fa-var-youtube; } .#{$fa-css-prefix}-xing:before { content: $fa-var-xing; } .#{$fa-css-prefix}-xing-square:before { content: $fa-var-xing-square; } .#{$fa-css-prefix}-youtube-play:before { content: $fa-var-youtube-play; } .#{$fa-css-prefix}-dropbox:before { content: $fa-var-dropbox; } .#{$fa-css-prefix}-stack-overflow:before { content: $fa-var-stack-overflow; } .#{$fa-css-prefix}-instagram:before { content: $fa-var-instagram; } .#{$fa-css-prefix}-flickr:before { content: $fa-var-flickr; } .#{$fa-css-prefix}-adn:before { content: $fa-var-adn; } .#{$fa-css-prefix}-bitbucket:before { content: $fa-var-bitbucket; } .#{$fa-css-prefix}-bitbucket-square:before { content: $fa-var-bitbucket-square; } .#{$fa-css-prefix}-tumblr:before { content: $fa-var-tumblr; } .#{$fa-css-prefix}-tumblr-square:before { content: $fa-var-tumblr-square; } .#{$fa-css-prefix}-long-arrow-down:before { content: $fa-var-long-arrow-down; } .#{$fa-css-prefix}-long-arrow-up:before { content: $fa-var-long-arrow-up; } .#{$fa-css-prefix}-long-arrow-left:before { content: $fa-var-long-arrow-left; } .#{$fa-css-prefix}-long-arrow-right:before { content: $fa-var-long-arrow-right; } .#{$fa-css-prefix}-apple:before { content: $fa-var-apple; } .#{$fa-css-prefix}-windows:before { content: $fa-var-windows; } .#{$fa-css-prefix}-android:before { content: $fa-var-android; } .#{$fa-css-prefix}-linux:before { content: $fa-var-linux; } .#{$fa-css-prefix}-dribbble:before { content: $fa-var-dribbble; } .#{$fa-css-prefix}-skype:before { content: $fa-var-skype; } .#{$fa-css-prefix}-foursquare:before { content: $fa-var-foursquare; } .#{$fa-css-prefix}-trello:before { content: $fa-var-trello; } .#{$fa-css-prefix}-female:before { content: $fa-var-female; } .#{$fa-css-prefix}-male:before { content: $fa-var-male; } .#{$fa-css-prefix}-gittip:before, .#{$fa-css-prefix}-gratipay:before { content: 
$fa-var-gratipay; } .#{$fa-css-prefix}-sun-o:before { content: $fa-var-sun-o; } .#{$fa-css-prefix}-moon-o:before { content: $fa-var-moon-o; } .#{$fa-css-prefix}-archive:before { content: $fa-var-archive; } .#{$fa-css-prefix}-bug:before { content: $fa-var-bug; } .#{$fa-css-prefix}-vk:before { content: $fa-var-vk; } .#{$fa-css-prefix}-weibo:before { content: $fa-var-weibo; } .#{$fa-css-prefix}-renren:before { content: $fa-var-renren; } .#{$fa-css-prefix}-pagelines:before { content: $fa-var-pagelines; } .#{$fa-css-prefix}-stack-exchange:before { content: $fa-var-stack-exchange; } .#{$fa-css-prefix}-arrow-circle-o-right:before { content: $fa-var-arrow-circle-o-right; } .#{$fa-css-prefix}-arrow-circle-o-left:before { content: $fa-var-arrow-circle-o-left; } .#{$fa-css-prefix}-toggle-left:before, .#{$fa-css-prefix}-caret-square-o-left:before { content: $fa-var-caret-square-o-left; } .#{$fa-css-prefix}-dot-circle-o:before { content: $fa-var-dot-circle-o; } .#{$fa-css-prefix}-wheelchair:before { content: $fa-var-wheelchair; } .#{$fa-css-prefix}-vimeo-square:before { content: $fa-var-vimeo-square; } .#{$fa-css-prefix}-turkish-lira:before, .#{$fa-css-prefix}-try:before { content: $fa-var-try; } .#{$fa-css-prefix}-plus-square-o:before { content: $fa-var-plus-square-o; } .#{$fa-css-prefix}-space-shuttle:before { content: $fa-var-space-shuttle; } .#{$fa-css-prefix}-slack:before { content: $fa-var-slack; } .#{$fa-css-prefix}-envelope-square:before { content: $fa-var-envelope-square; } .#{$fa-css-prefix}-wordpress:before { content: $fa-var-wordpress; } .#{$fa-css-prefix}-openid:before { content: $fa-var-openid; } .#{$fa-css-prefix}-institution:before, .#{$fa-css-prefix}-bank:before, .#{$fa-css-prefix}-university:before { content: $fa-var-university; } .#{$fa-css-prefix}-mortar-board:before, .#{$fa-css-prefix}-graduation-cap:before { content: $fa-var-graduation-cap; } .#{$fa-css-prefix}-yahoo:before { content: $fa-var-yahoo; } .#{$fa-css-prefix}-google:before { content: $fa-var-google; } .#{$fa-css-prefix}-reddit:before { content: $fa-var-reddit; } .#{$fa-css-prefix}-reddit-square:before { content: $fa-var-reddit-square; } .#{$fa-css-prefix}-stumbleupon-circle:before { content: $fa-var-stumbleupon-circle; } .#{$fa-css-prefix}-stumbleupon:before { content: $fa-var-stumbleupon; } .#{$fa-css-prefix}-delicious:before { content: $fa-var-delicious; } .#{$fa-css-prefix}-digg:before { content: $fa-var-digg; } .#{$fa-css-prefix}-pied-piper:before { content: $fa-var-pied-piper; } .#{$fa-css-prefix}-pied-piper-alt:before { content: $fa-var-pied-piper-alt; } .#{$fa-css-prefix}-drupal:before { content: $fa-var-drupal; } .#{$fa-css-prefix}-joomla:before { content: $fa-var-joomla; } .#{$fa-css-prefix}-language:before { content: $fa-var-language; } .#{$fa-css-prefix}-fax:before { content: $fa-var-fax; } .#{$fa-css-prefix}-building:before { content: $fa-var-building; } .#{$fa-css-prefix}-child:before { content: $fa-var-child; } .#{$fa-css-prefix}-paw:before { content: $fa-var-paw; } .#{$fa-css-prefix}-spoon:before { content: $fa-var-spoon; } .#{$fa-css-prefix}-cube:before { content: $fa-var-cube; } .#{$fa-css-prefix}-cubes:before { content: $fa-var-cubes; } .#{$fa-css-prefix}-behance:before { content: $fa-var-behance; } .#{$fa-css-prefix}-behance-square:before { content: $fa-var-behance-square; } .#{$fa-css-prefix}-steam:before { content: $fa-var-steam; } .#{$fa-css-prefix}-steam-square:before { content: $fa-var-steam-square; } .#{$fa-css-prefix}-recycle:before { content: $fa-var-recycle; } 
.#{$fa-css-prefix}-automobile:before, .#{$fa-css-prefix}-car:before { content: $fa-var-car; } .#{$fa-css-prefix}-cab:before, .#{$fa-css-prefix}-taxi:before { content: $fa-var-taxi; } .#{$fa-css-prefix}-tree:before { content: $fa-var-tree; } .#{$fa-css-prefix}-spotify:before { content: $fa-var-spotify; } .#{$fa-css-prefix}-deviantart:before { content: $fa-var-deviantart; } .#{$fa-css-prefix}-soundcloud:before { content: $fa-var-soundcloud; } .#{$fa-css-prefix}-database:before { content: $fa-var-database; } .#{$fa-css-prefix}-file-pdf-o:before { content: $fa-var-file-pdf-o; } .#{$fa-css-prefix}-file-word-o:before { content: $fa-var-file-word-o; } .#{$fa-css-prefix}-file-excel-o:before { content: $fa-var-file-excel-o; } .#{$fa-css-prefix}-file-powerpoint-o:before { content: $fa-var-file-powerpoint-o; } .#{$fa-css-prefix}-file-photo-o:before, .#{$fa-css-prefix}-file-picture-o:before, .#{$fa-css-prefix}-file-image-o:before { content: $fa-var-file-image-o; } .#{$fa-css-prefix}-file-zip-o:before, .#{$fa-css-prefix}-file-archive-o:before { content: $fa-var-file-archive-o; } .#{$fa-css-prefix}-file-sound-o:before, .#{$fa-css-prefix}-file-audio-o:before { content: $fa-var-file-audio-o; } .#{$fa-css-prefix}-file-movie-o:before, .#{$fa-css-prefix}-file-video-o:before { content: $fa-var-file-video-o; } .#{$fa-css-prefix}-file-code-o:before { content: $fa-var-file-code-o; } .#{$fa-css-prefix}-vine:before { content: $fa-var-vine; } .#{$fa-css-prefix}-codepen:before { content: $fa-var-codepen; } .#{$fa-css-prefix}-jsfiddle:before { content: $fa-var-jsfiddle; } .#{$fa-css-prefix}-life-bouy:before, .#{$fa-css-prefix}-life-buoy:before, .#{$fa-css-prefix}-life-saver:before, .#{$fa-css-prefix}-support:before, .#{$fa-css-prefix}-life-ring:before { content: $fa-var-life-ring; } .#{$fa-css-prefix}-circle-o-notch:before { content: $fa-var-circle-o-notch; } .#{$fa-css-prefix}-ra:before, .#{$fa-css-prefix}-rebel:before { content: $fa-var-rebel; } .#{$fa-css-prefix}-ge:before, .#{$fa-css-prefix}-empire:before { content: $fa-var-empire; } .#{$fa-css-prefix}-git-square:before { content: $fa-var-git-square; } .#{$fa-css-prefix}-git:before { content: $fa-var-git; } .#{$fa-css-prefix}-hacker-news:before { content: $fa-var-hacker-news; } .#{$fa-css-prefix}-tencent-weibo:before { content: $fa-var-tencent-weibo; } .#{$fa-css-prefix}-qq:before { content: $fa-var-qq; } .#{$fa-css-prefix}-wechat:before, .#{$fa-css-prefix}-weixin:before { content: $fa-var-weixin; } .#{$fa-css-prefix}-send:before, .#{$fa-css-prefix}-paper-plane:before { content: $fa-var-paper-plane; } .#{$fa-css-prefix}-send-o:before, .#{$fa-css-prefix}-paper-plane-o:before { content: $fa-var-paper-plane-o; } .#{$fa-css-prefix}-history:before { content: $fa-var-history; } .#{$fa-css-prefix}-genderless:before, .#{$fa-css-prefix}-circle-thin:before { content: $fa-var-circle-thin; } .#{$fa-css-prefix}-header:before { content: $fa-var-header; } .#{$fa-css-prefix}-paragraph:before { content: $fa-var-paragraph; } .#{$fa-css-prefix}-sliders:before { content: $fa-var-sliders; } .#{$fa-css-prefix}-share-alt:before { content: $fa-var-share-alt; } .#{$fa-css-prefix}-share-alt-square:before { content: $fa-var-share-alt-square; } .#{$fa-css-prefix}-bomb:before { content: $fa-var-bomb; } .#{$fa-css-prefix}-soccer-ball-o:before, .#{$fa-css-prefix}-futbol-o:before { content: $fa-var-futbol-o; } .#{$fa-css-prefix}-tty:before { content: $fa-var-tty; } .#{$fa-css-prefix}-binoculars:before { content: $fa-var-binoculars; } .#{$fa-css-prefix}-plug:before { content: $fa-var-plug; } 
.#{$fa-css-prefix}-slideshare:before { content: $fa-var-slideshare; } .#{$fa-css-prefix}-twitch:before { content: $fa-var-twitch; } .#{$fa-css-prefix}-yelp:before { content: $fa-var-yelp; } .#{$fa-css-prefix}-newspaper-o:before { content: $fa-var-newspaper-o; } .#{$fa-css-prefix}-wifi:before { content: $fa-var-wifi; } .#{$fa-css-prefix}-calculator:before { content: $fa-var-calculator; } .#{$fa-css-prefix}-paypal:before { content: $fa-var-paypal; } .#{$fa-css-prefix}-google-wallet:before { content: $fa-var-google-wallet; } .#{$fa-css-prefix}-cc-visa:before { content: $fa-var-cc-visa; } .#{$fa-css-prefix}-cc-mastercard:before { content: $fa-var-cc-mastercard; } .#{$fa-css-prefix}-cc-discover:before { content: $fa-var-cc-discover; } .#{$fa-css-prefix}-cc-amex:before { content: $fa-var-cc-amex; } .#{$fa-css-prefix}-cc-paypal:before { content: $fa-var-cc-paypal; } .#{$fa-css-prefix}-cc-stripe:before { content: $fa-var-cc-stripe; } .#{$fa-css-prefix}-bell-slash:before { content: $fa-var-bell-slash; } .#{$fa-css-prefix}-bell-slash-o:before { content: $fa-var-bell-slash-o; } .#{$fa-css-prefix}-trash:before { content: $fa-var-trash; } .#{$fa-css-prefix}-copyright:before { content: $fa-var-copyright; } .#{$fa-css-prefix}-at:before { content: $fa-var-at; } .#{$fa-css-prefix}-eyedropper:before { content: $fa-var-eyedropper; } .#{$fa-css-prefix}-paint-brush:before { content: $fa-var-paint-brush; } .#{$fa-css-prefix}-birthday-cake:before { content: $fa-var-birthday-cake; } .#{$fa-css-prefix}-area-chart:before { content: $fa-var-area-chart; } .#{$fa-css-prefix}-pie-chart:before { content: $fa-var-pie-chart; } .#{$fa-css-prefix}-line-chart:before { content: $fa-var-line-chart; } .#{$fa-css-prefix}-lastfm:before { content: $fa-var-lastfm; } .#{$fa-css-prefix}-lastfm-square:before { content: $fa-var-lastfm-square; } .#{$fa-css-prefix}-toggle-off:before { content: $fa-var-toggle-off; } .#{$fa-css-prefix}-toggle-on:before { content: $fa-var-toggle-on; } .#{$fa-css-prefix}-bicycle:before { content: $fa-var-bicycle; } .#{$fa-css-prefix}-bus:before { content: $fa-var-bus; } .#{$fa-css-prefix}-ioxhost:before { content: $fa-var-ioxhost; } .#{$fa-css-prefix}-angellist:before { content: $fa-var-angellist; } .#{$fa-css-prefix}-cc:before { content: $fa-var-cc; } .#{$fa-css-prefix}-shekel:before, .#{$fa-css-prefix}-sheqel:before, .#{$fa-css-prefix}-ils:before { content: $fa-var-ils; } .#{$fa-css-prefix}-meanpath:before { content: $fa-var-meanpath; } .#{$fa-css-prefix}-buysellads:before { content: $fa-var-buysellads; } .#{$fa-css-prefix}-connectdevelop:before { content: $fa-var-connectdevelop; } .#{$fa-css-prefix}-dashcube:before { content: $fa-var-dashcube; } .#{$fa-css-prefix}-forumbee:before { content: $fa-var-forumbee; } .#{$fa-css-prefix}-leanpub:before { content: $fa-var-leanpub; } .#{$fa-css-prefix}-sellsy:before { content: $fa-var-sellsy; } .#{$fa-css-prefix}-shirtsinbulk:before { content: $fa-var-shirtsinbulk; } .#{$fa-css-prefix}-simplybuilt:before { content: $fa-var-simplybuilt; } .#{$fa-css-prefix}-skyatlas:before { content: $fa-var-skyatlas; } .#{$fa-css-prefix}-cart-plus:before { content: $fa-var-cart-plus; } .#{$fa-css-prefix}-cart-arrow-down:before { content: $fa-var-cart-arrow-down; } .#{$fa-css-prefix}-diamond:before { content: $fa-var-diamond; } .#{$fa-css-prefix}-ship:before { content: $fa-var-ship; } .#{$fa-css-prefix}-user-secret:before { content: $fa-var-user-secret; } .#{$fa-css-prefix}-motorcycle:before { content: $fa-var-motorcycle; } .#{$fa-css-prefix}-street-view:before { content: 
$fa-var-street-view; } .#{$fa-css-prefix}-heartbeat:before { content: $fa-var-heartbeat; } .#{$fa-css-prefix}-venus:before { content: $fa-var-venus; } .#{$fa-css-prefix}-mars:before { content: $fa-var-mars; } .#{$fa-css-prefix}-mercury:before { content: $fa-var-mercury; } .#{$fa-css-prefix}-transgender:before { content: $fa-var-transgender; } .#{$fa-css-prefix}-transgender-alt:before { content: $fa-var-transgender-alt; } .#{$fa-css-prefix}-venus-double:before { content: $fa-var-venus-double; } .#{$fa-css-prefix}-mars-double:before { content: $fa-var-mars-double; } .#{$fa-css-prefix}-venus-mars:before { content: $fa-var-venus-mars; } .#{$fa-css-prefix}-mars-stroke:before { content: $fa-var-mars-stroke; } .#{$fa-css-prefix}-mars-stroke-v:before { content: $fa-var-mars-stroke-v; } .#{$fa-css-prefix}-mars-stroke-h:before { content: $fa-var-mars-stroke-h; } .#{$fa-css-prefix}-neuter:before { content: $fa-var-neuter; } .#{$fa-css-prefix}-facebook-official:before { content: $fa-var-facebook-official; } .#{$fa-css-prefix}-pinterest-p:before { content: $fa-var-pinterest-p; } .#{$fa-css-prefix}-whatsapp:before { content: $fa-var-whatsapp; } .#{$fa-css-prefix}-server:before { content: $fa-var-server; } .#{$fa-css-prefix}-user-plus:before { content: $fa-var-user-plus; } .#{$fa-css-prefix}-user-times:before { content: $fa-var-user-times; } .#{$fa-css-prefix}-hotel:before, .#{$fa-css-prefix}-bed:before { content: $fa-var-bed; } .#{$fa-css-prefix}-viacoin:before { content: $fa-var-viacoin; } .#{$fa-css-prefix}-train:before { content: $fa-var-train; } .#{$fa-css-prefix}-subway:before { content: $fa-var-subway; } .#{$fa-css-prefix}-medium:before { content: $fa-var-medium; }

================================================
FILE: website/font-awesome/scss/_larger.scss
================================================
// Icon Sizes
// -------------------------

/* makes the font 33% larger relative to the icon container */
.#{$fa-css-prefix}-lg {
  font-size: (4em / 3);
  line-height: (3em / 4);
  vertical-align: -15%;
}
.#{$fa-css-prefix}-2x { font-size: 2em; }
.#{$fa-css-prefix}-3x { font-size: 3em; }
.#{$fa-css-prefix}-4x { font-size: 4em; }
.#{$fa-css-prefix}-5x { font-size: 5em; }

================================================
FILE: website/font-awesome/scss/_list.scss
================================================
// List Icons
// -------------------------

.#{$fa-css-prefix}-ul {
  padding-left: 0;
  margin-left: $fa-li-width;
  list-style-type: none;
  > li { position: relative; }
}
.#{$fa-css-prefix}-li {
  position: absolute;
  left: -$fa-li-width;
  width: $fa-li-width;
  top: (2em / 14);
  text-align: center;
  &.#{$fa-css-prefix}-lg {
    left: -$fa-li-width + (4em / 14);
  }
}

================================================
FILE: website/font-awesome/scss/_mixins.scss
================================================
// Mixins
// --------------------------

@mixin fa-icon() {
  display: inline-block;
  font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration
  font-size: inherit; // can't have font-size inherit on line above, so need to override
  text-rendering: auto; // optimizelegibility throws things off #1094
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  transform: translate(0, 0); // ensures no half-pixel rendering in firefox
}

@mixin fa-icon-rotate($degrees, $rotation) {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
  -webkit-transform: rotate($degrees);
  -ms-transform: rotate($degrees);
  transform: rotate($degrees);
}

@mixin fa-icon-flip($horiz, $vert, $rotation) {
  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
  -webkit-transform: scale($horiz, $vert);
  -ms-transform: scale($horiz, $vert);
  transform: scale($horiz, $vert);
}
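// Illustrative only (not part of upstream _mixins.scss): these mixins are
// what _core.scss and _rotated-flipped.scss are built from, and a
// hypothetical custom class could reuse them the same way, e.g.
//   .fa-rotate-45 { @include fa-icon-rotate(45deg, 1); }
// Note the $rotation argument feeds the IE8-9 DXImageTransform filter, which
// only supports quarter turns (0-3), so arbitrary angles degrade in old IE.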
================================================
FILE: website/font-awesome/scss/_path.scss
================================================
/* FONT PATH
 * -------------------------- */

@font-face {
  font-family: 'FontAwesome';
  src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}');
  src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'),
    url('#{$fa-font-path}/fontawesome-webfont.woff2?v=#{$fa-version}') format('woff2'),
    url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'),
    url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'),
    url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg');
  // src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
  font-weight: normal;
  font-style: normal;
}

================================================
FILE: website/font-awesome/scss/_rotated-flipped.scss
================================================
// Rotated & Flipped Icons
// -------------------------

.#{$fa-css-prefix}-rotate-90 { @include fa-icon-rotate(90deg, 1); }
.#{$fa-css-prefix}-rotate-180 { @include fa-icon-rotate(180deg, 2); }
.#{$fa-css-prefix}-rotate-270 { @include fa-icon-rotate(270deg, 3); }

.#{$fa-css-prefix}-flip-horizontal { @include fa-icon-flip(-1, 1, 0); }
.#{$fa-css-prefix}-flip-vertical { @include fa-icon-flip(1, -1, 2); }

// Hook for IE8-9
// -------------------------

:root .#{$fa-css-prefix}-rotate-90,
:root .#{$fa-css-prefix}-rotate-180,
:root .#{$fa-css-prefix}-rotate-270,
:root .#{$fa-css-prefix}-flip-horizontal,
:root .#{$fa-css-prefix}-flip-vertical {
  filter: none;
}

================================================
FILE: website/font-awesome/scss/_stacked.scss
================================================
// Stacked Icons
// -------------------------

.#{$fa-css-prefix}-stack {
  position: relative;
  display: inline-block;
  width: 2em;
  height: 2em;
  line-height: 2em;
  vertical-align: middle;
}
.#{$fa-css-prefix}-stack-1x, .#{$fa-css-prefix}-stack-2x {
  position: absolute;
  left: 0;
  width: 100%;
  text-align: center;
}
.#{$fa-css-prefix}-stack-1x { line-height: inherit; }
.#{$fa-css-prefix}-stack-2x { font-size: 2em; }
.#{$fa-css-prefix}-inverse { color: $fa-inverse; }

================================================
FILE: website/font-awesome/scss/_variables.scss
================================================
// Variables
// --------------------------

$fa-font-path: "../fonts" !default;
$fa-font-size-base: 14px !default;
//$fa-font-path: "//netdna.bootstrapcdn.com/font-awesome/4.3.0/fonts" !default; // for referencing Bootstrap CDN font files directly
$fa-css-prefix: fa !default;
$fa-version: "4.3.0" !default;
$fa-border-color: #eee !default;
$fa-inverse: #fff !default;
$fa-li-width: (30em / 14) !default;

$fa-var-adjust: "\f042"; $fa-var-adn: "\f170"; $fa-var-align-center: "\f037"; $fa-var-align-justify: "\f039"; $fa-var-align-left: "\f036"; $fa-var-align-right: "\f038"; $fa-var-ambulance: "\f0f9"; $fa-var-anchor: "\f13d"; $fa-var-android: "\f17b"; $fa-var-angellist: "\f209"; $fa-var-angle-double-down: "\f103"; $fa-var-angle-double-left: "\f100"; $fa-var-angle-double-right: "\f101"; $fa-var-angle-double-up: "\f102"; $fa-var-angle-down: "\f107";
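// Illustrative note (not part of the upstream file): the path/prefix settings
// at the top of this file are declared with !default, so a consuming
// stylesheet can override them before importing font-awesome.scss, e.g.
// (hypothetical values)
//   $fa-font-path: "/assets/fonts";
//   $fa-css-prefix: icon;
//   @import "font-awesome";
// which would emit .icon-* classes loading the webfont from /assets/fonts.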
$fa-var-angle-left: "\f104"; $fa-var-angle-right: "\f105"; $fa-var-angle-up: "\f106"; $fa-var-apple: "\f179"; $fa-var-archive: "\f187"; $fa-var-area-chart: "\f1fe"; $fa-var-arrow-circle-down: "\f0ab"; $fa-var-arrow-circle-left: "\f0a8"; $fa-var-arrow-circle-o-down: "\f01a"; $fa-var-arrow-circle-o-left: "\f190"; $fa-var-arrow-circle-o-right: "\f18e"; $fa-var-arrow-circle-o-up: "\f01b"; $fa-var-arrow-circle-right: "\f0a9"; $fa-var-arrow-circle-up: "\f0aa"; $fa-var-arrow-down: "\f063"; $fa-var-arrow-left: "\f060"; $fa-var-arrow-right: "\f061"; $fa-var-arrow-up: "\f062"; $fa-var-arrows: "\f047"; $fa-var-arrows-alt: "\f0b2"; $fa-var-arrows-h: "\f07e"; $fa-var-arrows-v: "\f07d"; $fa-var-asterisk: "\f069"; $fa-var-at: "\f1fa"; $fa-var-automobile: "\f1b9"; $fa-var-backward: "\f04a"; $fa-var-ban: "\f05e"; $fa-var-bank: "\f19c"; $fa-var-bar-chart: "\f080"; $fa-var-bar-chart-o: "\f080"; $fa-var-barcode: "\f02a"; $fa-var-bars: "\f0c9"; $fa-var-bed: "\f236"; $fa-var-beer: "\f0fc"; $fa-var-behance: "\f1b4"; $fa-var-behance-square: "\f1b5"; $fa-var-bell: "\f0f3"; $fa-var-bell-o: "\f0a2"; $fa-var-bell-slash: "\f1f6"; $fa-var-bell-slash-o: "\f1f7"; $fa-var-bicycle: "\f206"; $fa-var-binoculars: "\f1e5"; $fa-var-birthday-cake: "\f1fd"; $fa-var-bitbucket: "\f171"; $fa-var-bitbucket-square: "\f172"; $fa-var-bitcoin: "\f15a"; $fa-var-bold: "\f032"; $fa-var-bolt: "\f0e7"; $fa-var-bomb: "\f1e2"; $fa-var-book: "\f02d"; $fa-var-bookmark: "\f02e"; $fa-var-bookmark-o: "\f097"; $fa-var-briefcase: "\f0b1"; $fa-var-btc: "\f15a"; $fa-var-bug: "\f188"; $fa-var-building: "\f1ad"; $fa-var-building-o: "\f0f7"; $fa-var-bullhorn: "\f0a1"; $fa-var-bullseye: "\f140"; $fa-var-bus: "\f207"; $fa-var-buysellads: "\f20d"; $fa-var-cab: "\f1ba"; $fa-var-calculator: "\f1ec"; $fa-var-calendar: "\f073"; $fa-var-calendar-o: "\f133"; $fa-var-camera: "\f030"; $fa-var-camera-retro: "\f083"; $fa-var-car: "\f1b9"; $fa-var-caret-down: "\f0d7"; $fa-var-caret-left: "\f0d9"; $fa-var-caret-right: "\f0da"; $fa-var-caret-square-o-down: "\f150"; $fa-var-caret-square-o-left: "\f191"; $fa-var-caret-square-o-right: "\f152"; $fa-var-caret-square-o-up: "\f151"; $fa-var-caret-up: "\f0d8"; $fa-var-cart-arrow-down: "\f218"; $fa-var-cart-plus: "\f217"; $fa-var-cc: "\f20a"; $fa-var-cc-amex: "\f1f3"; $fa-var-cc-discover: "\f1f2"; $fa-var-cc-mastercard: "\f1f1"; $fa-var-cc-paypal: "\f1f4"; $fa-var-cc-stripe: "\f1f5"; $fa-var-cc-visa: "\f1f0"; $fa-var-certificate: "\f0a3"; $fa-var-chain: "\f0c1"; $fa-var-chain-broken: "\f127"; $fa-var-check: "\f00c"; $fa-var-check-circle: "\f058"; $fa-var-check-circle-o: "\f05d"; $fa-var-check-square: "\f14a"; $fa-var-check-square-o: "\f046"; $fa-var-chevron-circle-down: "\f13a"; $fa-var-chevron-circle-left: "\f137"; $fa-var-chevron-circle-right: "\f138"; $fa-var-chevron-circle-up: "\f139"; $fa-var-chevron-down: "\f078"; $fa-var-chevron-left: "\f053"; $fa-var-chevron-right: "\f054"; $fa-var-chevron-up: "\f077"; $fa-var-child: "\f1ae"; $fa-var-circle: "\f111"; $fa-var-circle-o: "\f10c"; $fa-var-circle-o-notch: "\f1ce"; $fa-var-circle-thin: "\f1db"; $fa-var-clipboard: "\f0ea"; $fa-var-clock-o: "\f017"; $fa-var-close: "\f00d"; $fa-var-cloud: "\f0c2"; $fa-var-cloud-download: "\f0ed"; $fa-var-cloud-upload: "\f0ee"; $fa-var-cny: "\f157"; $fa-var-code: "\f121"; $fa-var-code-fork: "\f126"; $fa-var-codepen: "\f1cb"; $fa-var-coffee: "\f0f4"; $fa-var-cog: "\f013"; $fa-var-cogs: "\f085"; $fa-var-columns: "\f0db"; $fa-var-comment: "\f075"; $fa-var-comment-o: "\f0e5"; $fa-var-comments: "\f086"; $fa-var-comments-o: "\f0e6"; $fa-var-compass: 
"\f14e"; $fa-var-compress: "\f066"; $fa-var-connectdevelop: "\f20e"; $fa-var-copy: "\f0c5"; $fa-var-copyright: "\f1f9"; $fa-var-credit-card: "\f09d"; $fa-var-crop: "\f125"; $fa-var-crosshairs: "\f05b"; $fa-var-css3: "\f13c"; $fa-var-cube: "\f1b2"; $fa-var-cubes: "\f1b3"; $fa-var-cut: "\f0c4"; $fa-var-cutlery: "\f0f5"; $fa-var-dashboard: "\f0e4"; $fa-var-dashcube: "\f210"; $fa-var-database: "\f1c0"; $fa-var-dedent: "\f03b"; $fa-var-delicious: "\f1a5"; $fa-var-desktop: "\f108"; $fa-var-deviantart: "\f1bd"; $fa-var-diamond: "\f219"; $fa-var-digg: "\f1a6"; $fa-var-dollar: "\f155"; $fa-var-dot-circle-o: "\f192"; $fa-var-download: "\f019"; $fa-var-dribbble: "\f17d"; $fa-var-dropbox: "\f16b"; $fa-var-drupal: "\f1a9"; $fa-var-edit: "\f044"; $fa-var-eject: "\f052"; $fa-var-ellipsis-h: "\f141"; $fa-var-ellipsis-v: "\f142"; $fa-var-empire: "\f1d1"; $fa-var-envelope: "\f0e0"; $fa-var-envelope-o: "\f003"; $fa-var-envelope-square: "\f199"; $fa-var-eraser: "\f12d"; $fa-var-eur: "\f153"; $fa-var-euro: "\f153"; $fa-var-exchange: "\f0ec"; $fa-var-exclamation: "\f12a"; $fa-var-exclamation-circle: "\f06a"; $fa-var-exclamation-triangle: "\f071"; $fa-var-expand: "\f065"; $fa-var-external-link: "\f08e"; $fa-var-external-link-square: "\f14c"; $fa-var-eye: "\f06e"; $fa-var-eye-slash: "\f070"; $fa-var-eyedropper: "\f1fb"; $fa-var-facebook: "\f09a"; $fa-var-facebook-f: "\f09a"; $fa-var-facebook-official: "\f230"; $fa-var-facebook-square: "\f082"; $fa-var-fast-backward: "\f049"; $fa-var-fast-forward: "\f050"; $fa-var-fax: "\f1ac"; $fa-var-female: "\f182"; $fa-var-fighter-jet: "\f0fb"; $fa-var-file: "\f15b"; $fa-var-file-archive-o: "\f1c6"; $fa-var-file-audio-o: "\f1c7"; $fa-var-file-code-o: "\f1c9"; $fa-var-file-excel-o: "\f1c3"; $fa-var-file-image-o: "\f1c5"; $fa-var-file-movie-o: "\f1c8"; $fa-var-file-o: "\f016"; $fa-var-file-pdf-o: "\f1c1"; $fa-var-file-photo-o: "\f1c5"; $fa-var-file-picture-o: "\f1c5"; $fa-var-file-powerpoint-o: "\f1c4"; $fa-var-file-sound-o: "\f1c7"; $fa-var-file-text: "\f15c"; $fa-var-file-text-o: "\f0f6"; $fa-var-file-video-o: "\f1c8"; $fa-var-file-word-o: "\f1c2"; $fa-var-file-zip-o: "\f1c6"; $fa-var-files-o: "\f0c5"; $fa-var-film: "\f008"; $fa-var-filter: "\f0b0"; $fa-var-fire: "\f06d"; $fa-var-fire-extinguisher: "\f134"; $fa-var-flag: "\f024"; $fa-var-flag-checkered: "\f11e"; $fa-var-flag-o: "\f11d"; $fa-var-flash: "\f0e7"; $fa-var-flask: "\f0c3"; $fa-var-flickr: "\f16e"; $fa-var-floppy-o: "\f0c7"; $fa-var-folder: "\f07b"; $fa-var-folder-o: "\f114"; $fa-var-folder-open: "\f07c"; $fa-var-folder-open-o: "\f115"; $fa-var-font: "\f031"; $fa-var-forumbee: "\f211"; $fa-var-forward: "\f04e"; $fa-var-foursquare: "\f180"; $fa-var-frown-o: "\f119"; $fa-var-futbol-o: "\f1e3"; $fa-var-gamepad: "\f11b"; $fa-var-gavel: "\f0e3"; $fa-var-gbp: "\f154"; $fa-var-ge: "\f1d1"; $fa-var-gear: "\f013"; $fa-var-gears: "\f085"; $fa-var-genderless: "\f1db"; $fa-var-gift: "\f06b"; $fa-var-git: "\f1d3"; $fa-var-git-square: "\f1d2"; $fa-var-github: "\f09b"; $fa-var-github-alt: "\f113"; $fa-var-github-square: "\f092"; $fa-var-gittip: "\f184"; $fa-var-glass: "\f000"; $fa-var-globe: "\f0ac"; $fa-var-google: "\f1a0"; $fa-var-google-plus: "\f0d5"; $fa-var-google-plus-square: "\f0d4"; $fa-var-google-wallet: "\f1ee"; $fa-var-graduation-cap: "\f19d"; $fa-var-gratipay: "\f184"; $fa-var-group: "\f0c0"; $fa-var-h-square: "\f0fd"; $fa-var-hacker-news: "\f1d4"; $fa-var-hand-o-down: "\f0a7"; $fa-var-hand-o-left: "\f0a5"; $fa-var-hand-o-right: "\f0a4"; $fa-var-hand-o-up: "\f0a6"; $fa-var-hdd-o: "\f0a0"; $fa-var-header: "\f1dc"; 
$fa-var-headphones: "\f025"; $fa-var-heart: "\f004"; $fa-var-heart-o: "\f08a"; $fa-var-heartbeat: "\f21e"; $fa-var-history: "\f1da"; $fa-var-home: "\f015"; $fa-var-hospital-o: "\f0f8"; $fa-var-hotel: "\f236"; $fa-var-html5: "\f13b"; $fa-var-ils: "\f20b"; $fa-var-image: "\f03e"; $fa-var-inbox: "\f01c"; $fa-var-indent: "\f03c"; $fa-var-info: "\f129"; $fa-var-info-circle: "\f05a"; $fa-var-inr: "\f156"; $fa-var-instagram: "\f16d"; $fa-var-institution: "\f19c"; $fa-var-ioxhost: "\f208"; $fa-var-italic: "\f033"; $fa-var-joomla: "\f1aa"; $fa-var-jpy: "\f157"; $fa-var-jsfiddle: "\f1cc"; $fa-var-key: "\f084"; $fa-var-keyboard-o: "\f11c"; $fa-var-krw: "\f159"; $fa-var-language: "\f1ab"; $fa-var-laptop: "\f109"; $fa-var-lastfm: "\f202"; $fa-var-lastfm-square: "\f203"; $fa-var-leaf: "\f06c"; $fa-var-leanpub: "\f212"; $fa-var-legal: "\f0e3"; $fa-var-lemon-o: "\f094"; $fa-var-level-down: "\f149"; $fa-var-level-up: "\f148"; $fa-var-life-bouy: "\f1cd"; $fa-var-life-buoy: "\f1cd"; $fa-var-life-ring: "\f1cd"; $fa-var-life-saver: "\f1cd"; $fa-var-lightbulb-o: "\f0eb"; $fa-var-line-chart: "\f201"; $fa-var-link: "\f0c1"; $fa-var-linkedin: "\f0e1"; $fa-var-linkedin-square: "\f08c"; $fa-var-linux: "\f17c"; $fa-var-list: "\f03a"; $fa-var-list-alt: "\f022"; $fa-var-list-ol: "\f0cb"; $fa-var-list-ul: "\f0ca"; $fa-var-location-arrow: "\f124"; $fa-var-lock: "\f023"; $fa-var-long-arrow-down: "\f175"; $fa-var-long-arrow-left: "\f177"; $fa-var-long-arrow-right: "\f178"; $fa-var-long-arrow-up: "\f176"; $fa-var-magic: "\f0d0"; $fa-var-magnet: "\f076"; $fa-var-mail-forward: "\f064"; $fa-var-mail-reply: "\f112"; $fa-var-mail-reply-all: "\f122"; $fa-var-male: "\f183"; $fa-var-map-marker: "\f041"; $fa-var-mars: "\f222"; $fa-var-mars-double: "\f227"; $fa-var-mars-stroke: "\f229"; $fa-var-mars-stroke-h: "\f22b"; $fa-var-mars-stroke-v: "\f22a"; $fa-var-maxcdn: "\f136"; $fa-var-meanpath: "\f20c"; $fa-var-medium: "\f23a"; $fa-var-medkit: "\f0fa"; $fa-var-meh-o: "\f11a"; $fa-var-mercury: "\f223"; $fa-var-microphone: "\f130"; $fa-var-microphone-slash: "\f131"; $fa-var-minus: "\f068"; $fa-var-minus-circle: "\f056"; $fa-var-minus-square: "\f146"; $fa-var-minus-square-o: "\f147"; $fa-var-mobile: "\f10b"; $fa-var-mobile-phone: "\f10b"; $fa-var-money: "\f0d6"; $fa-var-moon-o: "\f186"; $fa-var-mortar-board: "\f19d"; $fa-var-motorcycle: "\f21c"; $fa-var-music: "\f001"; $fa-var-navicon: "\f0c9"; $fa-var-neuter: "\f22c"; $fa-var-newspaper-o: "\f1ea"; $fa-var-openid: "\f19b"; $fa-var-outdent: "\f03b"; $fa-var-pagelines: "\f18c"; $fa-var-paint-brush: "\f1fc"; $fa-var-paper-plane: "\f1d8"; $fa-var-paper-plane-o: "\f1d9"; $fa-var-paperclip: "\f0c6"; $fa-var-paragraph: "\f1dd"; $fa-var-paste: "\f0ea"; $fa-var-pause: "\f04c"; $fa-var-paw: "\f1b0"; $fa-var-paypal: "\f1ed"; $fa-var-pencil: "\f040"; $fa-var-pencil-square: "\f14b"; $fa-var-pencil-square-o: "\f044"; $fa-var-phone: "\f095"; $fa-var-phone-square: "\f098"; $fa-var-photo: "\f03e"; $fa-var-picture-o: "\f03e"; $fa-var-pie-chart: "\f200"; $fa-var-pied-piper: "\f1a7"; $fa-var-pied-piper-alt: "\f1a8"; $fa-var-pinterest: "\f0d2"; $fa-var-pinterest-p: "\f231"; $fa-var-pinterest-square: "\f0d3"; $fa-var-plane: "\f072"; $fa-var-play: "\f04b"; $fa-var-play-circle: "\f144"; $fa-var-play-circle-o: "\f01d"; $fa-var-plug: "\f1e6"; $fa-var-plus: "\f067"; $fa-var-plus-circle: "\f055"; $fa-var-plus-square: "\f0fe"; $fa-var-plus-square-o: "\f196"; $fa-var-power-off: "\f011"; $fa-var-print: "\f02f"; $fa-var-puzzle-piece: "\f12e"; $fa-var-qq: "\f1d6"; $fa-var-qrcode: "\f029"; $fa-var-question: "\f128"; 
$fa-var-question-circle: "\f059"; $fa-var-quote-left: "\f10d"; $fa-var-quote-right: "\f10e"; $fa-var-ra: "\f1d0"; $fa-var-random: "\f074"; $fa-var-rebel: "\f1d0"; $fa-var-recycle: "\f1b8"; $fa-var-reddit: "\f1a1"; $fa-var-reddit-square: "\f1a2"; $fa-var-refresh: "\f021"; $fa-var-remove: "\f00d"; $fa-var-renren: "\f18b"; $fa-var-reorder: "\f0c9"; $fa-var-repeat: "\f01e"; $fa-var-reply: "\f112"; $fa-var-reply-all: "\f122"; $fa-var-retweet: "\f079"; $fa-var-rmb: "\f157"; $fa-var-road: "\f018"; $fa-var-rocket: "\f135"; $fa-var-rotate-left: "\f0e2"; $fa-var-rotate-right: "\f01e"; $fa-var-rouble: "\f158"; $fa-var-rss: "\f09e"; $fa-var-rss-square: "\f143"; $fa-var-rub: "\f158"; $fa-var-ruble: "\f158"; $fa-var-rupee: "\f156"; $fa-var-save: "\f0c7"; $fa-var-scissors: "\f0c4"; $fa-var-search: "\f002"; $fa-var-search-minus: "\f010"; $fa-var-search-plus: "\f00e"; $fa-var-sellsy: "\f213"; $fa-var-send: "\f1d8"; $fa-var-send-o: "\f1d9"; $fa-var-server: "\f233"; $fa-var-share: "\f064"; $fa-var-share-alt: "\f1e0"; $fa-var-share-alt-square: "\f1e1"; $fa-var-share-square: "\f14d"; $fa-var-share-square-o: "\f045"; $fa-var-shekel: "\f20b"; $fa-var-sheqel: "\f20b"; $fa-var-shield: "\f132"; $fa-var-ship: "\f21a"; $fa-var-shirtsinbulk: "\f214"; $fa-var-shopping-cart: "\f07a"; $fa-var-sign-in: "\f090"; $fa-var-sign-out: "\f08b"; $fa-var-signal: "\f012"; $fa-var-simplybuilt: "\f215"; $fa-var-sitemap: "\f0e8"; $fa-var-skyatlas: "\f216"; $fa-var-skype: "\f17e"; $fa-var-slack: "\f198"; $fa-var-sliders: "\f1de"; $fa-var-slideshare: "\f1e7"; $fa-var-smile-o: "\f118"; $fa-var-soccer-ball-o: "\f1e3"; $fa-var-sort: "\f0dc"; $fa-var-sort-alpha-asc: "\f15d"; $fa-var-sort-alpha-desc: "\f15e"; $fa-var-sort-amount-asc: "\f160"; $fa-var-sort-amount-desc: "\f161"; $fa-var-sort-asc: "\f0de"; $fa-var-sort-desc: "\f0dd"; $fa-var-sort-down: "\f0dd"; $fa-var-sort-numeric-asc: "\f162"; $fa-var-sort-numeric-desc: "\f163"; $fa-var-sort-up: "\f0de"; $fa-var-soundcloud: "\f1be"; $fa-var-space-shuttle: "\f197"; $fa-var-spinner: "\f110"; $fa-var-spoon: "\f1b1"; $fa-var-spotify: "\f1bc"; $fa-var-square: "\f0c8"; $fa-var-square-o: "\f096"; $fa-var-stack-exchange: "\f18d"; $fa-var-stack-overflow: "\f16c"; $fa-var-star: "\f005"; $fa-var-star-half: "\f089"; $fa-var-star-half-empty: "\f123"; $fa-var-star-half-full: "\f123"; $fa-var-star-half-o: "\f123"; $fa-var-star-o: "\f006"; $fa-var-steam: "\f1b6"; $fa-var-steam-square: "\f1b7"; $fa-var-step-backward: "\f048"; $fa-var-step-forward: "\f051"; $fa-var-stethoscope: "\f0f1"; $fa-var-stop: "\f04d"; $fa-var-street-view: "\f21d"; $fa-var-strikethrough: "\f0cc"; $fa-var-stumbleupon: "\f1a4"; $fa-var-stumbleupon-circle: "\f1a3"; $fa-var-subscript: "\f12c"; $fa-var-subway: "\f239"; $fa-var-suitcase: "\f0f2"; $fa-var-sun-o: "\f185"; $fa-var-superscript: "\f12b"; $fa-var-support: "\f1cd"; $fa-var-table: "\f0ce"; $fa-var-tablet: "\f10a"; $fa-var-tachometer: "\f0e4"; $fa-var-tag: "\f02b"; $fa-var-tags: "\f02c"; $fa-var-tasks: "\f0ae"; $fa-var-taxi: "\f1ba"; $fa-var-tencent-weibo: "\f1d5"; $fa-var-terminal: "\f120"; $fa-var-text-height: "\f034"; $fa-var-text-width: "\f035"; $fa-var-th: "\f00a"; $fa-var-th-large: "\f009"; $fa-var-th-list: "\f00b"; $fa-var-thumb-tack: "\f08d"; $fa-var-thumbs-down: "\f165"; $fa-var-thumbs-o-down: "\f088"; $fa-var-thumbs-o-up: "\f087"; $fa-var-thumbs-up: "\f164"; $fa-var-ticket: "\f145"; $fa-var-times: "\f00d"; $fa-var-times-circle: "\f057"; $fa-var-times-circle-o: "\f05c"; $fa-var-tint: "\f043"; $fa-var-toggle-down: "\f150"; $fa-var-toggle-left: "\f191"; $fa-var-toggle-off: 
"\f204"; $fa-var-toggle-on: "\f205"; $fa-var-toggle-right: "\f152"; $fa-var-toggle-up: "\f151"; $fa-var-train: "\f238"; $fa-var-transgender: "\f224"; $fa-var-transgender-alt: "\f225"; $fa-var-trash: "\f1f8"; $fa-var-trash-o: "\f014"; $fa-var-tree: "\f1bb"; $fa-var-trello: "\f181"; $fa-var-trophy: "\f091"; $fa-var-truck: "\f0d1"; $fa-var-try: "\f195"; $fa-var-tty: "\f1e4"; $fa-var-tumblr: "\f173"; $fa-var-tumblr-square: "\f174"; $fa-var-turkish-lira: "\f195"; $fa-var-twitch: "\f1e8"; $fa-var-twitter: "\f099"; $fa-var-twitter-square: "\f081"; $fa-var-umbrella: "\f0e9"; $fa-var-underline: "\f0cd"; $fa-var-undo: "\f0e2"; $fa-var-university: "\f19c"; $fa-var-unlink: "\f127"; $fa-var-unlock: "\f09c"; $fa-var-unlock-alt: "\f13e"; $fa-var-unsorted: "\f0dc"; $fa-var-upload: "\f093"; $fa-var-usd: "\f155"; $fa-var-user: "\f007"; $fa-var-user-md: "\f0f0"; $fa-var-user-plus: "\f234"; $fa-var-user-secret: "\f21b"; $fa-var-user-times: "\f235"; $fa-var-users: "\f0c0"; $fa-var-venus: "\f221"; $fa-var-venus-double: "\f226"; $fa-var-venus-mars: "\f228"; $fa-var-viacoin: "\f237"; $fa-var-video-camera: "\f03d"; $fa-var-vimeo-square: "\f194"; $fa-var-vine: "\f1ca"; $fa-var-vk: "\f189"; $fa-var-volume-down: "\f027"; $fa-var-volume-off: "\f026"; $fa-var-volume-up: "\f028"; $fa-var-warning: "\f071"; $fa-var-wechat: "\f1d7"; $fa-var-weibo: "\f18a"; $fa-var-weixin: "\f1d7"; $fa-var-whatsapp: "\f232"; $fa-var-wheelchair: "\f193"; $fa-var-wifi: "\f1eb"; $fa-var-windows: "\f17a"; $fa-var-won: "\f159"; $fa-var-wordpress: "\f19a"; $fa-var-wrench: "\f0ad"; $fa-var-xing: "\f168"; $fa-var-xing-square: "\f169"; $fa-var-yahoo: "\f19e"; $fa-var-yelp: "\f1e9"; $fa-var-yen: "\f157"; $fa-var-youtube: "\f167"; $fa-var-youtube-play: "\f16a"; $fa-var-youtube-square: "\f166"; ================================================ FILE: website/font-awesome/scss/font-awesome.scss ================================================ /*! * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) */ @import "variables"; @import "mixins"; @import "path"; @import "core"; @import "larger"; @import "fixed-width"; @import "list"; @import "bordered-pulled"; @import "animated"; @import "rotated-flipped"; @import "stacked"; @import "icons"; ================================================ FILE: website/index.md ================================================ --- layout: home --- ================================================ FILE: website/js/bootstrap.js ================================================ /*! * Bootstrap v3.3.2 (http://getbootstrap.com) * Copyright 2011-2015 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ if (typeof jQuery === 'undefined') { throw new Error('Bootstrap\'s JavaScript requires jQuery') } +function ($) { 'use strict'; var version = $.fn.jquery.split(' ')[0].split('.') if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 9 && version[2] < 1)) { throw new Error('Bootstrap\'s JavaScript requires jQuery version 1.9.1 or higher') } }(jQuery); /* ======================================================================== * Bootstrap: transition.js v3.3.2 * http://getbootstrap.com/javascript/#transitions * ======================================================================== * Copyright 2011-2015 Twitter, Inc. 
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
  // ============================================================

  function transitionEnd() {
    var el = document.createElement('bootstrap')

    var transEndEventNames = {
      WebkitTransition : 'webkitTransitionEnd',
      MozTransition    : 'transitionend',
      OTransition      : 'oTransitionEnd otransitionend',
      transition       : 'transitionend'
    }

    for (var name in transEndEventNames) {
      if (el.style[name] !== undefined) {
        return { end: transEndEventNames[name] }
      }
    }

    return false // explicit for ie8 ( ._.)
  }

  // http://blog.alexmaccaw.com/css-transitions
  $.fn.emulateTransitionEnd = function (duration) {
    var called = false
    var $el = this
    $(this).one('bsTransitionEnd', function () { called = true })
    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
    setTimeout(callback, duration)
    return this
  }

  $(function () {
    $.support.transition = transitionEnd()

    if (!$.support.transition) return

    $.event.special.bsTransitionEnd = {
      bindType: $.support.transition.end,
      delegateType: $.support.transition.end,
      handle: function (e) {
        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
      }
    }
  })

}(jQuery);
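// Illustrative note (not part of upstream bootstrap.js): the plugins below
// lean on the feature detection above. A sketch of the shared pattern:
//   if ($.support.transition) {
//     $el.one('bsTransitionEnd', done).emulateTransitionEnd(150)
//   } else {
//     done()
//   }
// emulateTransitionEnd() is the safety net: if the browser never fires the
// native transitionend event, the setTimeout fallback triggers it instead,
// so the bsTransitionEnd handler always runs exactly once.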
/* ========================================================================
 * Bootstrap: alert.js v3.3.2
 * http://getbootstrap.com/javascript/#alerts
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // ALERT CLASS DEFINITION
  // ======================

  var dismiss = '[data-dismiss="alert"]'
  var Alert   = function (el) {
    $(el).on('click', dismiss, this.close)
  }

  Alert.VERSION = '3.3.2'

  Alert.TRANSITION_DURATION = 150

  Alert.prototype.close = function (e) {
    var $this    = $(this)
    var selector = $this.attr('data-target')

    if (!selector) {
      selector = $this.attr('href')
      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
    }

    var $parent = $(selector)

    if (e) e.preventDefault()

    if (!$parent.length) {
      $parent = $this.closest('.alert')
    }

    $parent.trigger(e = $.Event('close.bs.alert'))

    if (e.isDefaultPrevented()) return

    $parent.removeClass('in')

    function removeElement() {
      // detach from parent, fire event then clean up data
      $parent.detach().trigger('closed.bs.alert').remove()
    }

    $.support.transition && $parent.hasClass('fade') ?
      $parent
        .one('bsTransitionEnd', removeElement)
        .emulateTransitionEnd(Alert.TRANSITION_DURATION) :
      removeElement()
  }

  // ALERT PLUGIN DEFINITION
  // =======================

  function Plugin(option) {
    return this.each(function () {
      var $this = $(this)
      var data  = $this.data('bs.alert')

      if (!data) $this.data('bs.alert', (data = new Alert(this)))
      if (typeof option == 'string') data[option].call($this)
    })
  }

  var old = $.fn.alert

  $.fn.alert             = Plugin
  $.fn.alert.Constructor = Alert

  // ALERT NO CONFLICT
  // =================

  $.fn.alert.noConflict = function () {
    $.fn.alert = old
    return this
  }

  // ALERT DATA-API
  // ==============

  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)

}(jQuery);
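// ------------------------------------------------------------------------
// Editorial note: an illustrative sketch (not part of upstream Bootstrap)
// of the alert plugin's programmatic API; '#demo-alert' is a hypothetical
// element carrying the 'alert fade in' classes.
// ------------------------------------------------------------------------
$(function () {
  $('#demo-alert')
    .on('closed.bs.alert', function () {
      console.log('alert removed from the DOM') // fires after removeElement()
    })
    .alert('close') // string option dispatches to Alert.prototype.close
})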
/* ========================================================================
 * Bootstrap: button.js v3.3.2
 * http://getbootstrap.com/javascript/#buttons
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // BUTTON PUBLIC CLASS DEFINITION
  // ==============================

  var Button = function (element, options) {
    this.$element  = $(element)
    this.options   = $.extend({}, Button.DEFAULTS, options)
    this.isLoading = false
  }

  Button.VERSION = '3.3.2'

  Button.DEFAULTS = {
    loadingText: 'loading...'
  }

  Button.prototype.setState = function (state) {
    var d    = 'disabled'
    var $el  = this.$element
    var val  = $el.is('input') ? 'val' : 'html'
    var data = $el.data()

    state = state + 'Text'

    if (data.resetText == null) $el.data('resetText', $el[val]())

    // push to event loop to allow forms to submit
    setTimeout($.proxy(function () {
      $el[val](data[state] == null ? this.options[state] : data[state])

      if (state == 'loadingText') {
        this.isLoading = true
        $el.addClass(d).attr(d, d)
      } else if (this.isLoading) {
        this.isLoading = false
        $el.removeClass(d).removeAttr(d)
      }
    }, this), 0)
  }

  Button.prototype.toggle = function () {
    var changed = true
    var $parent = this.$element.closest('[data-toggle="buttons"]')

    if ($parent.length) {
      var $input = this.$element.find('input')
      if ($input.prop('type') == 'radio') {
        if ($input.prop('checked') && this.$element.hasClass('active')) changed = false
        else $parent.find('.active').removeClass('active')
      }
      if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')
    } else {
      this.$element.attr('aria-pressed', !this.$element.hasClass('active'))
    }

    if (changed) this.$element.toggleClass('active')
  }

  // BUTTON PLUGIN DEFINITION
  // ========================

  function Plugin(option) {
    return this.each(function () {
      var $this   = $(this)
      var data    = $this.data('bs.button')
      var options = typeof option == 'object' && option

      if (!data) $this.data('bs.button', (data = new Button(this, options)))

      if (option == 'toggle') data.toggle()
      else if (option) data.setState(option)
    })
  }

  var old = $.fn.button

  $.fn.button             = Plugin
  $.fn.button.Constructor = Button

  // BUTTON NO CONFLICT
  // ==================

  $.fn.button.noConflict = function () {
    $.fn.button = old
    return this
  }

  // BUTTON DATA-API
  // ===============

  $(document)
    .on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
      var $btn = $(e.target)
      if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
      Plugin.call($btn, 'toggle')
      e.preventDefault()
    })
    .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) {
      $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))
    })

}(jQuery);
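// ------------------------------------------------------------------------
// Editorial note: an illustrative loading-state round trip (not part of
// upstream Bootstrap); '#save-btn' is a hypothetical button. 'loading' and
// 'reset' map onto setState('loading'|'reset'), which swaps the label for
// data-loading-text (or Button.DEFAULTS.loadingText) and toggles disabled.
// ------------------------------------------------------------------------
$(function () {
  var $btn = $('#save-btn').button('loading') // show loading text, disable
  setTimeout(function () {
    $btn.button('reset')                      // restore original label, re-enable
  }, 2000)
})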
/* ========================================================================
 * Bootstrap: carousel.js v3.3.2
 * http://getbootstrap.com/javascript/#carousel
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // CAROUSEL CLASS DEFINITION
  // =========================

  var Carousel = function (element, options) {
    this.$element    = $(element)
    this.$indicators = this.$element.find('.carousel-indicators')
    this.options     = options
    this.paused      =
    this.sliding     =
    this.interval    =
    this.$active     =
    this.$items      = null

    this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this))

    this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element
      .on('mouseenter.bs.carousel', $.proxy(this.pause, this))
      .on('mouseleave.bs.carousel', $.proxy(this.cycle, this))
  }

  Carousel.VERSION = '3.3.2'

  Carousel.TRANSITION_DURATION = 600

  Carousel.DEFAULTS = {
    interval: 5000,
    pause: 'hover',
    wrap: true,
    keyboard: true
  }

  Carousel.prototype.keydown = function (e) {
    if (/input|textarea/i.test(e.target.tagName)) return
    switch (e.which) {
      case 37: this.prev(); break
      case 39: this.next(); break
      default: return
    }

    e.preventDefault()
  }

  Carousel.prototype.cycle = function (e) {
    e || (this.paused = false)

    this.interval && clearInterval(this.interval)

    this.options.interval
      && !this.paused
      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))

    return this
  }

  Carousel.prototype.getItemIndex = function (item) {
    this.$items = item.parent().children('.item')
    return this.$items.index(item || this.$active)
  }

  Carousel.prototype.getItemForDirection = function (direction, active) {
    var activeIndex = this.getItemIndex(active)
    var willWrap = (direction == 'prev' && activeIndex === 0)
                || (direction == 'next' && activeIndex == (this.$items.length - 1))
    if (willWrap && !this.options.wrap) return active
    var delta = direction == 'prev' ? -1 : 1
    var itemIndex = (activeIndex + delta) % this.$items.length
    return this.$items.eq(itemIndex)
  }

  Carousel.prototype.to = function (pos) {
    var that        = this
    var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))

    if (pos > (this.$items.length - 1) || pos < 0) return

    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, "slid"
    if (activeIndex == pos) return this.pause().cycle()

    return this.slide(pos > activeIndex ? 'next' : 'prev', this.$items.eq(pos))
  }

  Carousel.prototype.pause = function (e) {
    e || (this.paused = true)

    if (this.$element.find('.next, .prev').length && $.support.transition) {
      this.$element.trigger($.support.transition.end)
      this.cycle(true)
    }

    this.interval = clearInterval(this.interval)

    return this
  }

  Carousel.prototype.next = function () {
    if (this.sliding) return
    return this.slide('next')
  }

  Carousel.prototype.prev = function () {
    if (this.sliding) return
    return this.slide('prev')
  }

  Carousel.prototype.slide = function (type, next) {
    var $active   = this.$element.find('.item.active')
    var $next     = next || this.getItemForDirection(type, $active)
    var isCycling = this.interval
    var direction = type == 'next' ? 'left' : 'right'
    var that      = this

    if ($next.hasClass('active')) return (this.sliding = false)

    var relatedTarget = $next[0]
    var slideEvent = $.Event('slide.bs.carousel', {
      relatedTarget: relatedTarget,
      direction: direction
    })
    this.$element.trigger(slideEvent)
    if (slideEvent.isDefaultPrevented()) return

    this.sliding = true

    isCycling && this.pause()

    if (this.$indicators.length) {
      this.$indicators.find('.active').removeClass('active')
      var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])
      $nextIndicator && $nextIndicator.addClass('active')
    }

    var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, "slid"
    if ($.support.transition && this.$element.hasClass('slide')) {
      $next.addClass(type)
      $next[0].offsetWidth // force reflow
      $active.addClass(direction)
      $next.addClass(direction)
      $active
        .one('bsTransitionEnd', function () {
          $next.removeClass([type, direction].join(' ')).addClass('active')
          $active.removeClass(['active', direction].join(' '))
          that.sliding = false
          setTimeout(function () {
            that.$element.trigger(slidEvent)
          }, 0)
        })
        .emulateTransitionEnd(Carousel.TRANSITION_DURATION)
    } else {
      $active.removeClass('active')
      $next.addClass('active')
      this.sliding = false
      this.$element.trigger(slidEvent)
    }

    isCycling && this.cycle()

    return this
  }

  // CAROUSEL PLUGIN DEFINITION
  // ==========================

  function Plugin(option) {
    return this.each(function () {
      var $this   = $(this)
      var data    = $this.data('bs.carousel')
      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
      var action  = typeof option == 'string' ? option : options.slide

      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
      if (typeof option == 'number') data.to(option)
      else if (action) data[action]()
      else if (options.interval) data.pause().cycle()
    })
  }

  var old = $.fn.carousel

  $.fn.carousel             = Plugin
  $.fn.carousel.Constructor = Carousel

  // CAROUSEL NO CONFLICT
  // ====================

  $.fn.carousel.noConflict = function () {
    $.fn.carousel = old
    return this
  }

  // CAROUSEL DATA-API
  // =================

  var clickHandler = function (e) {
    var href
    var $this   = $(this)
    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) // strip for ie7
    if (!$target.hasClass('carousel')) return
    var options = $.extend({}, $target.data(), $this.data())
    var slideIndex = $this.attr('data-slide-to')
    if (slideIndex) options.interval = false

    Plugin.call($target, options)

    if (slideIndex) {
      $target.data('bs.carousel').to(slideIndex)
    }

    e.preventDefault()
  }

  $(document)
    .on('click.bs.carousel.data-api', '[data-slide]', clickHandler)
    .on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler)

  $(window).on('load', function () {
    $('[data-ride="carousel"]').each(function () {
      var $carousel = $(this)
      Plugin.call($carousel, $carousel.data())
    })
  })

}(jQuery);
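// ------------------------------------------------------------------------
// Editorial note: an illustrative sketch (not part of upstream Bootstrap)
// of the carousel plugin's option, event, and numeric-index forms;
// '#demo-carousel' is a hypothetical element with the 'carousel' markup.
// ------------------------------------------------------------------------
$(function () {
  $('#demo-carousel')
    .carousel({ interval: 3000, wrap: false }) // options merge into Carousel.DEFAULTS
    .on('slid.bs.carousel', function (e) {     // fires after each completed slide
      console.log('slid', e.direction)
    })
  $('#demo-carousel').carousel(2)              // numeric option calls to(2): jump to the third item
})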
/* ========================================================================
 * Bootstrap: collapse.js v3.3.2
 * http://getbootstrap.com/javascript/#collapse
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // COLLAPSE PUBLIC CLASS DEFINITION
  // ================================

  var Collapse = function (element, options) {
    this.$element      = $(element)
    this.options       = $.extend({}, Collapse.DEFAULTS, options)
    this.$trigger      = $(this.options.trigger).filter('[href="#' + element.id + '"], [data-target="#' + element.id + '"]')
    this.transitioning = null

    if (this.options.parent) {
      this.$parent = this.getParent()
    } else {
      this.addAriaAndCollapsedClass(this.$element, this.$trigger)
    }

    if (this.options.toggle) this.toggle()
  }

  Collapse.VERSION = '3.3.2'

  Collapse.TRANSITION_DURATION = 350

  Collapse.DEFAULTS = {
    toggle: true,
    trigger: '[data-toggle="collapse"]'
  }

  Collapse.prototype.dimension = function () {
    var hasWidth = this.$element.hasClass('width')
    return hasWidth ? 'width' : 'height'
  }

  Collapse.prototype.show = function () {
    if (this.transitioning || this.$element.hasClass('in')) return

    var activesData
    var actives = this.$parent && this.$parent.children('.panel').children('.in, .collapsing')

    if (actives && actives.length) {
      activesData = actives.data('bs.collapse')
      if (activesData && activesData.transitioning) return
    }

    var startEvent = $.Event('show.bs.collapse')
    this.$element.trigger(startEvent)
    if (startEvent.isDefaultPrevented()) return

    if (actives && actives.length) {
      Plugin.call(actives, 'hide')
      activesData || actives.data('bs.collapse', null)
    }

    var dimension = this.dimension()

    this.$element
      .removeClass('collapse')
      .addClass('collapsing')[dimension](0)
      .attr('aria-expanded', true)

    this.$trigger
      .removeClass('collapsed')
      .attr('aria-expanded', true)

    this.transitioning = 1

    var complete = function () {
      this.$element
        .removeClass('collapsing')
        .addClass('collapse in')[dimension]('')
      this.transitioning = 0
      this.$element
        .trigger('shown.bs.collapse')
    }

    if (!$.support.transition) return complete.call(this)

    var scrollSize = $.camelCase(['scroll', dimension].join('-'))

    this.$element
      .one('bsTransitionEnd', $.proxy(complete, this))
      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize])
  }

  Collapse.prototype.hide = function () {
    if (this.transitioning || !this.$element.hasClass('in')) return

    var startEvent = $.Event('hide.bs.collapse')
    this.$element.trigger(startEvent)
    if (startEvent.isDefaultPrevented()) return

    var dimension = this.dimension()

    this.$element[dimension](this.$element[dimension]())[0].offsetHeight

    this.$element
      .addClass('collapsing')
      .removeClass('collapse in')
      .attr('aria-expanded', false)

    this.$trigger
      .addClass('collapsed')
      .attr('aria-expanded', false)

    this.transitioning = 1

    var complete = function () {
      this.transitioning = 0
      this.$element
        .removeClass('collapsing')
        .addClass('collapse')
        .trigger('hidden.bs.collapse')
    }

    if (!$.support.transition) return complete.call(this)

    this.$element
      [dimension](0)
      .one('bsTransitionEnd', $.proxy(complete, this))
      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)
  }

  Collapse.prototype.toggle = function () {
    this[this.$element.hasClass('in') ? 'hide' : 'show']()
  }

  Collapse.prototype.getParent = function () {
    return $(this.options.parent)
      .find('[data-toggle="collapse"][data-parent="' + this.options.parent + '"]')
      .each($.proxy(function (i, element) {
        var $element = $(element)
        this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element)
      }, this))
      .end()
  }

  Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) {
    var isOpen = $element.hasClass('in')

    $element.attr('aria-expanded', isOpen)
    $trigger
      .toggleClass('collapsed', !isOpen)
      .attr('aria-expanded', isOpen)
  }

  function getTargetFromTrigger($trigger) {
    var href
    var target = $trigger.attr('data-target')
      || (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7

    return $(target)
  }

  // COLLAPSE PLUGIN DEFINITION
  // ==========================

  function Plugin(option) {
    return this.each(function () {
      var $this   = $(this)
      var data    = $this.data('bs.collapse')
      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)

      if (!data && options.toggle && option == 'show') options.toggle = false
      if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
      if (typeof option == 'string') data[option]()
    })
  }

  var old = $.fn.collapse

  $.fn.collapse             = Plugin
  $.fn.collapse.Constructor = Collapse

  // COLLAPSE NO CONFLICT
  // ====================

  $.fn.collapse.noConflict = function () {
    $.fn.collapse = old
    return this
  }

  // COLLAPSE DATA-API
  // =================

  $(document).on('click.bs.collapse.data-api', '[data-toggle="collapse"]', function (e) {
    var $this   = $(this)

    if (!$this.attr('data-target')) e.preventDefault()

    var $target = getTargetFromTrigger($this)
    var data    = $target.data('bs.collapse')
    var option  = data ? 'toggle' : $.extend({}, $this.data(), { trigger: this })

    Plugin.call($target, option)
  })

}(jQuery);
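// ------------------------------------------------------------------------
// Editorial note: an illustrative sketch (not part of upstream Bootstrap)
// of driving the collapse plugin programmatically; '#demo-panel' and
// '#demo-accordion' are hypothetical elements.
// ------------------------------------------------------------------------
$(function () {
  $('#demo-panel')
    .collapse({ toggle: false, parent: '#demo-accordion' }) // create without auto-toggling
    .on('shown.bs.collapse', function () {
      console.log('panel expanded')                         // fires once the transition completes
    })
  $('#demo-panel').collapse('show')
})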
/* ========================================================================
 * Bootstrap: dropdown.js v3.3.2
 * http://getbootstrap.com/javascript/#dropdowns
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // DROPDOWN CLASS DEFINITION
  // =========================

  var backdrop = '.dropdown-backdrop'
  var toggle   = '[data-toggle="dropdown"]'
  var Dropdown = function (element) {
    $(element).on('click.bs.dropdown', this.toggle)
  }

  Dropdown.VERSION = '3.3.2'

  Dropdown.prototype.toggle = function (e) {
    var $this = $(this)

    if ($this.is('.disabled, :disabled')) return

    var $parent  = getParent($this)
    var isActive = $parent.hasClass('open')

    clearMenus()

    if (!isActive) {
      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
        // if mobile we use a backdrop because click events don't delegate
        $('