Repository: Oteemo/charts Branch: master Commit: 960f0c88ff0b Files: 141 Total size: 259.3 KB Directory structure: gitextract_wrburklz/ ├── .github/ │ └── workflows/ │ ├── lint-test.yml │ └── release.yml ├── .gitignore ├── LICENSE ├── README.md ├── charts/ │ ├── nexusiq/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── references/ │ │ │ ├── README.MD │ │ │ └── nexus-iq-config-template.yml │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── pvc.yaml │ │ │ ├── service.yaml │ │ │ └── tests/ │ │ │ └── test-nexusiq.yaml │ │ └── values.yaml │ ├── sonarqube/ │ │ ├── .helmignore │ │ ├── CHANGELOG.md │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── change-admin-password-hook.yml │ │ │ ├── config.yaml │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── init-sysctl.yaml │ │ │ ├── install-plugins.yaml │ │ │ ├── postgres-config.yaml │ │ │ ├── pvc.yaml │ │ │ ├── secret.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ ├── sonarqube-scc.yaml │ │ │ └── tests/ │ │ │ ├── sonarqube-test.yaml │ │ │ └── test-config.yaml │ │ └── values.yaml │ └── sonatype-nexus/ │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates/ │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── adtl-configmap.yaml │ │ ├── backup-pv.yaml │ │ ├── backup-pvc.yaml │ │ ├── backup-secret.yaml │ │ ├── cloudiam-pv.yaml │ │ ├── cloudiam-pvc.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── configmap.yaml │ │ ├── deployment-statefulset.yaml │ │ ├── ingress-docker.yaml │ │ ├── ingress.yaml │ │ ├── initpwd-job.yaml │ │ ├── proxy-ks-secret.yaml │ │ ├── proxy-route.yaml │ │ ├── proxy-svc.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ ├── rclone-config-secret.yaml │ │ ├── route.yaml │ │ ├── secret.yaml │ │ ├── service.yaml │ │ └── serviceaccount.yaml │ └── values.yaml ├── docs/ │ ├── che-0.1.1.tgz │ ├── che-0.1.2.tgz │ ├── index.html │ └── index.yaml ├── lint.yaml ├── 
old-charts/ │ └── che/ │ ├── .gitignore │ ├── .helmignore │ ├── Chart.yaml │ ├── Readme.md │ ├── custom-charts/ │ │ ├── che-devfile-registry/ │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates/ │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ └── service.yaml │ │ │ └── values.yaml │ │ ├── che-jaeger/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates/ │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── service-agent.yaml │ │ │ │ ├── service-collector.yaml │ │ │ │ └── service-query.yaml │ │ │ └── values.yaml │ │ ├── che-keycloak/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates/ │ │ │ │ ├── deployment.yaml │ │ │ │ ├── endpoints-monitor-role.yaml │ │ │ │ ├── endpoints-monitor-rolebinding.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── keycloak-data-claim.yaml │ │ │ │ ├── keycloak-log-claim.yaml │ │ │ │ ├── keycloak-serviceaccount.yaml │ │ │ │ └── service.yaml │ │ │ └── values.yaml │ │ ├── che-plugin-registry/ │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates/ │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ └── service.yaml │ │ │ └── values.yaml │ │ └── che-postgres/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates/ │ │ │ ├── deployment.yaml │ │ │ ├── postgres-data-claim.yaml │ │ │ └── service.yaml │ │ └── values.yaml │ ├── requirements.yaml │ ├── templates/ │ │ ├── NOTES.txt │ │ ├── _hostHelper.tpl │ │ ├── _keycloakAuthUrlHelper.tpl │ │ ├── _keycloakHostHelper.tpl │ │ ├── _secretHelper.tpl │ │ ├── cluster-role-binding.yaml │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── exec-role.yaml │ │ ├── ingress.yaml │ │ ├── metrics-ingress.yaml │ │ ├── pvc.yaml │ │ ├── registry-pull-secret.yaml │ │ ├── service.yaml │ │ ├── serviceaccount.yaml │ │ ├── wildcard-certificate.yaml │ │ ├── workspace-exec-role-binding.yaml │ │ ├── workspace-service-account.yaml │ │ ├── workspace-view-role-binding.yaml │ │ └── workspace-view-role.yaml │ ├── tiller-rbac.yaml │ ├── values/ │ │ ├── 
default-host.yaml │ │ ├── multi-user.yaml │ │ └── tls.yaml │ └── values.yaml ├── pom.xml └── test.yaml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/lint-test.yml ================================================ name: Lint and Test Charts on: pull_request jobs: lint-test: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v2 with: fetch-depth: 0 - name: Set up Helm uses: azure/setup-helm@v1 with: version: v3.4.1 - name: Set up Python uses: actions/setup-python@v2 with: python-version: 3.7 - name: Set up chart-testing uses: helm/chart-testing-action@v2.1.0 - name: Run chart-testing (list-changed) id: list-changed run: | changed=$(ct list-changed --config test.yaml) if [[ -n "$changed" ]]; then echo "::set-output name=changed::true" fi - name: Run chart-testing (lint) run: ct lint --config test.yaml - name: Create kind cluster uses: helm/kind-action@v1.2.0 if: steps.list-changed.outputs.changed == 'true' - name: Run chart-testing (install) run: ct install --config test.yaml ================================================ FILE: .github/workflows/release.yml ================================================ name: Release Charts on: push: branches: - master jobs: release: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v2 - name: Fetch history run: git fetch --prune --unshallow - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Install Helm uses: azure/setup-helm@v1 with: version: v3.4.0 - name: Add helm repositories run: | helm repo add bitnami https://charts.bitnami.com/bitnami - name: Run chart-releaser uses: helm/chart-releaser-action@v1.2.0 env: CR_TOKEN: "${{ secrets.CR_TOKEN }}" ================================================ FILE: .gitignore ================================================ # General 
files for the project pkg/* *.pyc bin/* .project /.bin /_test/secrets/*.json # OSX leaves these everywhere on SMB shares ._* # OSX trash .DS_Store # Files generated by JetBrains IDEs, e.g. IntelliJ IDEA .idea/ *.iml # Vscode files .vscode # Emacs save files *~ \#*\# .\#* # Vim-related files [._]*.s[a-w][a-z] [._]s[a-w][a-z] *.un~ Session.vim .netrwhist # Chart dependencies **/charts/*.tgz .history ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2020 Oteemo Inc Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # Oteemo Charts Repository ***This helm charts repository has been deprecated, please see individual charts for examples of where to migrate to*** Welcome to the oteemo charts repository. All charts are in the charts directory. 
## Adding the chart Repository `helm repo add oteemocharts https://oteemo.github.io/charts` ## Contributing Feel free to fork our repo and create a pull request with any new features or bug fixes. ## Contacting us For issues or concerns, please fill out an issue or email us at charts@oteemo.com ## How It Works GitHub Pages points to the `gh-pages` branch so anything pushed to that branch will be publicly available. We are using a couple github actions to automate testing and deployment of charts. It is based off the example [here](https://github.com/helm/charts-repo-actions-demo). ## Process to add a chart to the repository 1. Create a branch or fork for your new chart 1. Initialize new chart in the `charts` directory with `helm create mychart` or by copying in your work from outside 1. After chart development is done, run (at minimum) `helm lint mychart/` to validate yaml and templates 1. Don't forget to bump your chart version (if needed) 1. Create a pull request with the new chart or updates 1. Once the PR is approved, the automation will publish the chart to our repository ## Notes about current testing Testing is currently done with Helm3 ================================================ FILE: charts/nexusiq/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj .vscode/ ================================================ FILE: charts/nexusiq/Chart.yaml ================================================ apiVersion: v1 appVersion: 1.63.0 version: 1.0.5 description: A Helm chart for Nexus IQ name: nexusiq keywords: - sonatype - nexusiq - nexus home: https://www.sonatype.com/nexus-iq-server icon: http://www.sonatype.org/nexus/content/uploads/2015/06/Nexus-Logo.jpg sources: - https://help.sonatype.com/iqserver - https://hub.docker.com/r/sonatype/nexus-iq-server deprecated: true ================================================ FILE: charts/nexusiq/README.md ================================================ # Nexus IQ ***This chart is deprecated. Nexus IQ chart is now managed by sonatype: https://artifacthub.io/packages/helm/sonatype/nexus-iq-server*** ## What is Nexus IQ Shares component intelligence with your teams early, often and throughout the software supply chain so they make better decisions and build better software. Offers a fully-customizeable policy engine, so you can define which components are acceptable, and which are not. Integrates with popular development tools including, but not limited to: Maven, Eclipse, IntelliJ, Visual Studio, GitHub, Bamboo, Jenkins, Xebia Labs, and SonarQube. Provides a full suite of supported REST APIs that provide access to core features for custom implementations. ## Introduction This chart bootstraps a Nexus IQ deployment on a cluster using Helm. ## Installing the Chart ### Installing with Helm 3.x ```bash helm repo add oteemocharts https://oteemo.github.io/charts helm install nexusiq oteemocharts/nexusiq ``` ### Templating with Helm 3.x To template this with helm 3.x: 1. Complete the values file with your values. 2. Execute the ```helm template``` command to generate your manifest files 3. 
Execute the ```kubectl apply``` command to create the deployment within your kubernetes cluster. ## Uninstalling the Chart ### Uninstalling with Helm 3.x ```bash $ helm list NAME REVISION UPDATED STATUS CHART NAMESPACE nexusiq 1 Fri Sep 1 13:19:50 2017 DEPLOYED nexusiq default $ helm delete nexusiq ``` ### Uninstalling without Helm 3.x In a tiller-less helm 2.x environment you must individually delete the objects created by the helm chart: deployment, persistent volumes, and persistent volume claims. ## Configuration The following table lists the configurable parameters of the NexusIQ chart and their default values. | Parameter | Description | Default | | ------------------------------------------ | ---------------------------------- | ----------------------------------------| | `nexusIQ.repository` | NexusIQ image repo | `sonatype/nexus-iq-server` | | `nexusIQ.tag` | NexusIQ image version | `1.63.0` | | `nexusIQ.pullPolicy` | NexusIQ image pull policy | `IfNotPresent` | | `nexusIQ.metricsPort` | NexusIQ port to expose prometheus metrics over | `8071` | | `nexusIQ.applicationPort` | NexusIQ application port | `8070` | | `nexusIQ.portName` | blank | `nexus-iq-server` | | `nexusIQ.livenessProbe.initialDelaySeconds` | LivenessProbe initial delay | `30` | | `nexusIQ.livenessProbe.periodSeconds` | LivenessProbe period seconds | `30` | | `nexusIQ.livenessProbe.failureThreshold` | LivenessProbe failure threshold | `6` | | `nexusIQ.livenessProbe.path` | LivenessProbe path | `/` | | `nexusIQ.readinessProbe.initialDelaySeconds` | ReadinessProbe initial delay | `30` | | `nexusIQ.readinessProbe.periodSeconds` | ReadinessProbe period seconds | `30` | | `nexusIQ.readinessProbe.failureThreshold` | ReadinessProbe failure threshold | `6` | | `nexusIQ.readinessProbe.path` | ReadinessProbe path | `/` | | `service.enabled` | Service Enabled Flag | `false` | | `service.name` | Name for Service | `nexus-iq-server` | | `service.type` | Service Type | `ClusterIP` | | `service.port` | 
Service Port | `80` | | `ingress.enabled` | Ingress Enabled Flag | `false` | | `ingress.annotations` | Ingress annotations | blank | | `ingress.hostName` | Ingress host name | blank | | `ingress.hosts` | Ingress hosts | blank | | `ingress.tls` | Ingress TLS configuration | blank | | `persistence.enabled` | Enable persistent storage | `false` | | `persistence.accessMode` | Set Storage Access Mode| `ReadWriteOnce` | | `persistence.storageSize` | Set Storage Size | `25Gi` | | `persistence.storageClass` | Set Storage Type | `gp2` | | `persistence.labels` | Set Storage Labels | blank | | `persistence.annotations` | Set storage annotations | blank | ## After Installing the Chart After installing the chart a couple of actions still need to be done in order to use NexusIQ. Please follow the instructions below. ### NexusIQ Configuration The following steps need to be executed in order to use NexusIQ: 1. Install the license. Without a valid license you will not be able to navigate past the license page and use NexusIQ in any way. 2. Configure basic permissions. By default NexusIQ creates a default `admin` user with a password of `admin123` that is not configurable at boostrap. You MUST change this immediately upon logging in. 3. (Optional) Configure LDAP. ### Nexus IQ Server System Requirements The following table lists the system requirements of the Nexus IQ Server | Resource | Description | | ------------------------------------------ | ---------------------------------- | | `CPU & RAM` | Recommend a processor with at least 8 CPU cores and 8GB of RAM for initial setup. A minimum of 6GB of process space should be available to the IQ Server. Additional RAM can improve the performance due to decreased disk caching. | | `Disk` | Storage requirements range with the number of applications projected to use the IQ Server. 500 GB to 1 TB of free disk space should provide more than adequate resources. 
| | `Account` | It is recommended that an unprivileged service account be created if running the IQ Server as a daemon. | | `Operating System` | Generally, any machine that can run a supported Sun/Oracle Java version should work. Refer to the Oracle documentation for specifics: Oracle JDK 8 and JRE 8 Certified System Configurations. The most widely used operating system for the IQ Server is Linux and therefore customers should consider it the best tested platform. | | `Ports` | The IQ Server requires the following network access. Inbound: 8070 TCP: Used by all IQ Server clients for HTTP access. This port is configurable. 8071 TCP: Used by the local host or other IT monitoring tools for monitoring and operating functions. This port is optional and configurable. If not specified, port 8081 will be used. Outbound: 443 TCP to : Used by the IQ Server to securely access Sonatype Data Services. This hostname and port are not configurable. Sonatype Data Services must be reachable by IQ Server on the following URL: . | | `Java` | OpenJDK 8 (since December 2018, IQ Server release 55). Prior to IQ Server release 63, the IQ Server used to check if the used JVM is supported. This check does not work for certain OpenJDK versions/flavors. You can disable this check by adding -Dclm.disableJreCheck=true to the command used to start the IQ Server. | ### Important Links 1. Nexus IQ Server Web Page - 2. Nexus IQ Server Documentation & Help Page - 3. Nexus IQ Server Getting Started Guide - 4. Nexus IQ Docker Repo & Docker Documentation - ================================================ FILE: charts/nexusiq/references/README.MD ================================================ ### Reference Documentation for Nexus IQ Server config.yml The IQ Server is an application exposed using a Dropwizard server. The main configuration file for the IQ Server installation is a YAML formatted file called config.yml found in the installation directory. 
The config.yml file typically contains only those configuration options which are rarely changed. Special considerations when editing the config.yml file: TAB characters are not supported, use space characters only for indenting structure is tree-like - indents define structure hierarchy and are relevant to proper parsing of the file indented lines are considered child options of the first un-commented outdented line preceeding them commented lines are ignored - comments begin with the # character an improperly formatted config.yml will prevent the server from starting We strongly recommend using a text editor that will inform you of any TAB characters accidentally inserted into the file. Options that are more commonly changed are typically found in the System Preferences section of the IQ Server user interface, which you can access by clicking on the System Preferences icon located in the top right of the IQ Server header (). ## Useful Links: 1. Documentation on config.yml - https://help.sonatype.com/iqserver/configuring/config.yml The config.yml file is stored by default in the container in the following directory: ~/opt/sonatype//opt/sonatype/nexus-iq-server/config.yml This ================================================ FILE: charts/nexusiq/references/nexus-iq-config-template.yml ================================================ # # NOTE: The indentation in this file is crucial for proper processing. Please keep the existing indentation when editing it. # # Directory for data files. sonatypeWork: ./sonatype-work/clm-server # Path to a license file to automatically install if unlicensed. #licenseFile: ./license.lic # Base URL of the Sonatype CLM server for user facing links back to the server. #baseUrl: http://localhost:8070/ # Hour of the day(0-23) to schedule Policy Monitoring execution. The default is midnight. #policyMonitoringHour: 0 # If true, allows anonymous policy evaluation from client tooling (e.g. CLI, CI, Maven). 
# If false, forces authentication and disallows anonymous access. # Defaults to false for security. #anonymousClientAccessAllowed: false # Enables/disables cross-site request forgery protection. Defaults to true for increased security. #csrfProtection: true # A custom fragment to add to the "user-agent" for HTTP calls #userAgentSuffix: "example fragment" # HTTP-specific options. server: # The context path for the application. Note that this must have a leading slash. applicationContextPath: / applicationConnectors: - type: http # The port on which the HTTP server listens for service requests. # Because Java cannot drop privileges in a POSIX system, these # ports cannot be in the range 1-1024. A port value of 0 will # make the OS use an arbitrary unused port. port: 8070 # The hostname of the interface to which the application HTTP server socket # will be bound. If omitted, the socket will listen on all # interfaces. #bindHost: 127.0.0.1 # only bind to loopback adminConnectors: - type: http # The port on which the HTTP server listens for administrative # requests. Subject to the same limitations as "port". port: 8071 # The hostname of the interface to which the admin HTTP server socket # will be bound. If omitted, the socket will listen on all # interfaces. #bindHost: 127.0.0.1 # only bind to loopback # HTTP request log settings. requestLog: appenders: # Settings for logging to a file. - type: file # The file to which current statements will be logged. currentLogFilename: ./log/request.log logFormat: "%clientHost %l %user [%date] \"%requestURL\" %statusCode %bytesSent %elapsedTime \"%header{User-Agent}\"" # When the log file rotates, the archived log will be renamed to this and gzipped. The # %d is replaced with the previous day (yyyy-MM-dd). Custom rolling windows can be created # by passing a SimpleDateFormat-compatible format as an argument: "%d{yyyy-MM-dd-hh}". archivedLogFilenamePattern: ./log/request-%d.log.gz # The number of archived files to keep. 
archivedFileCount: 50 # Notification mail settings. # If enabled, ensure that the baseUrl configuration setting is also enabled and correct, because generated emails often contain links to the server. #mail: # The host running the SMTP server to use. #hostname: "127.0.0.1" # The port at which the SMTP server listens on. #port: 587 # The address used as From for outgoing notification mails sent by the server. #systemEmail: "SonatypeCLM@localhost" # The username used to access the mail server. #username: "anonymous" # The password used to access the mail server. #password: "guest" # Connect using SSL. #ssl: true # Connect using TLS. #tls: true # Notification JIRA settings. # Note that any user of the Nexus IQ Server will have access to see all projects and applicable issue types available # to the configured JIRA account. More details available in the Nexus IQ Server documentation. If enabled, ensure that # the baseUrl configuration setting is also enabled and correct, because generated tickets contain links to the server. #jira: # The JIRA server address #url: "https://jira.example.org" # The username used to connect to the JIRA server #username: "exampleuser" # The password used to connect to the JIRA server #password: "examplepassword" # Any JIRA project issue type required fields without default values defined in JIRA must have their initial # field values defined here in order for that project and issue type to be available for policy notifications #customFields: # Example 'user' type system field #reporter: # name: "username" # Example 'array' type system field #labels: # - test # - bug # Example 'version' type custom field #customfield_12001: # name: "Example" # Example 'option' type custom field #customfield_10050: # value: "P1" # Example 'number' type custom field #customfield_13001: 10 # Example 'datetime' type custom field #customfield_14000: "2016-11-01" # Proxy settings. #proxy: # The host running the proxy server to use. 
#hostname: "127.0.0.1" # The port at which the proxy server listens on. #port: 80 # The username used to access the proxy server. #username: "anonymous" # The password used to access the proxy server. #password: "guest" # Configures reverse proxy authentication for the web UI. #reverseProxyAuthentication: # Enables/disables authentication via a reverse proxy. Once the reverse proxy has authenticated a user's identity, # it is expected to forward the username to the CLM server via the HTTP header configured below. The CLM server # will then consider the user logged in without further password prompt. # # WARNING: If improperly configured, this feature poses a security vulnerability. To guard against authentication # bypass it is crucial that all HTTP access to the CLM server is proxied such that the designated username header # is only settable by the reverse proxy that performs authentication and cannot be forged by end users themselves. # #enabled: false # The name of the HTTP request header that carries the username of an authenticated user. #usernameHeader: "REMOTE_USER" # For backwards compatibility of client requests - skips CSRF protection when using RUT #csrfProtectionDisabled: false # The service URL that will be redirected to when a user requests logout. #logoutUrl: http://localhost/logout/index.html # Logging settings. logging: # The default level of all loggers. Can be OFF, ERROR, WARN, INFO, DEBUG, TRACE, or ALL. level: DEBUG # Logger-specific settings. loggers: "com.sonatype.insight.scan": INFO "eu.medsea.mimeutil.MimeUtil2": INFO "org.apache.http": INFO "org.apache.http.wire": ERROR "org.eclipse.birt.report.engine.layout.pdf.font.FontConfigReader": WARN "org.eclipse.jetty": INFO "org.apache.shiro.web.filter.authc.BasicHttpAuthenticationFilter": INFO # WARNING: This reveals credentials at DEBUG level "com.sonatype.insight.audit": appenders: - type: file # The file to which audit statements will be logged. 
currentLogFilename: ./log/audit.log # When the audit log file rotates, the archived audit log will be renamed to this and gzipped. The # %d is replaced with the previous day (yyyy-MM-dd). Custom rolling windows can be created # by passing a SimpleDateFormat-compatible format as an argument: "%d{yyyy-MM-dd-hh}". # # If archive is true, this must be specified. archivedLogFilenamePattern: ./log/audit-%d.log.gz # The number of archived audit log files to keep. archivedFileCount: 50 #"com.sonatype.insight.policy.violation": #appenders: #- type: file # The file to which policy violations will be logged. #currentLogFilename: ./log/policy-violation.log # When the policy violation log file rotates, the archived policy violation log will be renamed to this # and gzipped. The %d is replaced with the previous day (yyyy-MM-dd). Custom rolling windows can be created # by passing a SimpleDateFormat-compatible format as an argument: "%d{yyyy-MM-dd-hh}". # # If archive is true, this must be specified. #archivedLogFilenamePattern: ./log/policy-violation-%d.log.gz # The number of archived policy violation log files to keep. #archivedFileCount: 5 appenders: # Settings for logging to stdout. - type: console # Do not display log statements below this threshold to stdout. threshold: INFO logFormat: "%d{'yyyy-MM-dd HH:mm:ss,SSSZ'} %level [%thread] %X{username} %logger - %msg%n" # Settings for logging to a file. - type: file # Do not write log statements below this threshold to the file. threshold: ALL logFormat: "%d{'yyyy-MM-dd HH:mm:ss,SSSZ'} %level [%thread] %X{username} %logger - %msg%n" # The file to which current statements will be logged. currentLogFilename: ./log/clm-server.log # When the log file rotates, the archived log will be renamed to this and gzipped. The # %d is replaced with the previous day (yyyy-MM-dd). Custom rolling windows can be created # by passing a SimpleDateFormat-compatible format as an argument: "%d{yyyy-MM-dd-hh}". 
# # If archive is true, this must be specified. archivedLogFilenamePattern: ./log/clm-server-%d.log.gz # The number of archived files to keep. archivedFileCount: 50 # Passphrase used to encrypt the Webhook Secret Keys #webhookSecretPassphrase: "^d1swM!FF&qQ" #eventBus: # Configures the number of threads used for the EventBus # Defaults to 500 if not provided. # maxPoolSize: 500 # Sample data is created for new installs. createSampleData: true ================================================ FILE: charts/nexusiq/templates/NOTES.txt ================================================ 1. Get the application URL by running these commands: {{- if .Values.ingress.enabled }} {{- range $host := .Values.ingress.hosts }} {{- range .paths }} http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} {{- end }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nexus-iq.fullname" . }}) export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nexus-iq.fullname" . }}' export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nexus-iq.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') echo http://$SERVICE_IP:{{ .Values.service.port }} {{- else if contains "ClusterIP" .Values.service.type }} export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nexus-iq.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") echo "Visit http://127.0.0.1:8080 to use your application" kubectl port-forward $POD_NAME 8080:80 {{- end }} ================================================ FILE: charts/nexusiq/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "nexus-iq.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "nexus-iq.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} {{/* Create chart name and version as used by the chart label. */}} {{- define "nexus-iq.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Manage the labels for each entity */}} {{- define "nexus-iq.labels" -}} app: {{ template "nexus-iq.name" . }} fullname: {{ template "nexus-iq.fullname" . }} chart: {{ template "nexus-iq.chart" . }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} {{- end -}} ================================================ FILE: charts/nexusiq/templates/deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: namespace: {{ .Release.Namespace }} name: {{ include "nexus-iq.fullname" . }} labels: {{ include "nexus-iq.labels" . 
| indent 4 }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: {{ include "nexus-iq.labels" . | indent 7 }} template: metadata: labels: {{ include "nexus-iq.labels" . | indent 7 }} spec: containers: - name: nexus-iq-server image: "{{ .Values.nexusIQ.repository }}:{{ .Values.nexusIQ.tag }}" imagePullPolicy: {{ .Values.nexusIQ.pullPolicy }} ports: - name: {{.Values.nexusIQ.portName}} containerPort: {{.Values.nexusIQ.applicationPort}} - name: metricsport containerPort: {{.Values.nexusIQ.metricsPort}} livenessProbe: httpGet: path: {{.Values.nexusIQ.livenessProbe.path}} port: {{.Values.nexusIQ.applicationPort}} initialDelaySeconds: {{ .Values.nexusIQ.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.nexusIQ.livenessProbe.periodSeconds }} failureThreshold: {{ .Values.nexusIQ.livenessProbe.failureThreshold }} readinessProbe: httpGet: path: {{.Values.nexusIQ.readinessProbe.path}} port: {{.Values.nexusIQ.applicationPort}} initialDelaySeconds: {{ .Values.nexusIQ.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.nexusIQ.livenessProbe.periodSeconds }} failureThreshold: {{ .Values.nexusIQ.livenessProbe.failureThreshold }} resources: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: - mountPath: /nexusiq-data name: {{ template "nexus-iq.fullname" . }}-data volumes: - name: {{ template "nexus-iq.fullname" . }}-data {{- if .Values.persistence.enabled }} persistentVolumeClaim: claimName: {{ .Values.persistence.existingClaim | default (printf "%s-%s" (include "nexus-iq.fullname" .) "data") }} {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . 
| nindent 8 }}
      {{- end }}

================================================
FILE: charts/nexusiq/templates/ingress.yaml
================================================
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "nexus-iq.fullname" . -}}
# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes v1.22;
# migrate to networking.k8s.io/v1 (backend -> service.name / service.port)
# once the chart's minimum supported cluster version allows it.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ $fullName }}
  labels: {{ include "nexus-iq.labels" . | indent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
  {{- end }}
  rules:
    - host: {{ .Values.ingress.hostName | quote }}
      http:
        paths:
          - backend:
              ## this needs to match the name of the service being deployed.
              serviceName: {{ .Values.service.name }}
              servicePort: {{ .Values.service.port }}
            path: /
{{- end }}

================================================
FILE: charts/nexusiq/templates/pvc.yaml
================================================
{{- if .Values.persistence.enabled }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  ## Modified from upstream, added namespace
  namespace: {{ .Release.Namespace }}
  name: {{ template "nexus-iq.fullname" . }}-data
  labels: {{ include "nexus-iq.labels" . | indent 4 }}
  {{- if .Values.persistence.annotations }}
  annotations:
{{ toYaml .Values.persistence.annotations | indent 4 }}
  {{- end }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.storageSize | quote }}
  {{- if .Values.persistence.storageClass }}
  {{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
  {{- end }}
  {{- end }}
{{- end }}

================================================
FILE: charts/nexusiq/templates/service.yaml
================================================
{{- if .Values.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ .Values.service.name }}
  labels: {{ include "nexus-iq.labels" . | indent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - name: {{ .Values.service.portName }}
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.port }}
      protocol: TCP
  selector:
    app: {{ template "nexus-iq.name" . }}
    release: {{ .Release.Name }}
{{- end }}

================================================
FILE: charts/nexusiq/templates/tests/test-nexusiq.yaml
================================================
apiVersion: v1
kind: Pod
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ include "nexus-iq.fullname" . 
}}-test
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: nexus-iq-server-test
      image: "{{ .Values.nexusIQ.repository }}:{{ .Values.nexusIQ.tag }}"
      imagePullPolicy: {{ .Values.nexusIQ.pullPolicy }}
      ports:
        - name: {{ .Values.nexusIQ.portName }}
          containerPort: {{ .Values.nexusIQ.applicationPort }}
        - name: metricsport
          containerPort: {{ .Values.nexusIQ.metricsPort }}
      livenessProbe:
        httpGet:
          path: {{ .Values.nexusIQ.livenessProbe.path }}
          port: {{ .Values.nexusIQ.applicationPort }}
        initialDelaySeconds: {{ .Values.nexusIQ.livenessProbe.initialDelaySeconds }}
        periodSeconds: {{ .Values.nexusIQ.livenessProbe.periodSeconds }}
        failureThreshold: {{ .Values.nexusIQ.livenessProbe.failureThreshold }}
      readinessProbe:
        httpGet:
          path: {{ .Values.nexusIQ.readinessProbe.path }}
          port: {{ .Values.nexusIQ.applicationPort }}
        # Fix: these previously read .Values.nexusIQ.livenessProbe.* (copy-paste bug).
        initialDelaySeconds: {{ .Values.nexusIQ.readinessProbe.initialDelaySeconds }}
        periodSeconds: {{ .Values.nexusIQ.readinessProbe.periodSeconds }}
        failureThreshold: {{ .Values.nexusIQ.readinessProbe.failureThreshold }}
      # Fix: with "sh -c" only the first argument after -c is the script; the
      # curl command was previously passed as $0 and never executed, so the
      # test always "succeeded". Chained with && and -f so a non-2xx response
      # fails the test; port generalized from the hard-coded 8071.
      command: ["sh", "-c", "sleep 30 && curl -f http://localhost:{{ .Values.nexusIQ.metricsPort }}"]
  restartPolicy: Never

================================================
FILE: charts/nexusiq/values.yaml
================================================
replicaCount: 1

nexusIQ:
  repository: sonatype/nexus-iq-server
  tag: 1.63.0
  pullPolicy: IfNotPresent
  metricsPort: 8071
  applicationPort: 8070
  # env:
  #   pvPath: -Djava.util.prefs.userRoot=/some-other-dir
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /
  readinessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /

nameOverride: ""
fullnameOverride: ""

service:
  enabled: false
  name: nexus-iq-server
  type: ClusterIP
  port: 8070
  portName: http

ingress:
  enabled: false
  annotations:
    # certmanager.k8s.io/cluster-issuer: "letsencrypt-prod"
    # nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # nginx.ingress.kubernetes.io/add-base-url: "true"
    # certmanager.k8s.io/acme-challenge-type: "dns01"
    # certmanager.k8s.io/acme-dns01-provider: "aws"
    # kubernetes.io/tls-acme: "true"
  hostName:
  hosts:
    - host:
      paths: []
  tls:
    # Fix: was "nexusiq_tls" — underscores are not allowed in Kubernetes
    # Secret names (RFC 1123 subdomain: lowercase alphanumerics, '-', '.').
    - secretName: nexusiq-tls
      hosts:

persistence:
  enabled: false
  accessMode: ReadWriteOnce
  storageSize: 25Gi
  storageClass: gp2
  labels: {}
  annotations: {}

resources:
  # Request and Resource limits have not been tested. Please read system requirements for NexusIQ
  # and experiment based upon usage levels you are monitoring.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

================================================
FILE: charts/sonarqube/.helmignore
================================================
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

================================================
FILE: charts/sonarqube/CHANGELOG.md
================================================
# SonarQube Chart Changelog

All changes to this chart will be documented in this file.

## [9.x.x]

* Use the `initContainers.securityContext` to also set the security context of the `wait-for-db` init container.

## [9.6.3]

* Fixed GH-277 by ensuring current/new admin passwords are URL escaped in the change-admin-password-hook job.

## [9.6.2]

* Change order of env variables to better support 7.9-lts

## [9.6.1]

* Add support for setting custom annotations in admin hook job.

## [9.6.0]

* Add the possibility of defining the secret key name of the postgres password.

## [9.5.0]

* Add Ingress default backend for GCE class

## [9.2.3]

* Added namespace to port-forward command in notes. 
## [9.2.2]

* Added a condition to deployment.yaml so that `wait-for-db` initContainer is only created if `postgresql.enabled=true`

## [9.2.1]

* Updated the configuration table to include the additional keys added in release 9.2.0.

## [9.2.0]

* Added functionality for deployments to OpenShift clusters.
* .Values.OpenShift flag to signify if deploying to OpenShift.
* Ability to have chart generate an SCC allowing the init-sysctl container to run as privileged.
* Setting of a separate securityContext section for the main SonarQube container to avoid running as root.
* Exposing additional `postgreSQL` keys in values.yaml to support configuring postgres to run under standard "restricted" or "anyuid"/"nonroot" SCCs on OpenShift.
* Added initContainer `wait-for-db` to await postgreSQL successful startup before starting SonarQube, to avoid race conditions.

## [9.1.1]

* Update SonarQube to 8.5.1.
* **Fix:** Purge plugins directory before download.

## [9.0.0]

* Update SonarQube to 8.5.
* **Breaking change:** Rework init containers.
* Move global defaults from `plugins` section to `initContainers`.
* Update container images.
* **Deprecation:** `elasticsearch.configureNode` in favor of `initSysctl.enabled`.
* Rework sysctl with support for custom values.
* Rework plugins installation via `opt/sonarqube/extensions/downloads` folder that is handled by SonarQube itself.
* **Breaking change:** remove `plugins.deleteDefaultPlugins` as SonarQube stores bundled plugins out of `opt/sonarqube/extensions`.
* Rename deprecated `SONARQUBE_` environment variables to `SONAR_` ones.
* **Breaking change:** Rename `enabledTests` to `tests.enabled`.
* Add `terminationGracePeriodSeconds`. 
================================================ FILE: charts/sonarqube/Chart.yaml ================================================ apiVersion: v1 name: sonarqube description: SonarQube is an open sourced code quality scanning tool version: 9.11.0 appVersion: 8.9.7-community keywords: - coverage - security - code - quality home: https://www.sonarqube.org/ icon: https://www.sonarqube.org/assets/logo-31ad3115b1b4b120f3d1efd63e6b13ac9f1f89437f0cf6881cc4d8b5603a52b4.svg sources: - https://github.com/SonarSource/docker-sonarqube deprecated: true ================================================ FILE: charts/sonarqube/README.md ================================================ # SonarQube ***This chart has been deprecated, please refer to another available chart listed below*** ## Alternative Charts * [PlatformOne's version](https://repo1.dso.mil/platform-one/big-bang/apps/developer-tools/sonarqube) * [SonarSource's version](https://github.com/SonarSource/helm-chart-sonarqube) * Other versions can be found on artifacthub.io [SonarQube](https://www.sonarqube.org/) is an open sourced code quality scanning tool. ## Introduction This chart bootstraps a SonarQube instance with a PostgreSQL database. ## Prerequisites - Kubernetes 1.10+ ## Installing the chart To install the chart: ```bash helm repo add oteemocharts https://oteemo.github.io/charts helm install oteemocharts/sonarqube ``` The above command deploys Sonarqube on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
For OpenShift installations; if you wish for the chart to create the required SCC for the privileged initContainer, and run PostgreSQL under the restricted SCC use the following `set` statements: ```bash helm repo add oteemocharts https://oteemo.github.io/charts helm install oteemocharts/sonarqube --set OpenShift.enabled=true,\ serviceAccount.create=true,\ postgresql.serviceAccount.enabled=true,\ postgresql.securityContext.enabled=false,\ postgresql.volumePermissions.enabled=true,\ postgresql.volumePermissions.securityContext.runAsUser="auto" ``` The default login is admin/admin. ## Uninstalling the chart To uninstall/delete the deployment: ```bash $ helm list NAME REVISION UPDATED STATUS CHART NAMESPACE kindly-newt 1 Mon Oct 2 15:05:44 2017 DEPLOYED sonarqube-0.1.0 default $ helm delete kindly-newt ``` ## Ingress ### Path Some cloud may need the path to be `/*` instead of `/.` Try this first if you are having issues getting traffic through the ingress. ### Default Backend if you use GCP as a cloud provider you need to set a default backend to avoid useless default backend created by the gce controller. To add this default backend you must set "ingress.class" annotation with "gce" or "gce-internal" value. Example: ```yaml --- ingress: enabled: true hosts: - name: sonarqube.example.com path: "/*" annotations: kubernetes.io/ingress.class: "gce-internal" kubernetes.io/ingress.allow-http: "false" ``` ## Configuration The following table lists the configurable parameters of the Sonarqube chart and their default values. 
| Parameter | Description | Default | | -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------- | | `replicaCount` | Number of replicas deployed | `1` | | `deploymentStrategy` | Deployment strategy | `{}` | | `priorityClassName` | Schedule pods on priority (commented out) | `"high-priority"` | | `schedulerName` | Kubernetes scheduler name | None | | `OpenShift.enabled` | Define if this deployment is for OpenShift | `false` | | `OpenShift.createSCC` | If this deployment is for OpenShift, define if SCC should be created for sonarqube pod | `true` | | `image.repository` | image repository | `sonarqube` | | `image.tag` | `sonarqube` image tag. | `8.9.7-community` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.pullSecret` | imagePullSecret to use for private repository (commented out) | `my-repo-secret` | | `securityContext.fsGroup` | Group applied to mounted directories/files | `1000` | | `containerSecurityContext.runAsUser` | User to run containers in sonarqube pod as, unless overwritten (such as for init-sysctl container) | `1000` | | `elasticsearch.configureNode` | [DEPRECATED] Use initSysctl.enabled instead. 
| `true` | | `elasticsearch.bootstrapChecks` | Enables/disables Elasticsearch bootstrap checks | `true` | | `service.type` | Kubernetes service type | `ClusterIP` | | `service.externalPort` | Kubernetes service port | `9000` | | `service.internalPort` | Kubernetes container port | `9000` | | `service.labels` | Kubernetes service labels | None | | `service.annotations` | Kubernetes service annotations | None | | `service.loadBalancerSourceRanges` | Kubernetes service LB Allowed inbound IP addresses | None | | `service.loadBalancerIP` | Kubernetes service LB Optional fixed external IP | None | | `service.nodePort` | Kubernetes service port of exposed on nodePort or LD | None | | `ingress.enabled` | Flag for enabling ingress | false | | `ingress.labels` | Ingress additional labels | `{}` | | `ingress.hosts[0].name` | Hostname to your SonarQube installation | `sonar.organization.com` | | `ingress.hosts[0].path` | Path within the URL structure | / | | `ingress.hosts[0].pathType` | [URL match type](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types) | Prefix | | `ingress.hosts[0].serviceName` | Optional field to override the default serviceName of a path | None | | `ingress.hosts[0].servicePort` | Optional field to override the default servicePort of a path | None | | `ingress.tls` | Ingress secrets for TLS certificates | `[]` | | `affinity` | Node / Pod affinities | `{}` | | `tolerations` | List of node taints to tolerate | `[]` | | `nodeSelector` | Node labels for pod assignment | `{}` | | `hostAliases` | Aliases for IPs in /etc/hosts | `[]` | | `readinessProbe.initialDelaySecond` | ReadinessProbe initial delay for SonarQube checking | `60` | | `readinessProbe.periodSeconds` | ReadinessProbe period between checking SonarQube | `30` | | `readinessProbe.failureThreshold` | ReadinessProbe thresold for marking as failed | `6` | | `readinessProbe.sonarWebContext` | SonarQube web context for readinessProbe | / | | `livenessProbe.initialDelaySecond` | 
LivenessProbe initial delay for SonarQube checking | `60` | | `livenessProbe.periodSeconds` | LivenessProbe period between checking SonarQube | `30` | | `livenessProbe.sonarWebContext` | SonarQube web context for livenessProbe | / | | `initContainers.image` | Change init container image | `busybox:1.32` | | `initContainers.securityContext` | SecurityContext for init containers | `nil` | | `initContainers.resources` | Resources for init containers | `{}` | | `extraInitContainers` | Extra init containers to e.g. download required artifacts | `{}` | | `extraContainers` | Extra containers to run alongside sonarqube container | `{}` | | `caCerts.image` | Change init CA certificates container image | `adoptopenjdk/openjdk11:alpine` | | `caCerts.secret` | Name of the secret containing additional CA certificates | `nil` | | `initSysctl.enabled` | Modify k8s worker to conform to system requirements | `true` | | `initSysctl.vmMaxMapCount` | Set init sysctl container vm.max_map_count | `524288` | | `initSysctl.fsFileMax` | Set init sysctl container fs.file-max | `131072` | | `initSysctl.nofile` | Set init sysctl container open file descriptors limit | `131072` | | `initSysctl.nproc` | Set init sysctl container open threads limit | `8192 ` | | `initSysctl.image` | Change init sysctl container image | `busybox:1.32` | | `initSysctl.securityContext` | InitSysctl container security context | `{privileged: true}` | | `initSysctl.resources` | InitSysctl container resource requests & limits | `{}` | | `plugins.install` | List of plugins to install | `[]` | | `plugins.lib` | Plugins libray | `[]` | | `plugins.resources` | Plugin Pod resource requests & limits | `{}` | | `plugins.httpProxy` | For use behind a corporate proxy when downloading plugins | "" | | `plugins.httpsProxy` | For use behind a corporate proxy when downloading plugins | "" | | `plugins.noProxy` | For use behind a corporate proxy when downloading plugins | "" | | `plugins.image` | Image for plugins container | "" | 
| `plugins.resources` | Resources for plugins container | "" | | `plugins.netrcCreds` | Name of the secret containing .netrc file to use creds when downloading plugins | "" | | `plugins.noCheckCertificate` | Flag to not check server's certificate when downloading plugins | `false` | | `jvmOpts` | Values to add to SONARQUBE_WEB_JVM_OPTS | `""` | | `env` | Environment variables to attach to the pods | `nil` | | `annotations` | Sonarqube Pod annotations | `{}` | | `resources` | Sonarqube Pod resource requests & limits | `{}` | | `persistence.enabled` | Flag for enabling persistent storage | false | | `persistence.annotations` | Kubernetes pvc annotations | `{}` | | `persistence.existingClaim` | Do not create a new PVC but use this one | None | | `persistence.storageClass` | Storage class to be used | "" | | `persistence.accessMode` | Volumes access mode to be set | `ReadWriteOnce` | | `persistence.size` | Size of the volume | 10Gi | | `persistence.volumes` | Specify extra volumes. Refer to ".spec.volumes" specification | [] | | `persistence.mounts` | Specify extra mounts. 
Refer to ".spec.containers.volumeMounts" specification | [] | | `emptyDir` | Configuration of resources for `emptyDir` | `{}` | | `sonarProperties` | Custom `sonar.properties` file | None | | `sonarSecretProperties` | Additional `sonar.properties` file to load from a secret | None | | `sonarSecretKey` | Name of existing secret used for settings encryption | None | | `jdbcDatabaseType` | Type of the JDBC Database driver | `postgreql` | | `jdbcUrlOverride` | Overrides default JDBC URL creation | None | | `createPostgresqlSecret` | Set to `true` to create the postgresql secret in Sonarqube chart, instead of postgresql chart | `true` | | `postgresql.enabled` | Set to `false` to use external server | `true` | | `postgresql.existingSecret` | Secret containing the password of the external Postgresql server | `sonar-postgresql` | | `postgresql.secretKey` | | `postgresql-password` | | `postgresql.existingSecretPasswordKey` | Secret Key containing the password of the external Postgresql server | `postgresql-password` | | `postgresql.postgresqlServer` | Hostname of the external Postgresql server | `null` | | `postgresql.postgresqlUsername` | Postgresql database user | `sonarUser` | | `postgresql.postgresqlPassword` | Postgresql database password | `""` | | `postgresql.postgresqlDatabase` | Postgresql database name | `sonarDB` | | `postgresql.service.port` | Postgresql port | `5432` | | `postgresql.resources.requests.memory` | Postgresql memory request | `256Mi` | | `postgresql.resources.requests.cpu` | Postgresql cpu request | `250m` | | `postgresql.resources.limits.memory` | Postgresql memory limit | `2Gi` | | `postgresql.resources.limits.cpu` | Postgresql cpu limit | `2` | | `postgresql.persistence.enabled` | Postgresql persistence en/disabled | `true` | | `postgresql.persistence.accessMode` | Postgresql persistence accessMode | `ReadWriteOnce` | | `postgresql.persistence.size` | Postgresql persistence size | `20Gi` | | `postgresql.persistence.storageClass` | Postgresql 
persistence storageClass | `""` | | `postgresql.securityContext.enabled` | Postgresql securityContext en/disabled | `true` | | `postgresql.securityContext.fsGroup` | Postgresql securityContext fsGroup | `1001` | | `postgresql.securityContext.runAsUser` | Postgresql securityContext runAsUser | `1001` | | `postgresql.volumePermissions.enabled` | Postgres vol permissions en/disabled | `false` | | `postgresql.volumePermissions.securityContext.runAsUser` | Postgres vol permissions secContext runAsUser | `0` | | `postgresql.shmVolume.chmod.enabled` | Postgresql shared memory vol en/disabled | `false` | | `postgresql.serivceAccount.enabled` | Postgresql service Account creation en/disabled | `false` | | `postgresql.serivceAccount.name` | Postgresql service Account name (commented out) | `""` | | `podLabels` | Map of labels to add to the pods | `{}` | | `sonarqubeFolder` | Directory name of Sonarqube | `/opt/sonarqube` | | `tests.enabled` | Flag that allows tests to be excluded from generated yaml | true | | `tests.image` | Change init test container image | `dduportal/bats:0.4.0` | | `serviceAccount.create` | If set to true, create a serviceAccount | false | | `serviceAccount.name` | Name of the serviceAccount to create/use | `sonarqube-sonarqube` | | `serviceAccount.annotations` | Additional serviceAccount annotations | `{}` | | `extraConfig.secrets` | A list of `Secret`s (which must contain key/value pairs) which may be loaded into the Scanner as environment variables | `[]` | | `extraConfig.configmaps` | A list of `ConfigMap`s (which must contain key/value pairs) which may be loaded into the Scanner as environment variables | `[]` | | `account.adminPassword` | Custom admin password | `"admin"` | | `account.currentAdminPassword` | Current admin password | `"admin"` | | `curlContainerImage` | Curl container image | `"curlimages/curl:latest"` | | `adminJobAnnotations` | Custom annotations for admin hook Job | `{}` | | `terminationGracePeriodSeconds` | Configuration of 
`terminationGracePeriodSeconds` | `60` | You can also configure values for the PostgreSQL database via the Postgresql [Chart](https://hub.helm.sh/charts/bitnami/postgresql) For overriding variables see: [Customizing the chart](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing) ### Use custom `cacerts` In environments with air-gapped setup, especially with internal tooling (repos) and self-signed certificates it is required to provide an adequate `cacerts` which overrides the default one: 1. Create a yaml file `cacerts.yaml` with a secret that contains one or more keys to represent the certificates that you want including ```yaml apiVersion: v1 kind: Secret metadata: name: my-cacerts data: cert-1.crt: | xxxxxxxxxxxxxxxxxxxxxxx ``` 2. Upload your `cacerts.yaml` to a secret in the cluster you are installing Sonarqube to. ```shell kubectl apply -f cacerts.yaml ``` 3. Set the following values of the chart: ```yaml caCerts: enabled: true secret: my-cacerts ``` ### Elasticsearch Settings Since SonarQube comes bundled with an Elasticsearch instance, some [bootstrap checks](https://www.elastic.co/guide/en/elasticsearch/reference/master/bootstrap-checks.html) of the host settings are done at start. This chart offers the option to use an initContainer in privilaged mode to automatically set certain kernel settings on the kube worker. While this can ensure proper functionality of Elasticsearch, modifying the underlying kernel settings on the Kubernetes node can impact other users. It may be best to work with your cluster administrator to either provide specific nodes with the proper kernel settings, or ensure they are set cluster wide. To enable auto-configuration of the kube worker node, set `elasticsearch.configureNode` to `true`. This is the default behavior, so you do not need to explicitly set this. This will run `sysctl -w vm.max_map_count=262144` on the worker where the sonarqube pod(s) get scheduled. 
This needs to be set to `262144` but normally defaults to `65530`. Other kernel settings are recommended by the [docker image](https://hub.docker.com/_/sonarqube/#requirements), but the defaults work fine in most cases. To disable worker node configuration, set `elasticsearch.configureNode` to `false`. Note that if node configuration is not enabled, then you will likely need to also disable the Elasticsearch bootstrap checks. These can be explicitly disabled by setting `elasticsearch.bootstrapChecks` to `false`. ### As of 7.9 Mysql is no longer supported, so it has been removed from the chart ### Extra Config For environments where another tool, such as terraform or ansible, is used to provision infrastructure or passwords then setting databases addresses and credentials via helm becomes less than ideal. Ditto for environments where this config may be visible. In such environments, configuration may be read, via environment variables, from Secrets and ConfigMaps. 1. Create a `ConfigMap` (or `Secret`) containing key/value pairs, as expected by Sonarqube ```yaml apiVersion: v1 kind: ConfigMap metadata: name: external-sonarqube-opts data: SONARQUBE_JDBC_USERNAME: foo SONARQUBE_JDBC_URL: jdbc:postgresql://db.example.com:5432/sonar ``` 2. Set the following in your `values.yaml` (using the key `extraConfig.secrets` to reference `Secret`s) ```yaml extraConfig: configmaps: - external-sonarqube-opts ``` ================================================ FILE: charts/sonarqube/templates/NOTES.txt ================================================ 1. Get the application URL by running these commands: {{- if .Values.ingress.enabled }} {{- range .Values.ingress.hosts }} http://{{ .name }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "sonarqube.fullname" . 
}}) export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get svc -w {{ template "sonarqube.fullname" . }}' export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "sonarqube.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') echo http://$SERVICE_IP:{{ .Values.service.externalPort }} {{- else if contains "ClusterIP" .Values.service.type }} export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "sonarqube.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") echo "Visit http://127.0.0.1:8080 to use your application" kubectl port-forward $POD_NAME 8080:{{ .Values.service.externalPort }} -n {{ .Release.Namespace }} {{- end }} ================================================ FILE: charts/sonarqube/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "sonarqube.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "sonarqube.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 -}} {{- else -}} {{- printf "%s-%s" .Release.Name (include "sonarqube.name" .) | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{/* Create a default fully qualified mysql/postgresql name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
*/}} {{- define "postgresql.fullname" -}} {{- printf "%s-%s" .Release.Name "postgresql" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Determine the hostname to use for PostgreSQL/mySQL. */}} {{- define "postgresql.hostname" -}} {{- if .Values.postgresql.enabled -}} {{- printf "%s-%s" .Release.Name "postgresql" | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s" .Values.postgresql.postgresqlServer -}} {{- end -}} {{- end -}} ================================================ FILE: charts/sonarqube/templates/change-admin-password-hook.yml ================================================ {{- if .Values.account }} {{- if .Values.account.adminPassword }} apiVersion: batch/v1 kind: Job metadata: name: {{ template "sonarqube.fullname" . }}-change-admin-password-hook labels: app: {{ template "sonarqube.name" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- range $key, $value := .Values.service.labels }} {{ $key }}: {{ $value | quote }} {{- end }} annotations: "helm.sh/hook": post-install "helm.sh/hook-delete-policy": hook-succeeded {{- range $key, $value := .Values.adminJobAnnotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: template: metadata: name: {{ template "sonarqube.fullname" . }}-change-admin-password-hook labels: app: {{ template "sonarqube.name" . }} heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- range $key, $value := .Values.service.labels }} {{ $key }}: {{ $value | quote }} {{- end }} spec: restartPolicy: OnFailure containers: - name: {{ template "sonarqube.fullname" . }}-change-default-admin-password image: {{ default "curlimages/curl:latest" .Values.curlContainerImage }} command: ["sh", "-c", 'until curl -v --connect-timeout 100 {{ template "sonarqube.fullname" . 
}}:{{ default 9000 .Values.service.internalPort }}/api/system/status | grep -w UP; do sleep 10; done; curl --connect-timeout 100 -u admin:{{ default "admin" .Values.account.currentAdminPassword }} -X POST "{{ template "sonarqube.fullname" . }}:{{ default 9000 .Values.service.internalPort }}/api/users/change_password?login=admin&previousPassword={{ .Values.account.currentAdminPassword | default "admin" | urlquery }}&password={{ .Values.account.adminPassword | default "admin" | urlquery }}"'] {{- end }} {{- end }} ================================================ FILE: charts/sonarqube/templates/config.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: {{ template "sonarqube.fullname" . }}-config labels: app: {{ template "sonarqube.name" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} data: {{- if and .Values.sonarSecretKey (not .Values.sonarProperties) (not .Values.elasticsearch.bootstrapChecks) }} sonar.properties: sonar.secretKeyPath={{ .Values.sonarqubeFolder }}/secret/sonar-secret.txt {{- end }} {{- if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }} sonar.properties: {{ range $key, $val := .Values.sonarProperties }} {{ $key }}={{ $val }} {{ end }} {{- if not .Values.elasticsearch.bootstrapChecks }} sonar.es.bootstrap.checks.disable=true {{- end }} {{- end }} {{- if and .Values.sonarSecretKey .Values.sonarProperties }} sonar.secretKeyPath={{ .Values.sonarqubeFolder }}/secret/sonar-secret.txt {{- end }} ================================================ FILE: charts/sonarqube/templates/deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: {{ template "sonarqube.fullname" . }} labels: app: {{ template "sonarqube.name" . 
}} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} app.kubernetes.io/name: {{ template "sonarqube.name" . }}-{{ template "sonarqube.fullname" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/part-of: sonarqube app.kubernetes.io/component: {{ template "sonarqube.fullname" . }} app.kubernetes.io/version: {{ .Values.image.tag | quote }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: app: {{ template "sonarqube.name" . }} release: {{ .Release.Name }} {{- if .Values.deploymentStrategy }} strategy: {{ toYaml .Values.deploymentStrategy | indent 4 }} {{- end }} template: metadata: labels: app: {{ template "sonarqube.name" . }} release: {{ .Release.Name }} {{- with .Values.podLabels }} {{ toYaml . | indent 8 }} {{- end }} annotations: checksum/init-sysctl: {{ include (print $.Template.BasePath "/init-sysctl.yaml") . | sha256sum }} checksum/plugins: {{ include (print $.Template.BasePath "/install-plugins.yaml") . | sha256sum }} checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} {{- if .Values.annotations}} {{- range $key, $value := .Values.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} {{- end }} spec: {{- if .Values.schedulerName }} schedulerName: {{ .Values.schedulerName }} {{- end }} {{- if .Values.serviceAccount.create }} {{- if .Values.serviceAccount.name }} serviceAccountName: {{ .Values.serviceAccount.name }} {{- else }} serviceAccountName: {{ include "sonarqube.fullname" . 
}} {{- end }} {{- end }} securityContext: {{ toYaml .Values.securityContext | indent 8 }} {{- if .Values.image.pullSecret }} imagePullSecrets: - name: {{ .Values.image.pullSecret }} {{- end }} initContainers: {{- if .Values.extraInitContainers }} {{ toYaml .Values.extraInitContainers | indent 8 }} {{- end }} {{- if .Values.caCerts }} - name: ca-certs image: {{ default "adoptopenjdk/openjdk11:alpine" .Values.caCerts.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["sh"] args: ["-c", "cp -f \"${JAVA_HOME}/lib/security/cacerts\" /tmp/certs/cacerts; if [ \"$(ls /tmp/secrets/ca-certs)\" ]; then for f in /tmp/secrets/ca-certs/*; do keytool -importcert -file \"${f}\" -alias \"$(basename \"${f}\")\" -keystore /tmp/certs/cacerts -storepass changeit -trustcacerts -noprompt; done; fi;"] {{- if $securityContext := .Values.initContainers.securityContext }} securityContext: {{ toYaml $securityContext | indent 12 }} {{- end }} resources: {{ toYaml .Values.initContainers.resources | indent 12 }} volumeMounts: - mountPath: /tmp/certs name: sonarqube subPath: certs - mountPath: /tmp/secrets/ca-certs name: ca-certs {{- with .Values.env }} env: {{- . | toYaml | trim | nindent 12 }} {{- end }} {{- end }} {{- if or .Values.initSysctl.enabled .Values.elasticsearch.configureNode }} - name: init-sysctl image: {{ default "busybox:1.32" .Values.initSysctl.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} {{- if $securityContext := (default .Values.initContainers.securityContext .Values.initSysctl.securityContext) }} securityContext: {{ toYaml $securityContext | indent 12 }} {{- end }} resources: {{ toYaml (default .Values.initContainers.resources .Values.initSysctl.resources) | indent 12 }} command: ["sh", "-e", "/tmp/scripts/init_sysctl.sh"] volumeMounts: - name: init-sysctl mountPath: /tmp/scripts/ {{- with .Values.env }} env: {{- . 
| toYaml | trim | nindent 12 }} {{- end }} {{- end }} {{- if .Values.plugins.install }} - name: install-plugins image: {{ default "rjkernick/alpine-wget:latest" .Values.plugins.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["sh", "-e", "/tmp/scripts/install_plugins.sh"] volumeMounts: - mountPath: {{ .Values.sonarqubeFolder }}/extensions/downloads name: sonarqube subPath: extensions/downloads - mountPath: {{ .Values.sonarqubeFolder }}/lib/common name: sonarqube subPath: lib/common - name: install-plugins mountPath: /tmp/scripts/ {{- if .Values.plugins.netrcCreds }} - name: plugins-netrc-file mountPath: /root {{- end }} {{- if $securityContext := .Values.initContainers.securityContext }} securityContext: {{ toYaml $securityContext | indent 12 }} {{- end }} resources: {{ toYaml (default .Values.initContainers.resources .Values.plugins.resource) | indent 12 }} {{- with .Values.env }} env: {{- . | toYaml | trim | nindent 12 }} {{- end }} {{- end }} {{- if and .Values.sonarProperties .Values.sonarSecretProperties }} - name: concat-properties image: {{ default "busybox:1.32" .Values.initContainers.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["sh", "-c", "awk 1 /tmp/props/sonar.properties /tmp/props/secret.properties > /tmp/result/sonar.properties"] volumeMounts: - mountPath: /tmp/props/sonar.properties name: config subPath: sonar.properties - mountPath: /tmp/props/secret.properties name: secret-config subPath: secret.properties - mountPath: /tmp/result name: concat-dir {{- if $securityContext := .Values.initContainers.securityContext }} securityContext: {{ toYaml $securityContext | indent 12 }} {{- end }} resources: {{ toYaml .Values.initContainers.resources | indent 12 }} {{- with .Values.env }} env: {{- . 
| toYaml | trim | nindent 12 }} {{- end }} {{- end }} {{- if .Values.postgresql.enabled }} - name: "wait-for-db" image: {{ default "busybox:1.32" .Values.initContainers.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} {{- if $securityContext := .Values.initContainers.securityContext }} securityContext: {{ toYaml $securityContext | indent 12 }} {{- end }} resources: {{ toYaml .Values.initContainers.resources | indent 12 }} command: ["/bin/sh", "-c", "for i in $(seq 1 200); do nc -z -w3 {{ .Release.Name}}-postgresql 5432 && exit 0 || sleep 2; done; exit 1"] {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} {{- if .Values.nodeSelector }} nodeSelector: {{ toYaml .Values.nodeSelector | indent 8 }} {{- end }} {{- if .Values.hostAliases }} hostAliases: {{ toYaml .Values.hostAliases | indent 8 }} {{- end }} {{- if .Values.tolerations }} tolerations: {{ toYaml .Values.tolerations | indent 8 }} {{- end }} {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | indent 8 }} {{- end }} containers: {{- if .Values.extraContainers }} {{- toYaml .Values.extraContainers | nindent 8 }} {{- end }} - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: {{ .Values.service.internalPort }} protocol: TCP env: - name: SONAR_WEB_JAVAOPTS {{- if .Values.caCerts }} value: {{ printf "-Djavax.net.ssl.trustStore=%s/certs/cacerts %s" .Values.sonarqubeFolder .Values.jvmOpts | trim | quote }} {{- else }} value: "{{ .Values.jvmOpts }}" {{- end }} {{- if .Values.caCerts }} - name: SONAR_CE_JAVAOPTS value: {{ printf "-Djavax.net.ssl.trustStore=%s/certs/cacerts" .Values.sonarqubeFolder | trim | quote }} {{- end }} - name: SONAR_JDBC_PASSWORD valueFrom: secretKeyRef: name: {{- if .Values.postgresql.existingSecret }} {{ .Values.postgresql.existingSecret }} {{ else }} {{ .Release.Name }}-postgresql {{- end }} key: 
{{ .Values.postgresql.secretKey }} {{- with .Values.env }} {{- . | toYaml | trim | nindent 12 }} {{- end }} envFrom: - configMapRef: name: {{ template "sonarqube.fullname" . }}-postgres-config {{- range .Values.extraConfig.secrets }} - secretRef: name: {{ . }} {{- end }} {{- range .Values.extraConfig.configmaps }} - configMapRef: name: {{ . }} {{- end }} livenessProbe: httpGet: path: {{ .Values.livenessProbe.sonarWebContext }}api/system/status port: http initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} readinessProbe: httpGet: path: {{ .Values.readinessProbe.sonarWebContext }}api/system/status port: http initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} failureThreshold: {{ .Values.readinessProbe.failureThreshold }} {{- if .Values.containerSecurityContext }} securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} {{- end }} volumeMounts: {{- if .Values.persistence.mounts }} {{ toYaml .Values.persistence.mounts | indent 12 }} {{- end }} {{- if and .Values.sonarProperties .Values.sonarSecretProperties }} - mountPath: {{ .Values.sonarqubeFolder }}/conf/ name: concat-dir {{- else if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }} - mountPath: {{ .Values.sonarqubeFolder }}/conf/ name: config {{- end }} {{- if .Values.sonarSecretKey }} - mountPath: {{ .Values.sonarqubeFolder }}/secret/ name: secret {{- end }} {{- if .Values.caCerts }} - mountPath: {{ .Values.sonarqubeFolder }}/certs name: sonarqube subPath: certs {{- end }} - mountPath: {{ .Values.sonarqubeFolder }}/data name: sonarqube subPath: data {{- if .Values.persistence.enabled }} - mountPath: {{ .Values.sonarqubeFolder }}/extensions name: sonarqube subPath: extensions {{- else if .Values.plugins.install }} - mountPath: {{ .Values.sonarqubeFolder }}/extensions/downloads name: sonarqube subPath: extensions/downloads 
{{- end }} {{- if .Values.plugins.lib }} {{- range $index, $val := .Values.plugins.lib }} - mountPath: {{ $.Values.sonarqubeFolder }}/lib/common/{{ $val }} name: sonarqube subPath: lib/common/{{ $val }} {{- end }} {{- end }} - mountPath: {{ .Values.sonarqubeFolder }}/temp name: sonarqube subPath: temp - mountPath: {{ .Values.sonarqubeFolder }}/logs name: sonarqube subPath: logs - mountPath: /tmp name: tmp-dir resources: {{ toYaml .Values.resources | indent 12 }} {{- if .Values.nodeSelector }} nodeSelector: {{ toYaml .Values.nodeSelector | indent 8 }} {{- end }} {{- if .Values.tolerations }} tolerations: {{ toYaml .Values.tolerations | indent 8 }} {{- end }} {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | indent 8 }} {{- end }} volumes: {{- if .Values.persistence.volumes }} {{ tpl (toYaml .Values.persistence.volumes | indent 6) . }} {{- end }} {{- if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }} - name: config configMap: name: {{ template "sonarqube.fullname" . }}-config items: - key: sonar.properties path: sonar.properties {{- end }} {{- if .Values.sonarSecretProperties }} - name: secret-config secret: secretName: {{ .Values.sonarSecretProperties }} items: - key: secret.properties path: secret.properties {{- end }} {{- if .Values.sonarSecretKey }} - name: secret secret: secretName: {{ .Values.sonarSecretKey }} items: - key: sonar-secret.txt path: sonar-secret.txt {{- end }} {{- if .Values.caCerts }} - name: ca-certs secret: secretName: {{ .Values.caCerts.secret }} {{- end }} {{- if .Values.plugins.netrcCreds }} - name: plugins-netrc-file secret: secretName: {{ .Values.plugins.netrcCreds }} items: - key: netrc path: .netrc {{- end }} - name: init-sysctl configMap: name: {{ template "sonarqube.fullname" . }}-init-sysctl items: - key: init_sysctl.sh path: init_sysctl.sh - name: install-plugins configMap: name: {{ template "sonarqube.fullname" . 
}}-install-plugins items: - key: install_plugins.sh path: install_plugins.sh - name: sonarqube {{- if .Values.persistence.enabled }} persistentVolumeClaim: claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "sonarqube.fullname" . }}{{- end }} {{- else }} emptyDir: {{- toYaml .Values.emptyDir | nindent 10 }} {{- end }} - name : tmp-dir emptyDir: {{- toYaml .Values.emptyDir | nindent 10 }} {{- if .Values.sonarSecretProperties }} - name : concat-dir emptyDir: {{- toYaml .Values.emptyDir | nindent 10 -}} {{- end }} ================================================ FILE: charts/sonarqube/templates/ingress.yaml ================================================ {{- if .Values.ingress.enabled -}} {{- $serviceName := include "sonarqube.fullname" . -}} {{- $servicePort := .Values.service.externalPort -}} {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}} apiVersion: networking.k8s.io/v1 {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} apiVersion: networking.k8s.io/v1beta1 {{- else -}} apiVersion: extensions/v1beta1 {{- end }} kind: Ingress metadata: name: {{ template "sonarqube.fullname" . }} labels: app: {{ template "sonarqube.name" . 
}}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- if .Values.ingress.labels }}
{{ .Values.ingress.labels | toYaml | trimSuffix "\n" | indent 4 -}}
{{- end }}
{{- if .Values.ingress.annotations }}
  annotations:
{{- range $key, $value := .Values.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ingress.className }}
  ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.annotations }}
{{- range $key, $value := .Values.ingress.annotations }}
{{- if and (eq $key "kubernetes.io/ingress.class") (contains (toString $value) "gce") }}
  {{- /* FIX: inside this range "." is the annotation value (a string), so the
       previous ".serviceName"/".servicePort" dereferences failed at render
       time whenever the gce branch was taken; use the captured defaults. */}}
  backend:
    serviceName: {{ $serviceName }}
    servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- end }}
  rules:
{{- range .Values.ingress.hosts }}
    - host: {{ .name }}
      http:
        paths:
          - path: {{ .path }}
            {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
            pathType: {{ .pathType | default "Prefix" }}
            {{- end }}
            backend:
              {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
              service:
                name: {{ default $serviceName .serviceName }}
                port:
                  number: {{ default $servicePort .servicePort }}
              {{- else }}
              serviceName: {{ default $serviceName .serviceName }}
              servicePort: {{ default $servicePort .servicePort }}
              {{- end }}
{{- end -}}
{{- if .Values.ingress.tls }}
  tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}

================================================
FILE: charts/sonarqube/templates/init-sysctl.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "sonarqube.fullname" . }}-init-sysctl
  labels:
    app: {{ template "sonarqube.name" .
}} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} data: init_sysctl.sh: |- {{- if .Values.initSysctl.vmMaxMapCount }} if [[ "$(sysctl -n vm.max_map_count)" -lt {{ .Values.initSysctl.vmMaxMapCount }} ]]; then sysctl -w vm.max_map_count={{ .Values.initSysctl.vmMaxMapCount }} fi {{- end }} {{- if .Values.initSysctl.fsFileMax }} if [[ "$(sysctl -n fs.file-max)" -lt {{ .Values.initSysctl.fsFileMax }} ]]; then sysctl -w fs.file-max={{ .Values.initSysctl.fsFileMax }} fi {{- end }} {{- if .Values.initSysctl.nofile }} if [[ "$(ulimit -n)" != "unlimited" ]]; then if [[ "$(ulimit -n)" -lt {{ .Values.initSysctl.nofile }} ]]; then echo "ulimit -n {{ .Values.initSysctl.nofile }}" ulimit -n {{ .Values.initSysctl.nofile }} fi fi {{- end }} {{- if .Values.initSysctl.nproc }} if [[ "$(ulimit -u)" != "unlimited" ]]; then if [[ "$(ulimit -u)" -lt {{ .Values.initSysctl.nproc }} ]]; then echo "ulimit -u {{ .Values.initSysctl.nproc }}" ulimit -u {{ .Values.initSysctl.nproc }} fi fi {{- end }} ================================================ FILE: charts/sonarqube/templates/install-plugins.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: {{ template "sonarqube.fullname" . }}-install-plugins labels: app: {{ template "sonarqube.name" . 
}}
    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
  install_plugins.sh: |-
    {{- if .Values.plugins.httpProxy }}
    export http_proxy={{ .Values.plugins.httpProxy }}
    {{- end }}
    {{- if .Values.plugins.httpsProxy }}
    export https_proxy={{ .Values.plugins.httpsProxy }}
    {{- end }}
    {{- if .Values.plugins.noProxy }}
    export no_proxy={{ .Values.plugins.noProxy }}
    {{- end }}
    {{- if .Values.plugins.install }}
    # FIX: the previous '[ -e dir/* ] && rm dir/*' fails with "binary operator
    # expected" when the glob matches more than one file; 'rm -f' is a no-op
    # when nothing matches and removes everything otherwise.
    rm -f {{ .Values.sonarqubeFolder }}/extensions/downloads/*
    {{ range $index, $val := .Values.plugins.install }}
    echo {{ $val | quote }} >> {{ $.Values.sonarqubeFolder }}/extensions/downloads/list{{ end }}
    cat {{ .Values.sonarqubeFolder }}/extensions/downloads/list | xargs -n 1 -P 8 wget --directory-prefix {{ .Values.sonarqubeFolder }}/extensions/downloads --no-verbose{{ if .Values.plugins.noCheckCertificate }} --no-check-certificate{{ end }}
    rm {{ .Values.sonarqubeFolder }}/extensions/downloads/list
    {{- end }}
    {{- if .Values.plugins.lib }}
    {{- range $index, $val := .Values.plugins.lib }}
    cp -f {{ $.Values.sonarqubeFolder }}/extensions/downloads/{{ $val }} {{ $.Values.sonarqubeFolder }}/lib/common/{{ $val }}
    {{- end }}
    {{- end }}

================================================
FILE: charts/sonarqube/templates/postgres-config.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "sonarqube.fullname" . }}-postgres-config
  labels:
    app: {{ template "sonarqube.name" .
}} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} data: {{- if .Values.postgresql.postgresqlUsername }} SONAR_JDBC_USERNAME: {{ .Values.postgresql.postgresqlUsername | quote }} {{- end }} {{- if .Values.jdbcUrlOverride }} SONAR_JDBC_URL: "{{- .Values.jdbcUrlOverride -}}" {{- else if and .Values.postgresql.service.port .Values.postgresql.postgresqlDatabase }} SONAR_JDBC_URL: "jdbc:{{- .Values.jdbcDatabaseType -}}://{{ template "postgresql.hostname" . }}:{{- .Values.postgresql.service.port -}}/{{- .Values.postgresql.postgresqlDatabase -}}" {{- end }} ================================================ FILE: charts/sonarqube/templates/pvc.yaml ================================================ {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} kind: PersistentVolumeClaim apiVersion: v1 metadata: name: {{ template "sonarqube.fullname" . }} labels: app: {{ template "sonarqube.name" . }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" {{ if .Values.persistence.annotations}} annotations: {{- range $key, $value := .Values.persistence.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} {{- end }} spec: accessModes: - {{ .Values.persistence.accessMode | quote }} resources: requests: storage: {{ .Values.persistence.size | quote }} {{- if .Values.persistence.storageClass }} {{- if (eq "-" .Values.persistence.storageClass) }} storageClassName: "" {{- else }} storageClassName: "{{ .Values.persistence.storageClass }}" {{- end }} {{- end }} {{- end }} ================================================ FILE: charts/sonarqube/templates/secret.yaml ================================================ {{- if .Values.createPostgresqlSecret -}} {{- $relname := printf "%s-%s" .Release.Name "postgresql" -}} apiVersion: v1 kind: Secret metadata: name: {{- if .Values.postgresql.existingSecret }} {{ 
.Values.postgresql.existingSecret }} {{ else }} {{ $relname }} {{- end }} labels: app: {{ template "sonarqube.name" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} type: Opaque data: {{- if .Release.IsUpgrade }} # check to see if secret already exists in namespace. {{- if (index (lookup "v1" "Secret" .Release.Namespace $relname ) ) }} postgresql-password: {{ index (lookup "v1" "Secret" .Release.Namespace $relname ).data "postgresql-password" }} {{- else if (index (lookup "v1" "Secret" .Release.Namespace .Values.postgresql.existingSecret ) ) }} postgresql-password: {{ index (lookup "v1" "Secret" .Release.Namespace .Values.postgresql.existingSecret ).data "postgresql-password" }} {{ else }} # if a secret isn't found when performing an upgrade create a new secret. {{- if .Values.postgresql.postgresqlPassword }} postgresql-password: {{ .Values.postgresql.postgresqlPassword | b64enc | quote }} {{- else }} {{- $postgresRandomPassword := randAlphaNum 16 | b64enc | quote }} postgresql-password: {{ $postgresRandomPassword }} {{- end }} {{- end }} {{ else }} # Perform normal install operation {{- if .Values.postgresql.postgresqlPassword }} postgresql-password: {{ .Values.postgresql.postgresqlPassword | b64enc | quote }} {{- else }} {{- $postgresRandomPassword := randAlphaNum 16 | b64enc | quote }} postgresql-password: {{ $postgresRandomPassword }} {{ end }} {{ end }} {{- end }} ================================================ FILE: charts/sonarqube/templates/service.yaml ================================================ apiVersion: v1 kind: Service metadata: name: {{ template "sonarqube.fullname" . }} labels: app: {{ template "sonarqube.name" . 
}} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} {{- range $key, $value := .Values.service.labels }} {{ $key }}: {{ $value | quote }} {{- end }} {{ if .Values.service.annotations}} annotations: {{- range $key, $value := .Values.service.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} {{- end }} spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.externalPort }} targetPort: http protocol: TCP name: http {{- if .Values.service.nodePort }} nodePort: {{ .Values.service.nodePort }} {{- end }} selector: app: {{ template "sonarqube.name" . }} release: {{ .Release.Name }} {{- if eq .Values.service.type "LoadBalancer"}} {{- if .Values.service.loadBalancerSourceRanges }} loadBalancerSourceRanges: {{- range .Values.service.loadBalancerSourceRanges }} - {{ . }} {{- end }} {{- end -}} {{- if .Values.service.loadBalancerIP}} loadBalancerIP: {{.Values.service.loadBalancerIP}} {{- end }} {{- end }} ================================================ FILE: charts/sonarqube/templates/serviceaccount.yaml ================================================ {{- if .Values.serviceAccount.create -}} --- apiVersion: v1 kind: ServiceAccount metadata: {{- if .Values.serviceAccount.name }} name: {{ .Values.serviceAccount.name }} {{- else }} name: {{ include "sonarqube.fullname" . 
}} {{- end }} {{- if .Values.serviceAccount.annotations }} annotations: {{ toYaml .Values.serviceAccount.annotations | indent 4 }} {{- end }} {{- end -}} ================================================ FILE: charts/sonarqube/templates/sonarqube-scc.yaml ================================================ {{- if and (.Values.OpenShift.enabled) (.Values.OpenShift.createSCC) }} # This SCC allows any user ID but restricts capabilties and host access apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: annotations: kubernetes.io/description: "allows pod to run as root, privileged and run sysctl" "helm.sh/hook": pre-install name: {{ .Release.Name }}-privileged-scc allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false allowHostPID: false allowHostPorts: false allowPrivilegedContainer: true allowPrivilegeEscalation: true allowedCapabilities: [] allowedFlexVolumes: [] allowedUnsafeSysctls: [] defaultAddCapabilities: [] defaultAllowPrivilegeEscalation: true fsGroup: type: RunAsAny readOnlyRootFilesystem: false requiredDropCapabilities: - KILL - MKNOD - SETUID - SETGID runAsUser: type: RunAsAny # This can be customized for your host machine seLinuxContext: type: MustRunAs # seLinuxOptions: # level: # user: # role: # type: supplementalGroups: type: RunAsAny # This can be customized for your host machine volumes: - configMap - downwardAPI - emptyDir - persistentVolumeClaim - projected - secret # If you want a priority on your SCC -- set for a value more than 0 priority: 11 users: {{- if .Values.serviceAccount.name }} - system:serviceaccount:{{ .Release.Namespace }}:{{ .Release.Name }}-{{ .Values.serviceAccount.name }} {{- else }} - system:serviceaccount:{{ .Release.Namespace }}:{{ .Release.Name }}-sonarqube {{- end }} {{- if .Values.postgresql.securityContext.enabled }} - system:serviceaccount:{{ .Release.Namespace }}:{{ .Release.Name }}-postgresql {{- end }} {{- end }} ================================================ FILE: 
charts/sonarqube/templates/tests/sonarqube-test.yaml ================================================ {{- if .Values.tests.enabled -}} apiVersion: v1 kind: Pod metadata: name: "{{ .Release.Name }}-ui-test" annotations: "helm.sh/hook": test-success labels: app: {{ template "sonarqube.name" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} spec: initContainers: - name: "bats" image: "bats/bats:1.2.1" imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["bash", "-c"] args: - |- set -ex cp -R /opt/bats /tools/bats/ volumeMounts: - mountPath: /tools name: tools containers: - name: {{ .Release.Name }}-ui-test image: {{ default "bitnami/minideb-extras" .Values.tests.image }} imagePullPolicy: {{ .Values.image.pullPolicy }} command: [ "/tools/bats/bin/bats", "--tap", "/tests/run.sh"] volumeMounts: - mountPath: /tests name: tests readOnly: true - mountPath: /tools name: tools volumes: - name: tests configMap: name: {{ template "sonarqube.fullname" . }}-tests - name: tools emptyDir: {{ toYaml .Values.emptyDir | nindent 6 }} restartPolicy: Never {{- end -}} ================================================ FILE: charts/sonarqube/templates/tests/test-config.yaml ================================================ {{- if .Values.tests.enabled -}} apiVersion: v1 kind: ConfigMap metadata: name: {{ template "sonarqube.fullname" . }}-tests labels: app: {{ template "sonarqube.name" . }} chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} data: run.sh: |- @test "Testing Sonarqube UI is accessible" { curl --connect-timeout 5 --retry 12 --retry-delay 1 --retry-max-time 60 {{ template "sonarqube.fullname" . 
}}:{{ .Values.service.internalPort }}/api/system/status } {{- end -}} ================================================ FILE: charts/sonarqube/values.yaml ================================================ # Default values for sonarqube. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 # This will use the default deployment strategy unless it is overriden deploymentStrategy: {} # Uncomment this to scheduler pods on priority # priorityClassName: "high-priority" ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## # schedulerName: ## Is this deployment for OpenShift? If so, we help with SCCs OpenShift: enabled: false createSCC: true image: repository: sonarqube tag: 8.9.7-community pullPolicy: IfNotPresent # If using a private repository, the name of the imagePullSecret to use # pullSecret: my-repo-secret # Set security context for sonarqube pod securityContext: fsGroup: 1000 # Set security context for sonarqube container containerSecurityContext: # Sonarqube dockerfile creates sonarqube user as UID and GID 1000 runAsUser: 1000 # Settings to configure elasticsearch host requirements elasticsearch: # DEPRECATED: Use initSysctl.enabled instead configureNode: true bootstrapChecks: true service: type: ClusterIP externalPort: 9000 internalPort: 9000 labels: annotations: {} # May be used in example for internal load balancing in GCP: # cloud.google.com/load-balancer-type: Internal # loadBalancerSourceRanges: # - 0.0.0.0/0 # loadBalancerIP: 1.2.3.4 # You can set the port to be exposed when type is NodePort or LoadBalancer. # nodePort: 30000 ingress: enabled: false # Used to create an Ingress record. 
hosts: - name: sonar.organization.com # Different clouds or configurations might need /* as the default path path: / # Only for API version "networking.k8s.io/v1" # https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types pathType: Prefix # For additional control over serviceName and servicePort # serviceName: someService # servicePort: somePort annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # This property allows for reports up to a certain size to be uploaded to SonarQube # nginx.ingress.kubernetes.io/proxy-body-size: "8m" # Additional labels for Ingress manifest file # labels: # traffic-type: external # traffic-type: internal tls: [] # Secrets must be manually created in the namespace. # - secretName: chart-example-tls # hosts: # - chart-example.local # className: nginx # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # Node labels for pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # hostAliases allows the modification of the hosts file inside a container hostAliases: [] # - ip: "192.168.1.10" # hostnames: # - "example.com" # - "www.example.com" readinessProbe: initialDelaySeconds: 60 periodSeconds: 30 failureThreshold: 6 # If an ingress *path* other than the root (/) is defined, it should be reflected here # A trailing "/" must be included sonarWebContext: / # sonarWebContext: /sonarqube/ livenessProbe: initialDelaySeconds: 60 periodSeconds: 30 # If an ingress *path* other than the root (/) is defined, it should be reflected here # A trailing "/" must be included sonarWebContext: / # sonarWebContext: /sonarqube/ # If an ingress *path* is defined, it should be reflected here # sonar.web.context: /sonarqube initContainers: # image: 
busybox:1.32 # We allow the init containers to have a separate security context declaration because # the initContainer may not require the same as SonarQube. # securityContext: {} # We allow the init containers to have a separate resources declaration because # the initContainer does not take as much resources. resources: {} extraInitContainers: {} # Extra init containers to e.g. download required artifacts # - name: "prometheus-exporter-downloader" # image: "busybox" # imagePullPolicy: "IfNotPresent" # securityContext: # runAsNonRoot: true # command: # - wget # - "-O" # - "/downloads/jmx_prometheus_javaagent.jar" # - ${JMX_EXPORTER_URL} # volumeMounts: # - name: "downloads" # mountPath: "/downloads" extraContainers: [] ## Array of extra containers to run alongside the sonarqube container ## Example: # - name: "example-container" # image: "busybox" # imagePullPolicy: Always # command: ['sh', '-c', 'tail -f /dev/null'] ## Provide a secret containing one or more certificate files in the keys that will be added to cacerts ## The cacerts file will be set via SONARQUBE_WEB_JVM_OPTS and SONAR_CE_JAVAOPTS ## # caCerts: # image: adoptopenjdk/openjdk11:alpine # secret: my-secret initSysctl: enabled: true vmMaxMapCount: 524288 fsFileMax: 131072 nofile: 131072 nproc: 8192 # image: busybox:1.32 securityContext: privileged: true # resources: {} # List of plugins to install. 
# For example: # plugins: # install: # - "https://github.com/AmadeusITGroup/sonar-stash/releases/download/1.3.0/sonar-stash-plugin-1.3.0.jar" # - "https://github.com/SonarSource/sonar-ldap/releases/download/2.2-RC3/sonar-ldap-plugin-2.2.0.601.jar" plugins: install: [] lib: [] # For use behind a corporate proxy when downloading plugins # httpProxy: "" # httpsProxy: "" # noProxy: "" # image: rjkernick/alpine-wget:latest # resources: {} # .netrc secret file with a key "netrc" to use basic auth while downloading plugins # netrcCreds: "" # Set to true to not validate the server's certificate to download plugin noCheckCertificate: false ## Values to add to SONARQUBE_WEB_JVM_OPTS ## # jvmOpts: "-Djava.net.preferIPv4Stack=true" jvmOpts: "" ## Environment variables to attach to the pods ## # env: # - name: VARIABLE # value: my-value # Set annotations for pods annotations: {} resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi persistence: enabled: false ## Set annotations on pvc annotations: {} ## Specify an existing volume claim instead of creating a new one. ## When using this option all following options like storageClass, accessMode and size are ignored. # existingClaim: ## If defined, storageClassName: ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## storageClass: accessMode: ReadWriteOnce size: 10Gi ## Specify extra volumes. 
Refer to ".spec.volumes" specification : https://kubernetes.io/fr/docs/concepts/storage/volumes/ volumes: [] ## Specify extra mounts. Refer to ".spec.containers.volumeMounts" specification : https://kubernetes.io/fr/docs/concepts/storage/volumes/ mounts: [] # In case you want to specify different resources for emptyDir than {} emptyDir: {} # Example of resouces that might be used: # medium: Memory # sizeLimit: 16Mi # A custom sonar.properties file can be provided via dictionary. # For example: # sonarProperties: # sonar.forceAuthentication: true # sonar.security.realm: LDAP # ldap.url: ldaps://organization.com # Additional sonar properties to load from a secret with a key "secret.properties" (must be a string) # sonarSecretProperties: # Kubernetes secret that contains the encryption key for the sonarqube instance. # The secret must contain the key 'sonar-secret.txt'. # The 'sonar.secretKeyPath' property will be set automatically. # sonarSecretKey: "settings-encryption-secret" ## JDBC Database Type; by default postgresql. To use a different Database type, adjust jdbcDatabaseType: postgresql createPostgresqlSecret: true # create the postgresql secret in Sonarqube chart, outside of the postgresql chart. 
## Override JDBC URL # jdbcUrlOverride: "jdbc:postgresql://myPostgress/myDatabase;socketTimeout=1500" ## Configuration values for postgresql dependency ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md postgresql: # Enable to deploy the PostgreSQL chart enabled: true # To use an external PostgreSQL instance, set enabled to false and uncomment # the line below: # postgresqlServer: "" # To use an external secret for the password for an external PostgreSQL # instance, set enabled to false and provide the name of the secret on the # line below: postgresqlUsername: "sonarUser" postgresqlPassword: "" postgresqlDatabase: "sonarDB" existingSecret: sonar-postgresql # This is the full name of the secret that will be created secretKey: postgresql-password # Specify the TCP port that PostgreSQL should use service: port: 5432 resources: limits: cpu: 2 memory: 2Gi requests: cpu: 100m memory: 200Mi persistence: enabled: true accessMode: ReadWriteOnce size: 20Gi storageClass: securityContext: # For standard Kubernetes deployment, set enabled=true # If using OpenShift, enabled=false for restricted SCC and enabled=true for anyuid/nonroot SCC enabled: true # fsGroup and runAsUser specifications below are not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. # postgresql dockerfile sets user as 1001 fsGroup: 1001 runAsUser: 1001 volumePermissions: # For standard Kubernetes deployment, set enabled=false # For OpenShift, set enabled=true and ensure to set volumepermissions.securitycontext.runAsUser below. 
enabled: false # if using restricted SCC set runAsUser: "auto" and if running under anyuid/nonroot SCC - runAsUser needs to match runAsUser above securityContext: runAsUser: 0 shmVolume: chmod: enabled: false serviceAccount: ## If enabled = true, and name is not set, postgreSQL will create a serviceAccount enabled: false # name: # Additional labels to add to the pods: # podLabels: # key: value podLabels: {} # For compatibility with 8.0 replace by "/opt/sq" # For compatibility with 8.2, leave the default. They changed it back to /opt/sonarqube sonarqubeFolder: /opt/sonarqube tests: enabled: true # image: bitnami/minideb-extras serviceAccount: create: false # name: ## Annotations for the Service Account annotations: {} # extraConfig is used to load Environment Variables from Secrets and ConfigMaps # which may have been written by other tools, such as external orchestrators. # # These Secrets/ConfigMaps are expected to contain Key/Value pairs, such as: # # apiVersion: v1 # kind: ConfigMap # metadata: # name: external-sonarqube-opts # data: # SONARQUBE_JDBC_USERNAME: foo # SONARQUBE_JDBC_URL: jdbc:postgresql://db.example.com:5432/sonar # # These vars can then be injected into the environment by uncommenting the following: # # extraConfig: # configmaps: # - external-sonarqube-opts extraConfig: secrets: [] configmaps: [] # account: # adminPassword: admin # currentAdminPassword: admin # curlContainerImage: curlimages/curl:latest # adminJobAnnotations: {} terminationGracePeriodSeconds: 60 ================================================ FILE: charts/sonatype-nexus/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj # OWNERS file for Kubernetes OWNERS *.tar ================================================ FILE: charts/sonatype-nexus/Chart.yaml ================================================ apiVersion: v1 name: sonatype-nexus version: 5.5.0 appVersion: 3.38.1 description: Sonatype Nexus is an open source repository manager keywords: - artifacts - dependency - management - sonatype - nexus - repository home: https://www.sonatype.com/nexus-repository-oss icon: https://www.sonatype.com/hubfs/2021%20Design%20System/Logo_vert@2x.png sources: - https://github.com/sonatype/nexus-public - https://github.com/travelaudience/docker-nexus - https://github.com/travelaudience/kubernetes-nexus - https://github.com/travelaudience/docker-nexus-backup - https://github.com/dbccompany/docker-nexus-backup deprecated: true ================================================ FILE: charts/sonatype-nexus/README.md ================================================ # Nexus ***This chart has been deprecated, please use one of the other helm charts below*** ## Alternative Helm Charts * [Sonatype's Chart](https://github.com/sonatype/helm3-charts) * [PlatformOne's chart](https://repo1.dso.mil/platform-one/big-bang/apps/developer-tools/nexus) * [Artifact Hub](https://artifacthub.io) ## Nexus information [Nexus OSS](https://www.sonatype.com/nexus-repository-oss) is a free open source repository manager. It supports a wide range of package formats and it's used by hundreds of tech companies. ## Introduction This chart bootstraps a Nexus OSS deployment on a cluster using Helm. This setup is best configured in [GCP](https://cloud.google.com/) since: - [google cloud storage](https://cloud.google.com/storage/) is used for backups - [NEW: Rclone](https://rclone.org/) it uses Rclone to create backups, basically compatible with all the major clouds. 
- [GCE Ingress controller](https://github.com/kubernetes/ingress/blob/master/docs/faq/gce.md) is used for using a pre-allocated static IP in GCE. There is also the option of using a [proxy for Nexus](https://github.com/travelaudience/nexus-proxy) that authenticates Nexus against an external identity provider (only GCP IAM at the moment) which is **disabled** by default. ## Prerequisites - Kubernetes 1.15+ with Beta APIs enabled - PV provisioner support in the underlying infrastructure - [Fulfill Nexus kubernetes requirements](https://github.com/travelaudience/kubernetes-nexus#pre-requisites) ### With GCP IAM enabled All the [Prerequisites](#Prerequisites) should be in place, plus: - [Fulfill GCP IAM requirements](https://github.com/travelaudience/kubernetes-nexus/blob/master/docs/admin/configuring-nexus-proxy.md#pre-requisites) ## Testing the Chart To test the chart: ```bash helm install --dry-run --debug ./ ``` To test the chart with your own values: ```bash helm install --dry-run --debug -f my_values.yaml ./ ``` ## Installing the Chart To install the chart: ```bash helm repo add oteemocharts https://oteemo.github.io/charts helm install sonatype-nexus oteemocharts/sonatype-nexus ``` The above command deploys Nexus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. The default login is admin/admin123 ## Uninstalling the Chart To uninstall/delete the deployment: ```bash $ helm list NAME REVISION UPDATED STATUS CHART NAMESPACE plinking-gopher 1 Fri Sep 1 13:19:50 2017 DEPLOYED sonatype-nexus-0.1.0 default $ helm delete plinking-gopher ``` The command removes all the Kubernetes components associated with the chart and deletes the release. ## Official Nexus image vs TravelAudience There are known issues with backups on the official image. If you want to swap in the official image, just override the values when installing the chart. 
Please note that backups will not work as expected with the official image. - [https://issues.sonatype.org/browse/NEXUS-23442](https://issues.sonatype.org/browse/NEXUS-23442) - [https://github.com/travelaudience/docker-nexus](https://github.com/travelaudience/docker-nexus) ## Configuration The following table lists the configurable parameters of the Nexus chart and their default values. | Parameter | Description | Default | | ------------------------------------------------------------ | ---------------------------------- | ----------------------------------------| | `namespaceOverride` | Override for namespace | `nil` | | `statefulset.enabled` | Use statefulset instead of deployment | `false` | | `replicaCount` | Number of Nexus service replicas | `1` | | `deploymentStrategy` | Deployment Strategy | `rollingUpdate` | | `initAdminPassword.enabled` | Enable initialization of admin password on Helm install | `false` | | `initAdminPassword.defaultPasswordOverride` | Override the default admin password | `nil` | | `initAdminPassword.password` | Admin password to be set | `admin321` | | `nexus.imageName` | Nexus image | `quay.io/travelaudience/docker-nexus` | | `nexus.imageTag` | Version of Nexus | `3.25.1` | | `nexus.imagePullPolicy` | Nexus image pull policy | `IfNotPresent` | | `nexus.imagePullSecret` | Secret to download Nexus image from private registry | `nil` | | `nexus.env` | Nexus environment variables | `[{install4jAddVmParams: -Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap}]` | | `nexus.resources` | Nexus resource requests and limits | `{}` | | `nexus.dockerPort` | Port to access docker | `5003` | | `nexus.nexusPort` | Internal port for Nexus service | `8081` | | `nexus.additionalPorts` | expose additional ports | `[]` | | `nexus.service.type` | Service for Nexus | `NodePort` | | `nexus.service.clusterIp` | Specific cluster IP when service type is cluster IP. 
Use None for headless service |`nil` | | `nexus.service.loadBalancerIP` | Custom loadBalancerIP |`nil` | | `nexus.securityContextEnabled` | Security Context (for enabling official image use `fsGroup: 200`) | `{}` | | `nexus.labels` | Service labels | `{}` | | `nexus.podAnnotations` | Pod Annotations | `{}` | `nexus.livenessProbe.initialDelaySeconds` | LivenessProbe initial delay | 30 | | `nexus.livenessProbe.periodSeconds` | Seconds between polls | 30 | | `nexus.livenessProbe.failureThreshold` | Number of attempts before failure | 6 | | `nexus.livenessProbe.timeoutSeconds` | Time in seconds after liveness probe times out | `nil` | | `nexus.livenessProbe.path` | Path for LivenessProbe | / | | `nexus.readinessProbe.initialDelaySeconds` | ReadinessProbe initial delay | 30 | | `nexus.readinessProbe.periodSeconds` | Seconds between polls | 30 | | `nexus.readinessProbe.failureThreshold` | Number of attempts before failure | 6 | | `nexus.readinessProbe.timeoutSeconds` | Time in seconds after readiness probe times out | `nil` | | `nexus.readinessProbe.path` | Path for ReadinessProbe | / | | `nexus.startupProbe.initialDelaySeconds` | StartupProbe initial delay | 30 | | `nexus.startupProbe.periodSeconds` | Seconds between polls | 30 | | `nexus.startupProbe.failureThreshold` | Number of attempts before failure | 6 | | `nexus.startupProbe.timeoutSeconds` | Time in seconds after startup probe times out | `nil` | | `nexus.startupProbe.path` | Path for StartupProbe | / | | `nexus.hostAliases` | Aliases for IPs in /etc/hosts | [] | | `nexus.context` | Non-root path to run Nexus at | `nil` | | `nexus.chownNexusData` | Set false to not execute chown to the mounted nexus-data directory at startup | `true` | | `nexus.terminationGracePeriodSeconds` | Let Nexus terminate gracefully [More informations here](#graceful-shutdown-with-terminationGracePeriodSeconds). 
| `nil` | | `nexusProxy.enabled` | Enable nexus proxy | `true` | | `nexusProxy.svcName` | Nexus proxy service name | `nil` | | `nexusProxy.targetPort` | Container Port for Nexus proxy | `8080` | | `nexusProxy.port` | Port for exposing Nexus | `8080` | | `nexusProxy.imageName` | Proxy image | `quay.io/travelaudience/docker-nexus-proxy` | | `nexusProxy.imageTag` | Proxy image version | `2.6.0` | | `nexusProxy.imagePullPolicy` | Proxy image pull policy | `IfNotPresent` | | `nexusProxy.resources` | Proxy resource requests and limits | `{}` | | `nexusProxy.env.nexusHttpHost` | Nexus url to access Nexus | `nil` | | `nexusProxy.env.nexusDockerHost` | Containers url to be used with docker | `nil` | | `nexusProxy.env.enforceHttps` | Allow only https access or not | `false` | | `nexusProxy.env.cloudIamAuthEnabled` | Enable GCP IAM authentication in Nexus proxy | `false` | | `nexusProxyRoute.enabled` | Set to true to create route for additional service | `false` | | `nexusProxyRoute.labels` | Labels to be added to proxy route | `{}` | | `nexusProxyRoute.annotations` | Annotations to be added to proxy route | `{}` | | `nexusProxyRoute.path` | Host name of Route e.g jenkins.example.com | nil | | `persistence.enabled` | Create a volume for storage | `true` | | `persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` | | `persistence.storageClass` | Storage class of Nexus PVC | `nil` | | `persistence.storageSize` | Size of Nexus data volume | `8Gi` | | `persistence.annotations` | Persistent Volume annotations | `{}` | | `persistence.existingClaim` | Existing PVC name | `nil` | | `nexusBackup.enabled` | Nexus backup process | `false` | | `nexusBackup.imageName` | Nexus backup image | `dbcc/docker-nexus-backup` | | `nexusBackup.imageTag` | Nexus backup image version | `0.0.1` | | `nexusBackup.imagePullPolicy` | Backup image pull policy | `IfNotPresent` | | `nexusBackup.env.rcloneRemote` | Required if `nexusBackup` is enabled. 
Name of the Rclone remote as defined in the `rcloneConfig` entry. Example: `AWS` | `nil` | | `nexusBackup.env.targetBucket` | Required if `nexusBackup` is enabled. Name of the target bucket or bucket/path. Example: `my_bucket` or `my_bucket/my_folder` | `nil` | | `nexusBackup.env.streamingUploadCutoff` | Size of the data chunks to send to the Rclone remote, this value affects the maximum size of the backup file to upload. | `"5000000"` | | `nexusBackup.env.nexusAuthorization` | If set, `nexusBackup.nexusAdminPassword` will be disregarded. | `nil` | | `nexusBackup.env.offlineRepos` | Space separated list of repositories must be taken down to achieve a consistent backup. | `"maven-central maven-public maven-releases maven-snapshots"` | | `nexusBackup.env.gracePeriod` | The amount of time in seconds to wait between stopping repositories and starting the upload. | `60` | | `nexusBackup.nexusAdminPassword` | Nexus admin password used by the backup container to access Nexus API. This password should match the one that gets chosen by the user to replace the default admin password after the first login | `admin123` | | `nexusBackup.persistence.enabled` | Create a volume for backing Nexus configuration | `true` | | `nexusBackup.persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` | | `nexusBackup.persistence.storageClass` | Storage class of Nexus backup PVC | `nil` | | `nexusBackup.persistence.storageSize` | Size of Nexus backup data volume | `8Gi` | | `nexusBackup.persistence.annotations` | PV annotations for backup | `{}` | | `nexusBackup.persistence.existingClaim` | Existing PVC name for backup | `nil` | | `nexusBackup.resources` | Backup resource requests and limits | `{}` | | `nexusBackup.rcloneConfig.rclone.conf` | Rclone remote configuration, can be generated using the `rclone config` command, or using docker: `docker run -it --rm rclone/rclone config` | `[AWS]`
`type = s3`
`provider = AWS`
`env_auth = true`
`region = us-east-1`
`acl = authenticated-read` | | `nexusCloudiam.enabled` | Nexus Cloud IAM service account key path | `false` | | `nexusCloudiam.persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` | | `nexusCloudiam.persistence.annotations` | PV annotations for Cloud IAM service account key path | `{}` | | `nexusCloudiam.persistence.enabled` | Create a volume for Cloud IAM service account key path | `true` | | `nexusCloudiam.persistence.existingClaim` | Existing PVC name for Cloud IAM service account key path | `nil` | | `nexusCloudiam.persistence.storageClass` | Storage class of Cloud IAM service account path PVC | `nil` | | `nexusCloudiam.persistence.storageSize` | Size of Cloud IAM service account path volume | `8Gi` | | `ingress.enabled` | Create an ingress for Nexus | `false` | | `ingress.annotations` | Annotations to enhance ingress configuration | `{}` | | `ingress.tls.enabled` | Enable TLS | `true` | | `ingress.tls.secretName` | Name of the secret storing TLS cert, `false` to use the Ingress' default certificate | `nexus-tls` | | `ingress.tls.hosts` | Custom TLS hosts configuration | `{}` | | `ingress.path` | Path for ingress rules. GCP users should set to `/*` | `/` | | `ingressDocker.enabled` | Create an ingress for Docker registry | `false` | | `ingressDocker.annotations` | Annotations to enhance docker ingress configuration | `{}` | | `ingressDocker.tls.enabled` | Enable TLS | `true` | | `ingressDocker.tls.secretName` | Name of the secret storing TLS cert, `false` to use the Ingress' default certificate | `nexus-tls` | | `ingressDocker.tls.hosts` | Custom TLS hosts configuration | `{}` | | `ingressDocker.path` | Path for docker ingress rules. 
GCP users should set to `/*` | `/` | | `tolerations` | tolerations list | `[]` | | `config.enabled` | Enable configmap | `false` | | `config.mountPath` | Path to mount the config | `/sonatype-nexus-conf` | | `config.data` | Configmap data | `nil` | | `deployment.annotations` | Annotations to enhance deployment configuration | `{}` | | `deployment.initContainers` | Init containers to run before main containers | `nil` | | `deployment.postStart.command` | Command to run after starting the nexus container | `nil` | | `deployment.additionalContainers` | Add additional Container | `nil` | | `deployment.additionalVolumes` | Add additional Volumes | `nil` | | `deployment.additionalVolumeMounts` | Add additional Volume mounts | `nil` | | `secret.enabled` | Enable secret | `false` | | `secret.mountPath` | Path to mount the secret | `/etc/secret-volume` | | `secret.readOnly` | Secret readonly state | `true` | | `secret.data` | Secret data to add to secret. If nil then expects that a secret by name of `${.Values.nameOverride}-secret` or `${.Chart.Name}-secret` exists | `nil` | | `service.enabled` | Enable additional service | `nil` | | `service.name` | Service name | `nil` | | `service.portName` | Service port name | `nil` | | `service.labels` | Service labels | `nil` | | `service.annotations` | Service annotations | `nil` | | `service.loadBalancerSourceRanges` | Service LoadBalancer source IP whitelist | `nil` | | `service.loadBalancerIP` | Custom loadBalancerIP |`nil` | | `service.targetPort` | Service port | `nil` | | `service.port` | Port for exposing service | `nil` | | `serviceAccount.create` | Automatically create a service account | `true` | | `serviceAccount.name` | Service account to use | `nil` | | `serviceAccount.annotations` | Service account annotations | `nil` | | `rbac.create` | Creates a ClusterRoleBinding attached to the Service account. | `false` | | `rbac.roleRef` | ClusterRoleBinding field `roleRef` content. 
See examples [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-example). | `nil` | | `rbac.annotations` | ClusterRoleBinding annotations. | `nil` | | `route.enabled` | Set to true to create route for additional service | `false` | | `route.name` | Name of route | `docker` | | `route.portName` | Target port name of service | `docker` | | `route.labels` | Labels to be added to route | `{}` | | `route.annotations` | Annotations to be added to route | `{}` | | `route.path` | Host name of Route e.g jenkins.example.com | nil | | `additionalConfigMaps` | List of ConfigMap data containing Name, Data and Labels | nil | If `nexusProxy.env.cloudIamAuthEnabled` is set to `true` the following variables need to be configured | Parameter | Description | Default | | ----------------------------- | ---------------------------------- | ---------------------------------------------------- | | `nexusProxy.env.clientId` | GCP OAuth client ID | `nil` | | `nexusProxy.env.clientSecret` | GCP OAuth client Secret | `nil` | | `nexusProxy.env.organizationId` | GCP organization ID | `nil` | | `nexusProxy.env.redirectUrl` | OAuth callback url. example `https://nexus.example.com/oauth/callback` | `nil` | | `nexusProxy.env.requiredMembershipVerification` | Whether users presenting valid JWT tokens must still be verified for membership within the GCP organization. | `true` | | `nexusProxy.secrets.keystore` | base-64 encoded value of the keystore file needed for the proxy to sign user tokens. Example: cat keystore.jceks | base64 | `nil` | | `nexusProxy.secrets.password` | Password to the Java Keystore file | `nil` | ```bash helm install --set persistence.enabled=false my-release oteemocharts/sonatype-nexus ``` The above example turns off the persistence. Data will not be kept between restarts or deployments Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, ```bash helm install -f my-values.yaml sonatype-nexus oteemocharts/sonatype-nexus ``` ### Persistence By default a PersistentVolumeClaim is created and mounted into the `/nexus-data` directory. In order to disable this functionality you can change the `values.yaml` to disable persistence which will use an `emptyDir` instead. > *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* You must enable StatefulSet (`statefulset.enabled=true`) for true data persistence. If using Deployment approach, you can not recover data after restart or delete of helm chart. Statefulset will make sure that it picks up the same old volume which was used by the previous life of the nexus pod, helping you recover your data. When enabling statefulset, its required to enable the persistence. ### Recommended settings As a minimum for running in production, the following settings are advised: ```yaml nexusProxy: env: nexusDockerHost: container.example.com nexusHttpHost: nexus.example.com nexusBackup: env: targetBucket: "gs://my-nexus-backup" persistence: storageClass: standard ingress: enabled: true annotations: kubernetes.io/ingress.class: gce kubernetes.io/tls-acme: true persistence: storageClass: standard storageSize: 1024Gi resources: requests: cpu: 250m # Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem # and https://twitter.com/analytically/status/894592422382063616: # Xms == Xmx # Xmx <= 4G # MaxDirectMemory >= 2G # Xmx + MaxDirectMemory <= RAM * 2/3 (hence the request for 4800Mi) # MaxRAMFraction=1 is not being set as it would allow the heap # to use all the available memory. 
memory: 4800Mi ``` ### Using GCP Storage for Backup Irrespective of whether Nexus is deployed to Google's GKE, or to some other k8s installation, it is possible to configure the [nexus-backup](https://github.com/travelaudience/docker-nexus-backup) container to back up to GCP Cloud Storage. This makes for a cost-effective solution for backups. To enable, add the following key to the values file: ```yaml nexusCloudiam: enabled: true ``` You should also deploy Nexus as a stateful app, rather than a deployment. That means also adding: ```yaml statefulset: enabled: true ``` Deploying the chart now will result in a new PV and PVC within the pod that runs the containers. Create a service account with privileges to upload to your GCP bucket, and create a key for this service account. Download that service account key as a file, call it `service-account-key.json`. This file now needs to be made available to the pod running in k8s, and should be called `/nexus-data/cloudiam/service-account-key.json`. How this is done will depend upon the storage class used for the PV. Confirm that the service account file is available to the pod, using: kubectl exec --stdin --tty \ --container nexus-backup \ sonatype-nexus-0 \ -- find /nexus-data/cloudiam -type f You might need to scale the deployment to zero and back up to pick up the changes: kubectl scale --replicas=0 statefulset.apps/sonatype-nexus kubectl scale --replicas=1 statefulset.apps/sonatype-nexus ### Graceful shutdown with terminationGracePeriodSeconds Customizing terminationGracePeriodSeconds may be helpful to prevent OrientDB corruption during stop/start actions (e.g. upgrade). 
**WARNING**: It has no effect with the [default image of this chart](https://quay.io/repository/travelaudience/docker-nexus?tag=latest&tab=tags) because of this [issue](https://github.com/travelaudience/docker-nexus/issues/56). However, it can be useful when you switch to the official image [here](https://hub.docker.com/r/sonatype/nexus3/tags?page=1&ordering=last_updated) ## After Installing the Chart After installing the chart, a couple of actions still need to be done in order to use Nexus. Please follow the instructions below. ### Nexus Configuration The following steps need to be executed in order to use Nexus: - [Configure Nexus](https://github.com/travelaudience/kubernetes-nexus/blob/master/docs/admin/configuring-nexus.md) - [Configure Backups](https://github.com/travelaudience/kubernetes-nexus/blob/master/docs/admin/configuring-nexus.md#configure-backup) and if GCP IAM authentication is enabled, please also check: - [Enable GCP IAM authentication in Nexus](https://github.com/travelaudience/kubernetes-nexus/blob/master/docs/admin/configuring-nexus-proxy.md#enable-gcp-iam-auth) ### Nexus Usage To see how to use Nexus with different tools like Docker, Maven, Python, and so on, please check: - [Nexus Usage](https://github.com/travelaudience/kubernetes-nexus#usage) ### Disaster Recovery In a disaster recovery scenario, the latest backup made by the nexus-backup container should be restored. In order to achieve this, please follow the procedure described below: - [Restore Backups](https://github.com/travelaudience/kubernetes-nexus#restore) ================================================ FILE: charts/sonatype-nexus/templates/NOTES.txt ================================================ - To access Nexus: NOTE: It may take a few minutes for the ingress load balancer to become available or the backends to become HEALTHY. 
You can watch the status of the backends by running: `kubectl get ingress -o jsonpath='{.items[*].metadata.annotations.ingress\.kubernetes\.io/backends}'` To access Nexus you can check: {{- if .Values.nexusProxy.env.enforceHttps }} https://{{ .Values.nexusProxy.env.nexusHttpHost }} {{- else }} http://{{ .Values.nexusProxy.env.nexusHttpHost }} {{- end }} - Login with the following credentials username: admin {{- if .Values.initAdminPassword.enabled }} password: {{ .Values.initAdminPassword.password }} {{- else }} password: {{ .Values.nexusBackup.nexusAdminPassword }} {{- end }} {{- if .Values.initAdminPassword.enabled }} - Change Your password after the first login {{- if .Values.nexusBackup.enabled }} Once you login you should change your admin password to match the value of `nexusBackup.env.nexusAdminPassword` This is important for security reasons and also because backup container needs this password set for admin user to access Nexus API to run backups. {{- end }} {{- end }} - Next steps in configuration Please follow the link below to the README for nexus configuration, usage, backups and DR info: https://github.com/Oteemo/charts/tree/master/charts/sonatype-nexus#after-installing-the-chart ================================================ FILE: charts/sonatype-nexus/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "nexus.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} {{- define "nexus.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} {{/* Allow the release namespace to be overridden for multi-namespace deployments in combined charts. */}} {{- define "nexus.namespace" -}} {{- if .Values.namespaceOverride -}} {{- .Values.namespaceOverride -}} {{- else -}} {{- .Release.Namespace -}} {{- end -}} {{- end -}} {{/* Create a default fully qualified name for proxy keystore secret. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "nexus.proxy-ks.name" -}} {{- printf "%s-%s" (include "nexus.fullname" .) "proxy-ks" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Manage the labels for each entity */}} {{- define "nexus.labels" -}} app: {{ template "nexus.name" . }} fullname: {{ template "nexus.fullname" . }} chart: {{ .Chart.Name }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} {{- end -}} {{/* Create a fully qualified name for docker ingress. */}} {{- define "nexus.ingress.docker" -}} {{- printf "%s-%s" (include "nexus.fullname" .) "docker" | trunc 63 | trimSuffix "-" -}} {{- end -}} ================================================ FILE: charts/sonatype-nexus/templates/adtl-configmap.yaml ================================================ {{ $root := . 
}} {{- if .Values.additionalConfigMaps }} {{- range $cm := .Values.additionalConfigMaps }} --- apiVersion: v1 kind: ConfigMap metadata: name: {{ $cm.name }} namespace: {{ template "nexus.namespace" $root }} labels: {{ include "nexus.labels" $root | indent 4 }} {{- if $.Values.nexus.labels }} {{ toYaml $.Values.nexus.labels | indent 4 }} {{- end }} {{- if $cm.labels }} {{ toYaml $cm.labels | indent 4 }} {{- end }} data: {{ toYaml $cm.data | indent 2 }} {{- end }} {{- end }} ================================================ FILE: charts/sonatype-nexus/templates/backup-pv.yaml ================================================ {{- if and .Values.nexusBackup.enabled (not .Values.statefulset.enabled) }} {{- if .Values.nexusBackup.persistence.pdName -}} apiVersion: v1 kind: PersistentVolume metadata: name: {{ .Values.nexusBackup.persistence.pdName }} namespace: {{ template "nexus.namespace" . }} labels: {{ include "nexus.labels" . | indent 4 }} {{- if .Values.nexus.labels }} {{ toYaml .Values.nexus.labels | indent 4 }} {{- end }} spec: capacity: storage: {{ .Values.nexusBackup.persistence.storageSize }} accessModes: - ReadWriteOnce claimRef: name: {{ template "nexus.fullname" . }}-backup namespace: {{ .Release.Namespace }} gcePersistentDisk: pdName: {{ .Values.nexusBackup.persistence.pdName }} fsType: {{ .Values.nexusBackup.persistence.fsType }} {{- end }} {{- end }} ================================================ FILE: charts/sonatype-nexus/templates/backup-pvc.yaml ================================================ {{- if and .Values.nexusBackup.enabled (not .Values.statefulset.enabled) }} {{- if and .Values.nexusBackup.persistence.enabled (not .Values.nexusBackup.persistence.existingClaim) }} kind: PersistentVolumeClaim apiVersion: v1 metadata: name: {{ template "nexus.fullname" . }}-backup namespace: {{ template "nexus.namespace" . }} labels: {{ include "nexus.labels" . 
| indent 4 }} {{- if .Values.nexus.labels }} {{ toYaml .Values.nexus.labels | indent 4 }} {{- end }} {{- if .Values.nexusBackup.persistence.annotations }} annotations: {{ toYaml .Values.nexusBackup.persistence.annotations | indent 4 }} {{- end }} spec: accessModes: - {{ .Values.nexusBackup.persistence.accessMode }} resources: requests: storage: {{ .Values.nexusBackup.persistence.storageSize | quote }} {{- if .Values.nexusBackup.persistence.storageClass }} {{- if (eq "-" .Values.nexusBackup.persistence.storageClass) }} storageClassName: "" {{- else }} storageClassName: "{{ .Values.nexusBackup.persistence.storageClass }}" {{- end }} {{- end }} {{- end }} {{- end }} ================================================ FILE: charts/sonatype-nexus/templates/backup-secret.yaml ================================================ {{- if and .Values.nexusBackup.enabled (not .Values.nexusBackup.env.nexusAuthorization) }} apiVersion: v1 kind: Secret metadata: name: {{ template "nexus.fullname" . }} namespace: {{ template "nexus.namespace" . }} labels: {{ include "nexus.labels" . | indent 4 }} {{- if .Values.nexus.labels }} {{ toYaml .Values.nexus.labels | indent 4 }} {{- end }} type: Opaque data: nexus.nexusAdminPassword: {{ printf "%s%s" "Basic " (printf "%s%s" "admin:" .Values.nexusBackup.nexusAdminPassword | b64enc) | cat | b64enc | quote }} {{- end }} ================================================ FILE: charts/sonatype-nexus/templates/cloudiam-pv.yaml ================================================ {{- if and .Values.nexusCloudiam.enabled (not .Values.statefulset.enabled) }} {{- if .Values.nexusCloudiam.persistence.pdName -}} apiVersion: v1 kind: PersistentVolume metadata: name: {{ .Values.nexusCloudiam.persistence.pdName }} namespace: {{ template "nexus.namespace" . }} labels: {{ include "nexus.labels" . | indent 4 }} spec: capacity: storage: {{ .Values.nexusCloudiam.persistence.storageSize }} accessModes: - ReadWriteOnce claimRef: name: {{ template "nexus.fullname" . 
}}-cloudiam
    namespace: {{ .Release.Namespace }}
  gcePersistentDisk:
    pdName: {{ .Values.nexusCloudiam.persistence.pdName }}
    fsType: {{ .Values.nexusCloudiam.persistence.fsType }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/cloudiam-pvc.yaml
================================================
{{- /*
  PVC for the cloud-IAM service-account key volume, Deployment mode only;
  mirrors backup-pvc.yaml (existingClaim short-circuits creation, "-" storage
  class disables dynamic provisioning).
*/}}
{{- if and .Values.nexusCloudiam.enabled (not .Values.statefulset.enabled) }}
{{- if and .Values.nexusCloudiam.persistence.enabled (not .Values.nexusCloudiam.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "nexus.fullname" . }}-cloudiam
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
{{- if .Values.nexusCloudiam.persistence.annotations }}
  annotations:
{{ toYaml .Values.nexusCloudiam.persistence.annotations | indent 4 }}
{{- end }}
spec:
  accessModes:
    - {{ .Values.nexusCloudiam.persistence.accessMode }}
  resources:
    requests:
      storage: {{ .Values.nexusCloudiam.persistence.storageSize | quote }}
{{- if .Values.nexusCloudiam.persistence.storageClass }}
{{- if (eq "-" .Values.nexusCloudiam.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.nexusCloudiam.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/clusterrolebinding.yaml
================================================
{{- /*
  Binds the chart's ServiceAccount to the role supplied in .Values.rbac.roleRef.
  FIX: rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22; the
  stable v1 API (available since 1.8) has an identical ClusterRoleBinding
  schema, so this is a drop-in replacement.
*/}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "nexus.fullname" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- with .Values.rbac.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
roleRef:
{{- with .Values.rbac.roleRef }}
{{ toYaml . | indent 2 }}
{{- end }}
subjects:
  - kind: ServiceAccount
{{- if .Values.serviceAccount.name }}
    name: {{ .Values.serviceAccount.name }}
{{- else }}
    name: {{ template "nexus.fullname" . }}
{{- end }}
    namespace: {{ template "nexus.namespace" . }}
{{- end -}}


================================================
FILE: charts/sonatype-nexus/templates/configmap.yaml
================================================
{{- /*
  Optional ConfigMap mounted into the nexus container at .Values.config.mountPath
  (see deployment-statefulset.yaml); contents come verbatim from .Values.config.data.
*/}}
{{- if .Values.config.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "nexus.name" . }}-conf
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
data:
{{ toYaml .Values.config.data | indent 2 }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/deployment-statefulset.yaml
================================================
{{- /*
  Single template emitting either a StatefulSet (statefulset.enabled) or a
  Deployment. Both share the same pod spec: the nexus container plus optional
  nexus-proxy and nexus-backup sidecars.
*/}}
{{- if .Values.statefulset.enabled }}
apiVersion: apps/v1
kind: StatefulSet
{{- else }}
apiVersion: apps/v1
kind: Deployment
{{- end }}
metadata:
  name: {{ template "nexus.fullname" . }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
{{- if .Values.deployment.annotations }}
  annotations:
{{ toYaml .Values.deployment.annotations | indent 4 }}
{{- end }}
spec:
  replicas: {{ .Values.replicaCount }}
{{- if .Values.statefulset.enabled }}
{{- /* serviceName is a StatefulSet-only field (headless service for stable DNS). */}}
{{- if .Values.nexusProxy.svcName }}
  serviceName: {{ .Values.nexusProxy.svcName }}
{{- else }}
  serviceName: {{ template "nexus.fullname" . }}
{{- end }}
{{- end }}
{{- if .Values.deploymentStrategy }}
  strategy:
{{ toYaml .Values.deploymentStrategy | indent 4 }}
{{- end }}
  selector:
    matchLabels:
      app: {{ template "nexus.name" .
}}
      release: {{ .Release.Name }}
  template:
    metadata:
{{- if .Values.nexus.podAnnotations }}
      annotations:
{{ toYaml .Values.nexus.podAnnotations | indent 8}}
{{- end }}
      labels:
        app: {{ template "nexus.name" . }}
        release: {{ .Release.Name }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 8 }}
{{- end }}
    spec:
{{- if .Values.deployment.initContainers }}
      initContainers:
{{ toYaml .Values.deployment.initContainers | indent 6 }}
{{- end }}
{{- if .Values.nexus.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nexus.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.nexus.hostAliases }}
      hostAliases:
{{ toYaml .Values.nexus.hostAliases | indent 8 }}
{{- end }}
{{- if .Values.nexus.imagePullSecret }}
      imagePullSecrets:
        - name: {{ .Values.nexus.imagePullSecret }}
{{- end }}
{{- /* Explicit serviceAccount.name wins over the chart-generated account. */}}
{{- if .Values.serviceAccount.name }}
      serviceAccountName: {{ .Values.serviceAccount.name | quote }}
{{- else if .Values.serviceAccount.create }}
      serviceAccountName: {{ template "nexus.fullname" . }}
{{- end }}
{{- if .Values.nexus.priorityClassName }}
      priorityClassName: {{ .Values.nexus.priorityClassName }}
{{- end }}
      containers:
{{- /* Main Nexus Repository Manager container. */}}
        - name: nexus
          image: {{ .Values.nexus.imageName }}:{{ .Values.nexus.imageTag }}
          imagePullPolicy: {{ .Values.nexus.imagePullPolicy }}
{{- if .Values.nexus.containerSecurityContext }}
          securityContext:
{{ toYaml .Values.nexus.containerSecurityContext | indent 12 }}
{{- end }}
{{- if .Values.deployment.postStart.command }}
          lifecycle:
            postStart:
              exec:
                command: {{ .Values.deployment.postStart.command }}
{{- end }}
{{- with .Values.nexus.envFrom }}
          envFrom:
            {{- toYaml . | nindent 12 }}
{{- end }}
          env:
{{ toYaml .Values.nexus.env | indent 12 }}
{{- /* NEXUS_DATA_CHOWN drives the image's chown -R of /nexus-data at startup. */}}
            - name: NEXUS_DATA_CHOWN
              value: {{ .Values.nexus.chownNexusData | quote }}
{{- if .Values.nexus.context }}
            - name: NEXUS_CONTEXT
              value: {{ .Values.nexus.context }}
{{- end }}
          resources:
{{ toYaml .Values.nexus.resources | indent 12 }}
          ports:
            - containerPort: {{ .Values.nexus.dockerPort }}
              name: nexus-docker-g
            - containerPort: {{ .Values.nexus.nexusPort }}
              name: nexus-http
{{- with .Values.nexus.additionalPorts }}
{{ toYaml . | indent 12 }}
{{- end }}
          livenessProbe:
            httpGet:
              path: {{ .Values.nexus.livenessProbe.path }}
              port: {{ .Values.nexus.nexusPort }}
            initialDelaySeconds: {{ .Values.nexus.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.nexus.livenessProbe.periodSeconds }}
            failureThreshold: {{ .Values.nexus.livenessProbe.failureThreshold }}
{{- if .Values.nexus.livenessProbe.timeoutSeconds }}
            timeoutSeconds: {{ .Values.nexus.livenessProbe.timeoutSeconds }}
{{- end }}
          readinessProbe:
            httpGet:
              path: {{ .Values.nexus.readinessProbe.path }}
              port: {{ .Values.nexus.nexusPort }}
            initialDelaySeconds: {{ .Values.nexus.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.nexus.readinessProbe.periodSeconds }}
            failureThreshold: {{ .Values.nexus.readinessProbe.failureThreshold }}
{{- if .Values.nexus.readinessProbe.timeoutSeconds }}
            timeoutSeconds: {{ .Values.nexus.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.nexus.startupProbe }}
          startupProbe:
            httpGet:
              path: {{ .Values.nexus.startupProbe.path }}
              port: {{ .Values.nexus.nexusPort }}
            initialDelaySeconds: {{ .Values.nexus.startupProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.nexus.startupProbe.periodSeconds }}
            failureThreshold: {{ .Values.nexus.startupProbe.failureThreshold }}
{{- if .Values.nexus.startupProbe.timeoutSeconds }}
            timeoutSeconds: {{ .Values.nexus.startupProbe.timeoutSeconds }}
{{- end }}
{{- end }}
          volumeMounts:
            - mountPath: /nexus-data
              name: {{ template "nexus.fullname" . }}-data
{{- if .Values.nexusBackup.enabled }}
            - mountPath: /nexus-data/backup
              name: {{ template "nexus.fullname" . }}-backup
{{- end }}
{{- if .Values.nexusCloudiam.enabled }}
            - mountPath: /nexus-data/cloudiam
              name: {{ template "nexus.fullname" . }}-cloudiam
{{- end }}
{{- if .Values.config.enabled }}
            - mountPath: {{ .Values.config.mountPath }}
              name: {{ template "nexus.name" . }}-conf
{{- end }}
{{- if .Values.secret.enabled }}
            - mountPath: {{ .Values.secret.mountPath }}
              name: {{ template "nexus.name" . }}-secret
              readOnly: {{ .Values.secret.readOnly }}
{{- end }}
{{- if .Values.deployment.additionalVolumeMounts}}
{{ toYaml .Values.deployment.additionalVolumeMounts | indent 12 }}
{{- end }}
{{- /* Optional authenticating reverse-proxy sidecar in front of Nexus. */}}
{{- if .Values.nexusProxy.enabled }}
        - name: nexus-proxy
          image: {{ .Values.nexusProxy.imageName }}:{{ .Values.nexusProxy.imageTag }}
          resources:
{{ toYaml .Values.nexusProxy.resources | indent 12 }}
          imagePullPolicy: {{ .Values.nexusProxy.imagePullPolicy }}
{{- if .Values.nexusProxy.containerSecurityContext }}
          securityContext:
{{ toYaml .Values.nexusProxy.containerSecurityContext | indent 12 }}
{{- end }}
          env:
            - name: ALLOWED_USER_AGENTS_ON_ROOT_REGEX
              value: {{ default "GoogleHC" .Values.nexusProxy.env.allowedUserAgentsOnRootRegex | quote }}
            - name: CLOUD_IAM_AUTH_ENABLED
              value: {{ .Values.nexusProxy.env.cloudIamAuthEnabled | quote }}
            - name: BIND_PORT
              value: {{ .Values.nexusProxy.targetPort | quote }}
            - name: ENFORCE_HTTPS
              value: {{ .Values.nexusProxy.env.enforceHttps | quote }}
            - name: NEXUS_DOCKER_HOST
              value: {{ .Values.nexusProxy.env.nexusDockerHost | quote }}
            - name: NEXUS_HTTP_HOST
              value: {{ .Values.nexusProxy.env.nexusHttpHost | quote }}
            - name: UPSTREAM_DOCKER_PORT
              value: {{ .Values.nexus.dockerPort | quote }}
            - name: UPSTREAM_HTTP_PORT
              value: {{ .Values.nexus.nexusPort | quote }}
{{- /* Proxy runs in the same pod as Nexus, so upstream is always localhost. */}}
            - name: UPSTREAM_HOST
              value: "localhost"
{{- if .Values.nexusProxy.env.cloudIamAuthEnabled }}
            - name: NEXUS_RUT_HEADER
              value: "X-Forwarded-User"
            - name: CLIENT_ID
              value: {{ .Values.nexusProxy.env.clientId | quote }}
            - name: CLIENT_SECRET
              value: {{ .Values.nexusProxy.env.clientSecret | quote }}
            - name: ORGANIZATION_ID
              value: {{ .Values.nexusProxy.env.organizationId | quote }}
            - name: REDIRECT_URL
              value: {{ .Values.nexusProxy.env.redirectUrl | quote }}
            - name: KEYSTORE_PASS
              valueFrom:
                secretKeyRef:
                  name: {{ template "nexus.proxy-ks.name" . }}
                  key: password
            - name: KEYSTORE_PATH
              value: "/nexus-proxy-ks/keystore"
            - name: AUTH_CACHE_TTL
              value: "60000"
            - name: SESSION_TTL
              value: "86400000"
            - name: JWT_REQUIRES_MEMBERSHIP_VERIFICATION
              value: {{ .Values.nexusProxy.env.requiredMembershipVerification | quote }}
{{- end }}
{{- with .Values.nexusProxy.env.javaOptions }}
            - name: JAVA_TOOL_OPTIONS
              value: {{ . | quote }}
{{ end }}
          ports:
            - containerPort: {{ .Values.nexusProxy.targetPort }}
              name: nexus-proxy
{{- if .Values.nexusProxy.env.cloudIamAuthEnabled }}
          volumeMounts:
            - mountPath: /nexus-proxy-ks
              name: {{ template "nexus.proxy-ks.name" . }}
              readOnly: true
{{- end }}
{{- end }}
{{- /* Optional backup sidecar that ships /nexus-data/backup via rclone. */}}
{{- if .Values.nexusBackup.enabled }}
        - name: nexus-backup
          image: {{ .Values.nexusBackup.imageName }}:{{ .Values.nexusBackup.imageTag }}
          imagePullPolicy: {{ .Values.nexusBackup.imagePullPolicy }}
{{- if .Values.nexusBackup.containerSecurityContext }}
          securityContext:
{{ toYaml .Values.nexusBackup.containerSecurityContext | indent 12 }}
{{- end }}
          resources:
{{ toYaml .Values.nexusBackup.resources | indent 12 }}
          env:
{{- /* Auth header comes from the chart-generated Secret unless overridden. */}}
            - name: NEXUS_AUTHORIZATION
{{- if not .Values.nexusBackup.env.nexusAuthorization }}
              valueFrom:
                secretKeyRef:
                  key: nexus.nexusAdminPassword
                  name: {{ template "nexus.fullname" . }}
{{- else }}
              value: {{ .Values.nexusBackup.env.nexusAuthorization | quote }}
{{- end }}
            - name: NEXUS_BACKUP_DIRECTORY
              value: /nexus-data/backup
            - name: NEXUS_DATA_DIRECTORY
              value: /nexus-data
            - name: NEXUS_LOCAL_HOST_PORT
              value: "localhost:{{ .Values.nexus.nexusPort }}"
            - name: OFFLINE_REPOS
              value: {{ .Values.nexusBackup.env.offlineRepos | quote }}
            - name: TARGET_BUCKET
              value: {{ .Values.nexusBackup.env.targetBucket | quote }}
            - name: GRACE_PERIOD
              value: {{ .Values.nexusBackup.env.gracePeriod | quote }}
            - name: TRIGGER_FILE
              value: .backup
            - name: RCLONE_REMOTE
              value: {{ .Values.nexusBackup.env.rcloneRemote | quote }}
            - name: STREAMING_UPLOAD_CUTOFF
              value: {{ .Values.nexusBackup.env.streamingUploadCutoff | default "5000000" | quote }}
{{- if .Values.nexusCloudiam.enabled }}
            - name: CLOUD_IAM_SERVICE_ACCOUNT_KEY_PATH
              value: /nexus-data/cloudiam/service-account-key.json
{{- end }}
          volumeMounts:
            - mountPath: /nexus-data
              name: {{ template "nexus.fullname" . }}-data
            - mountPath: /nexus-data/backup
              name: {{ template "nexus.fullname" . }}-backup
{{- if .Values.nexusCloudiam.enabled }}
            - mountPath: /nexus-data/cloudiam
              name: {{ template "nexus.fullname" . }}-cloudiam
{{- end }}
            - mountPath: /root/.config/rclone
              name: {{ template "nexus.fullname" . }}-rclone-config
{{- end }}
{{- if .Values.deployment.additionalContainers }}
{{ toYaml .Values.deployment.additionalContainers | indent 8 }}
{{- end }}
{{- if .Values.nexus.securityContextEnabled }}
      securityContext:
{{ toYaml .Values.nexus.securityContext | indent 8 }}
{{- end }}
{{- /*
  Volume sources: StatefulSets get persistent storage from volumeClaimTemplates
  below (so only emptyDirs are declared here when persistence is off);
  Deployments reference the standalone PVCs (or an existingClaim).
*/}}
      volumes:
{{- if .Values.nexusBackup.enabled }}
        - name: {{ template "nexus.fullname" . }}-rclone-config
          secret:
            secretName: {{ template "nexus.name" . }}-rclone-config-secret
{{- end }}
{{- if .Values.nexusProxy.env.cloudIamAuthEnabled }}
        - name: {{ template "nexus.proxy-ks.name" . }}
          secret:
            secretName: {{ template "nexus.proxy-ks.name" . }}
{{- end }}
{{- if .Values.statefulset.enabled }}
{{- if not .Values.persistence.enabled }}
        - name: {{ template "nexus.fullname" . }}-data
          emptyDir: {}
{{- end }}
{{- if and .Values.nexusBackup.enabled (not .Values.nexusBackup.persistence.enabled) }}
        - name: {{ template "nexus.fullname" . }}-backup
          emptyDir: {}
{{- end }}
{{- if and .Values.nexusCloudiam.enabled (not .Values.nexusCloudiam.persistence.enabled) }}
        - name: {{ template "nexus.fullname" . }}-cloudiam
          emptyDir: {}
{{- end }}
{{- else }}
        - name: {{ template "nexus.fullname" . }}-data
{{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ .Values.persistence.existingClaim | default (printf "%s-%s" (include "nexus.fullname" .) "data") }}
{{- else }}
          emptyDir: {}
{{- end }}
{{- if .Values.nexusBackup.enabled }}
        - name: {{ template "nexus.fullname" . }}-backup
{{- if and .Values.nexusBackup.persistence.enabled .Values.nexusBackup.enabled }}
          persistentVolumeClaim:
            claimName: {{ .Values.nexusBackup.persistence.existingClaim | default (printf "%s-%s" (include "nexus.fullname" .) "backup") }}
{{- else }}
          emptyDir: {}
{{- end }}
{{- end }}
{{- if .Values.nexusCloudiam.enabled }}
        - name: {{ template "nexus.fullname" . }}-cloudiam
{{- if and .Values.nexusCloudiam.persistence.enabled .Values.nexusCloudiam.enabled }}
          persistentVolumeClaim:
            claimName: {{ .Values.nexusCloudiam.persistence.existingClaim | default (printf "%s-%s" (include "nexus.fullname" .) "cloudiam") }}
{{- else }}
          emptyDir: {}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.config.enabled }}
        - name: {{ template "nexus.name" . }}-conf
          configMap:
            name: {{ template "nexus.name" . }}-conf
{{- end }}
{{- if .Values.secret.enabled }}
        - name: {{ template "nexus.name" . }}-secret
          secret:
            secretName: {{ template "nexus.name" . }}-secret
{{- end }}
{{- if .Values.deployment.additionalVolumes }}
{{ toYaml .Values.deployment.additionalVolumes | indent 8 }}
{{- end }}
{{- if .Values.nexus.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ .Values.nexus.terminationGracePeriodSeconds }}
{{- end }}
{{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
{{- end }}
## create pvc in case of statefulsets
{{- if .Values.statefulset.enabled }}
  volumeClaimTemplates:
{{- if .Values.persistence.enabled }}
    - metadata:
        name: {{ template "nexus.fullname" . }}-data
        labels:
{{ include "nexus.labels" . | indent 10 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 10 }}
{{- end }}
{{- if .Values.persistence.annotations }}
        annotations:
{{ toYaml .Values.persistence.annotations | indent 10 }}
{{- end }}
      spec:
        accessModes:
          - {{ .Values.persistence.accessMode | quote }}
        resources:
          requests:
            storage: {{ .Values.persistence.storageSize | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
        storageClassName: ""
{{- else }}
        storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.nexusBackup.persistence.enabled }}
    - metadata:
        name: {{ template "nexus.fullname" . }}-backup
        labels:
{{ include "nexus.labels" . | indent 10 }}
{{- if .Values.nexusBackup.persistence.annotations }}
        annotations:
{{ toYaml .Values.nexusBackup.persistence.annotations | indent 10 }}
{{- end }}
      spec:
        accessModes:
          - {{ .Values.nexusBackup.persistence.accessMode }}
        resources:
          requests:
            storage: {{ .Values.nexusBackup.persistence.storageSize | quote }}
{{- if .Values.nexusBackup.persistence.storageClass }}
{{- if (eq "-" .Values.nexusBackup.persistence.storageClass) }}
        storageClassName: ""
{{- else }}
        storageClassName: "{{ .Values.nexusBackup.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.nexusCloudiam.persistence.enabled }}
    - metadata:
        name: {{ template "nexus.fullname" . }}-cloudiam
        labels:
{{ include "nexus.labels" . | indent 10 }}
{{- if .Values.nexusCloudiam.persistence.annotations }}
        annotations:
{{ toYaml .Values.nexusCloudiam.persistence.annotations | indent 10 }}
{{- end }}
      spec:
        accessModes:
          - {{ .Values.nexusCloudiam.persistence.accessMode }}
        resources:
          requests:
            storage: {{ .Values.nexusCloudiam.persistence.storageSize | quote }}
{{- if .Values.nexusCloudiam.persistence.storageClass }}
{{- if (eq "-" .Values.nexusCloudiam.persistence.storageClass) }}
        storageClassName: ""
{{- else }}
        storageClassName: "{{ .Values.nexusCloudiam.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/ingress-docker.yaml
================================================
{{- /*
  Separate Ingress for the Docker registry host. Routes to the proxy Service
  (and its port) when nexusProxy is enabled, otherwise straight to Nexus's
  docker connector port. API version is negotiated via .Capabilities.
*/}}
{{- if .Values.ingressDocker.enabled }}
{{- $serviceName := include "nexus.fullname" .
-}}
{{- if .Values.nexusProxy.svcName }}
{{- $serviceName = .Values.nexusProxy.svcName -}}
{{- end }}
{{- $servicePort := .Values.nexus.dockerPort -}}
{{- if .Values.nexusProxy.enabled }}
{{- $servicePort = .Values.nexusProxy.port -}}
{{- end }}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ template "nexus.ingress.docker" . }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- range $key, $value := .Values.ingressDocker.labels }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
  annotations:
{{- range $key, $value := .Values.ingressDocker.annotations }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.ingressDocker.className }}
  ingressClassName: {{ .Values.ingressDocker.className }}
{{- end }}
  rules:
{{- if .Values.nexusProxy.env.nexusDockerHost }}
    - host: {{ .Values.nexusProxy.env.nexusDockerHost }}
      http:
        paths:
          - path: {{ .Values.ingressDocker.path }}
{{- /* v1 and pre-v1 Ingress have different backend schemas — emit the right one. */}}
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
            pathType: {{ .Values.ingressDocker.pathType }}
            backend:
              service:
                name: {{ $serviceName }}
                port:
                  number: {{ $servicePort }}
{{- else }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- with .Values.ingressDocker.rules }}
  {{- toYaml . | nindent 4 }}
{{- end -}}
{{- if .Values.ingressDocker.tls.enabled }}
  tls:
    - hosts:
{{- if .Values.nexusProxy.env.nexusDockerHost }}
        - {{ .Values.nexusProxy.env.nexusDockerHost }}
{{- end }}
{{- if .Values.ingressDocker.tls.secretName }}
      secretName: {{ .Values.ingressDocker.tls.secretName | quote }}
{{- end }}
{{- with .Values.ingressDocker.tls.hosts }}
      {{- toYaml . | nindent 4 }}
{{- end -}}
{{- end -}}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/ingress.yaml
================================================
{{- /*
  Main HTTP Ingress; mirrors ingress-docker.yaml but keys off
  .Values.ingress.* and nexusProxy.env.nexusHttpHost, and defaults the backend
  port to the Nexus HTTP port instead of the docker connector port.
*/}}
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "nexus.fullname" . -}}
{{- if .Values.nexusProxy.svcName }}
{{- $serviceName = .Values.nexusProxy.svcName -}}
{{- end }}
{{- $servicePort := .Values.nexus.nexusPort -}}
{{- if .Values.nexusProxy.enabled }}
{{- $servicePort = .Values.nexusProxy.port -}}
{{- end }}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ template "nexus.fullname" . }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- range $key, $value := .Values.ingress.labels }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
  annotations:
{{- range $key, $value := .Values.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
spec:
{{- if .Values.ingress.className }}
  ingressClassName: {{ .Values.ingress.className }}
{{- end }}
  rules:
{{- if .Values.nexusProxy.env.nexusHttpHost }}
    - host: {{ .Values.nexusProxy.env.nexusHttpHost }}
      http:
        paths:
          - path: {{ .Values.ingress.path }}
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
            pathType: {{ .Values.ingress.pathType }}
            backend:
              service:
                name: {{ $serviceName }}
                port:
                  number: {{ $servicePort }}
{{- else }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
{{- end }}
{{- end }}
{{- with .Values.ingress.rules }}
  {{- toYaml . | nindent 4 }}
{{- end -}}
{{- if .Values.ingress.tls.enabled }}
  tls:
    - hosts:
{{- if .Values.nexusProxy.env.nexusHttpHost }}
        - {{ .Values.nexusProxy.env.nexusHttpHost }}
{{- end }}
{{- if .Values.ingress.tls.secretName }}
      secretName: {{ .Values.ingress.tls.secretName | quote }}
{{- end }}
{{- with .Values.ingress.tls.hosts }}
      {{- toYaml . | nindent 4 }}
{{- end -}}
{{- end -}}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/initpwd-job.yaml
================================================
{{- /*
  Post-install hook Job that sets the admin password via the Nexus REST API,
  plus the hook Secret feeding it credentials (weight -10 so the Secret exists
  before the Job runs; the Secret is deleted on hook success).
  NOTE: the "$(NEXUS_HOST)" etc. in the script are expanded by Kubernetes from
  the container env (dependent-variable $(VAR) expansion in command/args)
  before /bin/sh ever sees them — they are not shell command substitutions.
*/}}
{{- if .Values.initAdminPassword.enabled }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ template "nexus.fullname" . }}-initpwd-job
  annotations:
    helm.sh/hook: "post-install"
    helm.sh/hook-weight: "10"
    helm.sh/hook-delete-policy: "before-hook-creation"
  labels:
{{ include "nexus.labels" . | indent 4 }}
spec:
  backoffLimit: 5
  template:
    metadata:
      name: {{ template "nexus.fullname" . }}-initpwd
      labels:
        app: {{ template "nexus.name" . }}
        release: {{ .Release.Name }}
    spec:
      restartPolicy: Never
      containers:
        - name: nexus-initpwd
          image: curlimages/curl:7.73.0
          command:
            - /bin/sh
            - -ec
            - |
              curl --fail -X PUT \
                http://$(NEXUS_HOST)/service/rest/beta/security/users/admin/change-password \
                -H "Content-Type: text/plain" \
                -H "Authorization: Basic $(NEXUS_AUTHORIZATION)" \
                -d "$(ADMIN_PASSWORD)"
          env:
            - name: NEXUS_AUTHORIZATION
              valueFrom:
                secretKeyRef:
                  key: nexusAuthorization
                  name: {{ template "nexus.fullname" . }}-initpwd-secret
            - name: NEXUS_HOST
              value: {{ template "nexus.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.nexus.nexusPort }}
            - name: ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: adminPassword
                  name: {{ template "nexus.fullname" . }}-initpwd-secret
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "nexus.fullname" . }}-initpwd-secret
  annotations:
    helm.sh/hook: "post-install"
    helm.sh/hook-weight: "-10"
    helm.sh/hook-delete-policy: "hook-succeeded,before-hook-creation"
  labels:
{{ include "nexus.labels" .
| indent 4 }}
type: Opaque
data:
{{- /*
  Double b64enc is intentional: the inner one builds the Basic-auth credential
  string; the outer one is the mandatory Secret `data` encoding, decoded once
  by the kubelet before injection via secretKeyRef.
*/}}
  nexusAuthorization: {{ printf "%s%s" "admin:" (default "admin123" .Values.initAdminPassword.defaultPasswordOverride) | b64enc | b64enc }}
  adminPassword: {{ .Values.initAdminPassword.password | b64enc }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/proxy-ks-secret.yaml
================================================
{{- /*
  Keystore Secret for the nexus-proxy sidecar when Cloud IAM auth is on.
  `keystore` is expected to already be base64 in values (no b64enc applied);
  `password` is a plain string and is encoded here.
*/}}
{{- if .Values.nexusProxy.env.cloudIamAuthEnabled }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "nexus.proxy-ks.name" . }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
type: Opaque
data:
  keystore: {{ .Values.nexusProxy.secrets.keystore }}
  password: {{ .Values.nexusProxy.secrets.password | b64enc }}
{{- end}}


================================================
FILE: charts/sonatype-nexus/templates/proxy-route.yaml
================================================
{{- /*
  OpenShift Route (edge TLS, HTTP redirected) pointing at the proxy Service;
  falls back to the chart fullname for both port name and Service name when no
  explicit nexusProxy.svcName is set.
*/}}
{{- if .Values.nexusProxyRoute.enabled }}
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: {{ template "nexus.fullname" . }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{- range $key, $value := .Values.nexusProxyRoute.labels }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
  annotations:
{{- range $key, $value := .Values.nexusProxyRoute.annotations }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
spec:
  host: {{ .Values.nexusProxyRoute.path }}
  port:
{{- if .Values.nexusProxy.svcName }}
    targetPort: {{ .Values.nexusProxy.svcName }}
{{- else }}
    targetPort: {{ template "nexus.fullname" . }}
{{- end }}
  tls:
    insecureEdgeTerminationPolicy: Redirect
    termination: edge
  to:
    kind: Service
{{- if .Values.nexusProxy.svcName }}
    name: {{ .Values.nexusProxy.svcName }}
{{- else }}
    name: {{ template "nexus.fullname" . }}
{{- end }}
    weight: 100
  wildcardPolicy: None
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/proxy-svc.yaml
================================================
{{- /*
  Service in front of the pod. When the proxy sidecar is enabled it exposes a
  single proxy port; otherwise it exposes the Nexus HTTP port plus the docker
  connector port and any additionalPorts. Also created when only the ingress
  is enabled, since the ingress backends reference this Service.
*/}}
{{- if or .Values.nexusProxy.enabled .Values.ingress.enabled }}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.nexusProxy.svcName }}
  name: {{ .Values.nexusProxy.svcName }}
{{- else }}
  name: {{ template "nexus.fullname" . }}
{{- end }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
{{- if .Values.nexusProxy.labels }}
{{ toYaml .Values.nexusProxy.labels | indent 4 }}
{{- end }}
{{- if .Values.nexus.annotations }}
  annotations:
{{ toYaml .Values.nexus.annotations | indent 4 }}
{{- end }}
spec:
  ports:
{{- if .Values.nexusProxy.enabled }}
    - port: {{ .Values.nexusProxy.port }}
{{- else }}
    - port: {{ .Values.nexus.nexusPort }}
{{- end }}
{{- if .Values.nexusProxy.svcName }}
      name: {{ .Values.nexusProxy.svcName }}
{{- else }}
      name: {{ template "nexus.fullname" . }}
{{- end }}
      protocol: TCP
{{- if .Values.nexusProxy.enabled }}
      targetPort: {{ .Values.nexusProxy.targetPort }}
{{- else }}
      targetPort: {{ .Values.nexus.nexusPort }}
    - port: {{ .Values.nexus.dockerPort }}
      name: docker
      protocol: TCP
      targetPort: {{ .Values.nexus.dockerPort }}
{{- range $ports := .Values.nexus.additionalPorts }}
    - port: {{ $ports.containerPort }}
      name: {{ $ports.name }}
      protocol: TCP
      targetPort: {{ $ports.containerPort }}
{{- end }}
{{- end }}
  selector:
    app: {{ template "nexus.name" . }}
    release: {{ .Release.Name }}
  type: {{ .Values.nexus.service.type }}
{{- if and (eq .Values.nexus.service.type "ClusterIP") .Values.nexus.service.clusterIP }}
  clusterIP: {{ .Values.nexus.service.clusterIP }}
{{- end }}
{{- if and (eq .Values.nexus.service.type "LoadBalancer") .Values.nexus.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.nexus.service.loadBalancerIP }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/pv.yaml
================================================
{{- /*
  Pre-provisioned GCE PD PersistentVolume for the main /nexus-data volume,
  Deployment mode only; the claimRef pins it to the chart's "-data" PVC.
  NOTE(review): metadata.namespace on a cluster-scoped PV is ignored by the
  API server — kept for backward compatibility.
*/}}
{{- if not .Values.statefulset.enabled }}
{{- if .Values.persistence.pdName -}}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ .Values.persistence.pdName }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
spec:
  capacity:
    storage: {{ .Values.persistence.storageSize }}
  accessModes:
    - ReadWriteOnce
  claimRef:
    namespace: {{ .Release.Namespace }}
    name: {{ template "nexus.fullname" . }}-data
  gcePersistentDisk:
    pdName: {{ .Values.persistence.pdName }}
    fsType: {{ .Values.persistence.fsType }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/pvc.yaml
================================================
{{- /*
  PVC for the main /nexus-data volume, Deployment mode only (StatefulSets use
  volumeClaimTemplates); skipped when persistence.existingClaim is supplied.
*/}}
{{- if not .Values.statefulset.enabled }}
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "nexus.fullname" . }}-data
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" .
| indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
{{- if .Values.persistence.annotations }}
  annotations:
{{ toYaml .Values.persistence.annotations | indent 4 }}
{{- end }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.storageSize | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/rclone-config-secret.yaml
================================================
{{- /*
  Secret holding the rclone config files mounted into the nexus-backup sidecar
  at /root/.config/rclone. Each key of .Values.nexusBackup.rcloneConfig becomes
  one Secret entry; b64enc supplies the mandatory Secret `data` encoding.
*/}}
{{- if and .Values.nexusBackup.enabled -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "nexus.name" . }}-rclone-config-secret
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
data:
{{- if .Values.nexusBackup.rcloneConfig }}
{{- range $key, $val := .Values.nexusBackup.rcloneConfig }}
  {{ $key }}: |-
{{ $val | b64enc| indent 4}}
{{- end }}
{{- end -}}
{{- end}}


================================================
FILE: charts/sonatype-nexus/templates/route.yaml
================================================
{{- /*
  Generic OpenShift Route (edge TLS, HTTP redirected) targeting the plain
  Service from service.yaml (name falls back to "<name>-service").
*/}}
{{- if .Values.route.enabled }}
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: {{ .Values.route.name }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{- range $key, $value := .Values.route.labels }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
  annotations:
{{- range $key, $value := .Values.route.annotations }}
    {{ $key }}: {{ $value | quote }}
{{- end }}
spec:
  host: {{ .Values.route.path }}
  port:
    targetPort: {{ .Values.route.portName }}
  tls:
    insecureEdgeTerminationPolicy: Redirect
    termination: edge
  to:
    kind: Service
{{- if .Values.service.name }}
    name: {{ .Values.service.name }}
{{- else }}
    name: {{ template "nexus.name" . }}-service
{{- end }}
    weight: 100
  wildcardPolicy: None
status:
  ingress: []
{{- end }}


================================================
FILE: charts/sonatype-nexus/templates/secret.yaml
================================================
{{- /*
  Optional user-supplied Secret mounted into the nexus container at
  .Values.secret.mountPath (see deployment-statefulset.yaml). Values in
  .Values.secret.data are emitted verbatim — they must already be base64.
*/}}
{{- if and .Values.secret.enabled .Values.secret.data -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "nexus.name" . }}-secret
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
data:
{{ toYaml .Values.secret.data | indent 2 }}
{{- end}}


================================================
FILE: charts/sonatype-nexus/templates/service.yaml
================================================
{{- /*
  Generic user-configured Service (distinct from proxy-svc.yaml): one optional
  named port from service.portName/port/targetPort plus any extra entries from
  .Values.service.ports verbatim.
*/}}
{{- if .Values.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.service.name }}
  name: {{ .Values.service.name }}
{{- else }}
  name: {{ template "nexus.name" . }}-service
{{- end }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- if .Values.service.annotations }}
  annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
  ports:
{{- if .Values.service.portName }}
    - name: {{ .Values.service.portName }}
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.targetPort }}
{{- end }}
{{- with .Values.service.ports }}
{{ toYaml . | indent 2 }}
{{- end }}
  selector:
    app: {{ template "nexus.name" . }}
    release: {{ .Release.Name }}
  type: {{ .Values.service.type }}
{{ if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{- range .Values.service.loadBalancerSourceRanges }}
    - {{ . }}
{{- end }}
{{ end }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- end}}


================================================
FILE: charts/sonatype-nexus/templates/serviceaccount.yaml
================================================
{{- /*
  ServiceAccount for the workload; an explicit serviceAccount.name overrides
  the chart-generated fullname (same precedence as in the pod spec and the
  ClusterRoleBinding subjects).
*/}}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
{{- if .Values.serviceAccount.name }}
  name: {{ .Values.serviceAccount.name }}
{{- else }}
  name: {{ template "nexus.fullname" . }}
{{- end }}
  namespace: {{ template "nexus.namespace" . }}
  labels:
{{ include "nexus.labels" . | indent 4 }}
{{- if .Values.nexus.labels }}
{{ toYaml .Values.nexus.labels | indent 4 }}
{{- end }}
{{- if .Values.serviceAccount.annotations }}
  annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end }}


================================================
FILE: charts/sonatype-nexus/values.yaml
================================================
## Overrides for generated resource names
# namespaceOverride:

statefulset:
  enabled: false

replicaCount: 1

# By default deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25% . you can change type to `Recreate` or can uncomment `rollingUpdate` specification and adjust them to your usage.
deploymentStrategy: {}
#   rollingUpdate:
#     maxSurge: 25%
#     maxUnavailable: 25%
#   type: RollingUpdate

# If enabled, a Job will be launched after the chart is installed to initialize the admin password of your choice
initAdminPassword:
  enabled: false
  # Set this in the instance where default admin password is different
  defaultPasswordOverride:
  password: "admin321"

nexus:
  imageName: quay.io/travelaudience/docker-nexus
  imageTag: 3.38.1-01
  imagePullPolicy: IfNotPresent
  # Uncomment this to scheduler pods on priority
  # priorityClassName: "high-priority"
  env:
    - name: INSTALL4J_ADD_VM_PARAMS
      value: "-Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:ActiveProcessorCount=4"
    - name: NEXUS_SECURITY_RANDOMPASSWORD
      value: "false"
  # envFrom:
  #   - configMapRef:
  #       name: special-config
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  resources: {}
  # requests:
  ## Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem
  ## and https://twitter.com/analytically/status/894592422382063616:
  ## Xms == Xmx
  ## Xmx <= 4G
  ## MaxDirectMemory >= 2G
  ## Xmx + MaxDirectMemory <= RAM * 2/3 (hence the request for 4800Mi)
  ## MaxRAMFraction=1 is not being set as it would allow the heap
  ## to use all the available memory.
  #   cpu: 250m
  #   memory: 4800Mi
  containerSecurityContext: {}
  # The ports should only be changed if the nexus image uses a different port
  dockerPort: 5003
  nexusPort: 8081
  additionalPorts: []
  service:
    type: NodePort
    # clusterIP: None
    # annotations: {}
    ## When using LoadBalancer service type, use the following AWS certificate from ACM
    ## https://aws.amazon.com/documentation/acm/
    # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-west-1:123456789:certificate/abc123-abc123-abc123-abc123"
    # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "https"
    # service.beta.kubernetes.io/aws-load-balancer-backend-port: "https"
    ## When using LoadBalancer service type, whitelist these source IP ranges
    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
    # loadBalancerSourceRanges:
    # - 192.168.1.10/32
    # labels: {}
    ## Configures the requested IP on the loadBalancer when using LoadBalancer service type
    # loadBalancerIP: "192.168.1.10"
  securityContextEnabled: true
  securityContext:
    fsGroup: 200
  podAnnotations: {}
  livenessProbe:
    initialDelaySeconds: 300
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /
  readinessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /
  # startupProbe:
  #   initialDelaySeconds: 10
  #   periodSeconds: 10
  #   failureThreshold: 300
  #   # timeoutSeconds: 10
  #   path: /
  # hostAliases allows the modification of the hosts file inside a container
  hostAliases: []
  # - ip: "192.168.1.10"
  #   hostnames:
  #   - "example.com"
  #   - "www.example.com"
  # NOTE(review): nesting of `context.chownNexusData` under `nexus` reconstructed from collapsed text — confirm against the deployment template
  context:
    # When using nexus it is important that all the files in the data directory have the proper owner configured. Therefore this
    # value defaults to true to apply chown -R nexus:nexus to the mounted directory at every startup of the container.
    chownNexusData: true
  # terminationGracePeriodSeconds : 30 # Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
  # WARNING : It has no effect with quay.io/travelaudience/docker-nexus which is using Runsvdir (https://github.com/travelaudience/docker-nexus/issues/56)
  # However it can be useful when you switch to the official image (https://hub.docker.com/r/sonatype/nexus3/tags?page=1&ordering=last_updated)

route:
  enabled: false
  name: docker
  portName: docker
  labels:
  annotations:
  # path: docker.apps.ocp01.cluster.local

nexusProxy:
  enabled: true
  # svcName: proxy-svc
  imageName: quay.io/travelaudience/docker-nexus-proxy
  imageTag: 2.6.0
  imagePullPolicy: IfNotPresent
  port: 8080
  containerSecurityContext: {}
  targetPort: 8080
  # labels: {}
  env:
    # Example for javaOptions : "-Xms3G -Xmx3G -XX:MaxDirectMemorySize=3G"
    javaOptions:
    nexusDockerHost:
    nexusHttpHost:
    enforceHttps: false
    cloudIamAuthEnabled: false
    ## If cloudIamAuthEnabled is set to true uncomment the variables below and remove this line
    # clientId: ""
    # clientSecret: ""
    # organizationId: ""
    # redirectUrl: ""
    # requiredMembershipVerification: "true"
  # secrets:
  #   keystore: ""
  #   password: ""
  resources: {}
  # requests:
  #   cpu: 100m
  #   memory: 256Mi
  # limits:
  #   cpu: 200m
  #   memory: 512Mi

nexusProxyRoute:
  enabled: false
  labels:
  annotations:
  # path: /nexus

persistence:
  enabled: true
  accessMode: ReadWriteOnce
  ## If defined, storageClass:
  ## If set to "-", storageClass: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClass spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  # existingClaim:
  # annotations:
  #   "helm.sh/resource-policy": keep
  # storageClass: "-"
  storageSize: 8Gi
  # If PersistentDisk already exists you can create a PV for it by including the 2 following keypairs.
  # pdName: nexus-data-disk
  # fsType: ext4

nexusBackup:
  enabled: false
  imageName: dbcc/docker-nexus-backup
  imageTag: 0.0.1
  imagePullPolicy: IfNotPresent
  env:
    rcloneRemote:
    targetBucket:
    nexusAuthorization:
    # Size of the data chunk to stream to the remote
    streamingUploadCutoff: "5000000"
    offlineRepos: "maven-central maven-public maven-releases maven-snapshots"
    gracePeriod: 60
    # This should match the value of `initAdminPassword.password` if `initAdminPassword.enabled` is true
    nexusAdminPassword: "admin123"
  persistence:
    enabled: true
    # existingClaim:
    # annotations:
    #   "helm.sh/resource-policy": keep
    accessMode: ReadWriteOnce
    # See comment above for information on setting the backup storageClass
    # storageClass: "-"
    storageSize: 8Gi
    # If PersistentDisk already exists you can create a PV for it by including the 2 following keypairs.
    # pdName: nexus-backup-disk
    # fsType: ext4
  resources: {}
  # requests:
  #   cpu: 100m
  #   memory: 256Mi
  # limits:
  #   cpu: 200m
  #   memory: 512Mi
  rcloneConfig:
    rclone.conf: |
      [AWS]
      type = s3
      provider = AWS
      env_auth = true
      region = us-east-1
      acl = authenticated-read
  containerSecurityContext: {}

nexusCloudiam:
  enabled: false
  persistence:
    enabled: true
    # existingClaim:
    # annotations:
    #   "helm.sh/resource-policy": keep
    accessMode: ReadWriteOnce
    # See comment above for information on setting the backup storageClass
    # storageClass: "-"
    storageSize: 1Mi
    # If PersistentDisk already exists you can create a PV for it by including the 2 following keypairs.
    # pdName: nexus-cloudiam-path
    # fsType: ext4

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  annotations: {}

rbac:
  create: false
  roleRef: {}
  annotations: {}

ingress:
  enabled: false
  path: /
  pathType: Prefix
  labels: {}
  annotations: {}
  # # NOTE: Can't use 'false' due to https://github.com/jetstack/kube-lego/issues/173.
  # kubernetes.io/ingress.allow-http: true
  # kubernetes.io/ingress.class: gce
  # kubernetes.io/ingress.global-static-ip-name: ""
  # kubernetes.io/tls-acme: true
  tls:
    enabled: true
    secretName: nexus-tls
  hosts:
  # Specify custom rules in addition to or instead of the nexus-proxy rules
  rules:
  # - host: http://nexus.127.0.0.1.nip.io
  #   http:
  #     paths:
  #       - backend:
  #           serviceName: additional-svc
  #           servicePort: 80
  # className: nginx

ingressDocker:
  enabled: false
  path: /
  pathType: Prefix
  labels: {}
  annotations: {}
  # # NOTE: Can't use 'false' due to https://github.com/jetstack/kube-lego/issues/173.
  # kubernetes.io/ingress.allow-http: true
  # kubernetes.io/ingress.class: gce
  # kubernetes.io/ingress.global-static-ip-name: ""
  # kubernetes.io/tls-acme: true
  tls:
    enabled: true
    secretName: nexus-tls
  hosts:
  # Specify custom rules in addition to or instead of the nexus-proxy rules
  rules:
  # - host: http://nexus.127.0.0.1.nip.io
  #   http:
  #     paths:
  #       - backend:
  #           serviceName: additional-svc
  #           servicePort: 80
  # className: nginx

affinity: {}

tolerations: []

# # Enable configmap and add data in configmap
config:
  enabled: false
  mountPath: /sonatype-nexus-conf
  data:

deployment:
  # # Add annotations in deployment to enhance deployment configurations
  annotations: {}
  # # Add init containers. e.g. to be used to give specific permissions for nexus-data.
  # # Add your own init container or uncomment and modify the given example.
  initContainers:
  # - name: fmp-volume-permission
  #   image: busybox
  #   imagePullPolicy: IfNotPresent
  #   command: ['chown','-R', '200', '/nexus-data']
  #   volumeMounts:
  #     - name: nexus-data
  #       mountPath: /nexus-data
  # # Uncomment and modify this to run a command after starting the nexus container.
postStart: command: # '["/bin/sh", "-c", "ls"]' # # Enable nexus scripts settings, disabled by default for versions >= 3.21.2 # command: '["/bin/sh", "-c", "if [[ -z $(grep ^nexus.scripts.allowCreation=true /nexus-data/etc/nexus.properties) ]];then echo nexus.scripts.allowCreation=true >> /nexus-data/etc/nexus.properties;fi"]' additionalContainers: additionalVolumes: additionalVolumeMounts: # # To use an additional secret, set enable to true and add data secret: enabled: false mountPath: /etc/secret-volume readOnly: true data: # # To use an additional service, set enable to true service: type: ClusterIP # name: additional-svc enabled: false labels: {} annotations: {} ports: - name: nexus-service targetPort: 80 port: 80 ## Configures the requested IP on the loadBalancer when using LoadBalancer service type # loadBalancerIP: "192.168.1.10" additionalConfigMaps: [] # - name: maven-central # labels: # nexus-type: repository # data: # recipe: 'MavenProxy' # remoteUrl: 'https://repo.maven.apache.org/maven2/' # blobStoreName: 'default' # strictContentTypeValidation: 'true' # versionPolicy: 'RELEASE' # layoutPolicy: 'STRICT' ================================================ FILE: docs/index.html ================================================ Oteemo Chart Repository

Oteemo Helm Charts Repo

Point Helm at this repository to access Oteemo's helm charts

helm repo add oteemocharts https://oteemo.github.io/charts

================================================
FILE: docs/index.yaml
================================================
apiVersion: v1
entries:
  che:
  - apiVersion: v1
    appVersion: 7.3.1
    created: "2019-11-01T14:10:55.698277-04:00"
    description: A Helm chart for deploying Eclipse Che to Kubernetes
    digest: 788127cd4e5a1d43e92b5007b5a18fb867780180cc2de01d68f9f2358b491910
    name: che
    urls:
    - https://oteemo.github.io/charts/che-0.1.2.tgz
    version: 0.1.2
  - apiVersion: v1
    created: "2019-11-01T14:10:55.694465-04:00"
    description: A Helm chart for deploying Eclipse Che to Kubernetes
    digest: c5fc2099b6b86d9dd25b4fa31bf7bbd3bbd969ce079a06bef5ac1558b74d67ee
    name: che
    urls:
    - https://oteemo.github.io/charts/che-0.1.1.tgz
    version: 0.1.1
generated: "2019-11-01T14:10:55.688664-04:00"


================================================
FILE: lint.yaml
================================================
# See https://github.com/helm/chart-testing#configuration
remote: origin
chart-dirs:
  - charts
chart-repos:
  - bitnami=https://charts.bitnami.com/bitnami
helm-extra-args: --timeout 600s


================================================
FILE: old-charts/che/.gitignore
================================================
charts
requirements.lock


================================================
FILE: old-charts/che/.helmignore
================================================
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


================================================
FILE: old-charts/che/Chart.yaml
================================================
apiVersion: v1
appVersion: 7.3.1
description: A Helm chart for deploying Eclipse Che to Kubernetes
name: che
version: 0.1.6
keywords:
  - che
  - eclipse
maintainers:
home: https://www.eclipse.org/che/
icon: https://www.eclipse.org/che/images/logo-eclipseche.svg
deprecated: true


================================================
FILE: old-charts/che/Readme.md
================================================
# Eclipse Che

***This chart has been deprecated***


================================================
FILE: old-charts/che/custom-charts/che-devfile-registry/Chart.yaml
================================================
#
# Copyright (c) 2018 Red Hat, Inc.
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
apiVersion: "v1"
name: "che-devfile-registry"
version: "0.0.1"
home: "https://github.com/eclipse/che-devfile-registry/"


================================================
FILE: old-charts/che/custom-charts/che-devfile-registry/README.md
================================================
# Che devfile Registry Helm Chart

This Helm Chart installs [Che](https://github.com/eclipse/che) devfile Registry.

More information about Che devfile Registry can be found [here](https://github.com/eclipse/che-devfile-registry).


================================================
FILE: old-charts/che/custom-charts/che-devfile-registry/templates/deployment.yaml
================================================
#
# Copyright (c) 2018 Red Hat, Inc.
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: apps/v1 kind: Deployment metadata: labels: app: che component: devfile-registry name: devfile-registry spec: replicas: 1 revisionHistoryLimit: 2 selector: matchLabels: app: che component: devfile-registry strategy: type: RollingUpdate rollingUpdate: maxSurge: 25% maxUnavailable: 25% template: metadata: labels: app: che component: devfile-registry spec: containers: - image: {{ .Values.cheDevfileRegistry.repository }}:{{ .Values.global.cheImageTag }} imagePullPolicy: {{ .Values.cheDevfileRegistry.imagePullPolicy }} name: che-devfile-registry ports: - containerPort: 8080 livenessProbe: httpGet: path: /devfiles/ port: 8080 scheme: HTTP initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 3 readinessProbe: httpGet: path: /devfiles/ port: 8080 scheme: HTTP initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 3 resources: limits: memory: {{ .Values.cheDevfileRegistry.memoryLimit }} requests: memory: {{ .Values.cheDevfileRegistry.memoryRequests }} ================================================ FILE: old-charts/che/custom-charts/che-devfile-registry/templates/ingress.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Ingress metadata: name: devfile-registry annotations: kubernetes.io/ingress.class: {{ .Values.cheDevfileRegistry.ingress.class }} {{- range $key, $value := .Values.cheDevfileRegistry.ingress.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: rules: - host: {{ printf .Values.global.cheDevfileRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }} http: paths: - path: / backend: serviceName: devfile-registry servicePort: 8080 {{- if .Values.global.tls.enabled }} tls: - hosts: - {{ printf .Values.global.cheDevfileRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }} secretName: {{ .Values.global.tls.secretName }} {{- end -}} ================================================ FILE: old-charts/che/custom-charts/che-devfile-registry/templates/service.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: labels: app: che component: devfile-registry name: devfile-registry spec: ports: - protocol: TCP port: 8080 targetPort: 8080 selector: app: che component: devfile-registry ================================================ FILE: old-charts/che/custom-charts/che-devfile-registry/values.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # cheDevfileRegistry: repository: quay.io/eclipse/che-devfile-registry imagePullPolicy: Always memoryLimit: 256Mi memoryRequests: 16Mi ingress: class: nginx annotations: {} ================================================ FILE: old-charts/che/custom-charts/che-jaeger/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: old-charts/che/custom-charts/che-jaeger/Chart.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 description: A Jaeger Helm chart, used by Che name: che-jaeger version: 1.0.0 ================================================ FILE: old-charts/che/custom-charts/che-jaeger/templates/deployment.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Deployment metadata: name: jaeger labels: app: jaeger jaeger-infra: jaeger-deployment spec: replicas: 1 strategy: type: RollingUpdate rollingUpdate: maxUnavailable: 0 template: metadata: labels: app: jaeger jaeger-infra: jaeger-pod annotations: prometheus.io/scrape: "true" prometheus.io/port: "16686" spec: containers: - image: {{ .Values.image }} name: jaeger ports: - containerPort: 5775 protocol: UDP - containerPort: 6831 protocol: UDP - containerPort: 6832 protocol: UDP - containerPort: 16686 protocol: TCP - containerPort: 5778 protocol: TCP readinessProbe: httpGet: path: "/" port: 14269 initialDelaySeconds: 5 ================================================ FILE: old-charts/che/custom-charts/che-jaeger/templates/ingress.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- define "jaegerHost" }} {{- printf "jaeger-%s.%s" .Release.Namespace .Values.global.ingressDomain }} {{- end }} apiVersion: extensions/v1beta1 kind: Ingress metadata: name: jaeger-query annotations: kubernetes.io/ingress.class: "nginx" {{ .Values.global.ingressAnnotationsPrefix }}ingress.kubernetes.io/proxy-read-timeout: "3600" {{ .Values.global.ingressAnnotationsPrefix }}ingress.kubernetes.io/proxy-connect-timeout: "3600" {{- if .Values.global.tls.enabled }} {{ .Values.global.ingressAnnotationsPrefix }}ingress.kubernetes.io/ssl-redirect: "true" {{- else }} {{ .Values.global.ingressAnnotationsPrefix }}ingress.kubernetes.io/ssl-redirect: "false" {{- end }} spec: {{- if .Values.global.tls.enabled }} tls: - hosts: - {{ template "jaegerHost" . }} secretName: {{ .Values.global.tls.secretName }} {{- end }} rules: - host: {{ template "jaegerHost" . }} http: paths: - path: / backend: serviceName: jaeger-query servicePort: 16686 ================================================ FILE: old-charts/che/custom-charts/che-jaeger/templates/service-agent.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: name: jaeger-agent labels: app: jaeger jaeger-infra: agent-service spec: ports: - name: agent-zipkin-thrift port: 5775 protocol: UDP targetPort: 5775 - name: agent-compact port: 6831 protocol: UDP targetPort: 6831 - name: agent-binary port: 6832 protocol: UDP targetPort: 6832 - name: agent-sampler-manager port: 5778 protocol: TCP targetPort: 5778 selector: jaeger-infra: jaeger-pod ================================================ FILE: old-charts/che/custom-charts/che-jaeger/templates/service-collector.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: name: jaeger-collector labels: app: jaeger jaeger-infra: collector-service spec: ports: - name: jaeger-collector-tchannel port: 14267 protocol: TCP targetPort: 14267 - name: jaeger-collector-http port: 14268 protocol: TCP targetPort: 14268 selector: jaeger-infra: jaeger-pod ================================================ FILE: old-charts/che/custom-charts/che-jaeger/templates/service-query.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: name: jaeger-query labels: app: jaeger jaeger-infra: jaeger-service spec: ports: - name: query-http port: 80 protocol: TCP targetPort: 16686 selector: jaeger-infra: jaeger-pod ================================================ FILE: old-charts/che/custom-charts/che-jaeger/values.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # # Default values for postgres. # This is a YAML-formatted file. # Declare variables to be passed into your templates. image: jaegertracing/all-in-one:latest ================================================ FILE: old-charts/che/custom-charts/che-keycloak/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: old-charts/che/custom-charts/che-keycloak/Chart.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 description: A Helm chart for Keycloak, used by Che name: che-keycloak version: 1.0.0 ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/deployment.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Deployment metadata: labels: app: che component: keycloak io.kompose.service: keycloak name: keycloak spec: replicas: 1 template: metadata: labels: app: che component: keycloak io.kompose.service: keycloak spec: initContainers: - name: wait-for-postgres image: eclipse/che-endpoint-watcher:{{ .Values.global.cheImageTag }} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: ENDPOINT value: postgres containers: - env: - name: DB_VENDOR value: POSTGRES - name: DB_ADDR value: postgres - name: DB_PORT value: "5432" - name: DB_DATABASE value: keycloak - name: DB_USER value: keycloak - name: DB_PASSWORD value: keycloak - name: KEYCLOAK_USER value: admin - name: KEYCLOAK_PASSWORD value: admin - name: CHE_HOST value: {{ template "cheHost" . 
}} - name: ROUTING_SUFFIX value: {{ .Values.global.ingressDomain }} - name: NAMESPACE value: {{ .Release.Namespace }} - name: PROTOCOL {{- if .Values.global.tls.enabled }} value: "https" - name: PROXY_ADDRESS_FORWARDING value: "true" {{- else }} value: "http" {{- end }} image: {{ .Values.repository }}:{{ .Values.global.cheImageTag }} command: ["/scripts/kc_realm_user.sh"] imagePullPolicy: Always name: keycloak livenessProbe: failureThreshold: 11 initialDelaySeconds: 5 periodSeconds: 5 successThreshold: 1 tcpSocket: port: 8080 timeoutSeconds: 30 readinessProbe: failureThreshold: 10 httpGet: path: auth/js/keycloak.js port: 8080 scheme: HTTP initialDelaySeconds: 10 periodSeconds: 3 successThreshold: 1 timeoutSeconds: 1 ports: - containerPort: 8080 resources: limits: memory: 1536Mi requests: memory: 1024Mi volumeMounts: - mountPath: /opt/jboss/keycloak/standalone/data name: keycloak-data - mountPath: /opt/jboss/keycloak/standalone/log name: keycloak-log restartPolicy: Always securityContext: # `fsGroup`, `runAsGroup`, and `runAsUser` must be # same values that `USER` in the container image. fsGroup: 1000 runAsGroup: 1000 runAsUser: 1000 runAsNonRoot: true serviceAccountName: che-keycloak volumes: - name: keycloak-data persistentVolumeClaim: claimName: keycloak-data - name: keycloak-log persistentVolumeClaim: claimName: keycloak-log status: {} ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/endpoints-monitor-role.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: namespace: {{ .Release.Namespace }} name: che-endpoints-monitor rules: - apiGroups: [""] resources: ["services", "endpoints", "pods"] verbs: ["get", "list"] ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/endpoints-monitor-rolebinding.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: {{ .Release.Namespace -}} -keycloak-role-binding roleRef: kind: Role name: che-endpoints-monitor apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: che-keycloak namespace: {{ .Release.Namespace }} ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/ingress.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Ingress metadata: name: keycloak-ingress annotations: kubernetes.io/ingress.class: {{ .Values.ingress.class }} {{- range $key, $value := .Values.ingress.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: {{- if .Values.global.tls.enabled }} tls: - hosts: - {{ template "keycloakHost" . 
}} secretName: {{ .Values.global.tls.secretName }} {{- end }} rules: {{- if eq .Values.global.serverStrategy "default-host" }} - http: paths: - path: /auth/ {{- else if eq .Values.global.serverStrategy "single-host" }} - host: {{ template "keycloakHost" . }} http: paths: - path: /auth/ {{- else }} - host: {{ template "keycloakHost" . }} http: paths: - path: / {{- end }} backend: serviceName: keycloak servicePort: 5050 ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/keycloak-data-claim.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: PersistentVolumeClaim metadata: labels: io.kompose.service: keycloak-data name: keycloak-data spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi status: {} ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/keycloak-log-claim.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: PersistentVolumeClaim metadata: labels: io.kompose.service: keycloak-log name: keycloak-log spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi status: {} ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/keycloak-serviceaccount.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: ServiceAccount metadata: labels: app: che component: keycloak name: che-keycloak ================================================ FILE: old-charts/che/custom-charts/che-keycloak/templates/service.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: name: "keycloak" spec: ports: - name: "5050" port: 5050 targetPort: 8080 selector: io.kompose.service: keycloak status: loadBalancer: {} ================================================ FILE: old-charts/che/custom-charts/che-keycloak/values.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # # Default values for postgres. # This is a YAML-formatted file. # Declare variables to be passed into your templates. repository: eclipse/che-keycloak requireAdminPasswordChange: true ingress: class: nginx annotations: {} ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/Chart.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: "v1" name: "che-plugin-registry" version: "0.0.1" home: "https://github.com/eclipse/che-plugin-registry/" ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/README.md ================================================ # Che Plugin Registry Helm Chart This Helm Chart install [Che](https://github.com/eclipse/che) Plugin Registry. More information about Che Plugin Registry can be found [here](https://github.com/eclipse/che-plugin-registry). ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/templates/deployment.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: apps/v1 kind: Deployment metadata: labels: app: che component: plugin-registry name: plugin-registry spec: replicas: 1 revisionHistoryLimit: 2 selector: matchLabels: app: che component: plugin-registry strategy: type: RollingUpdate rollingUpdate: maxSurge: 25% maxUnavailable: 25% template: metadata: labels: app: che component: plugin-registry spec: containers: - image: {{ .Values.chePluginRegistry.repository }}:{{ .Values.global.cheImageTag }} imagePullPolicy: {{ .Values.chePluginRegistry.imagePullPolicy }} name: che-plugin-registry ports: - containerPort: 8080 livenessProbe: httpGet: path: /v3/plugins/ port: 8080 scheme: HTTP initialDelaySeconds: 30 periodSeconds: 10 timeoutSeconds: 3 readinessProbe: httpGet: path: /v3/plugins/ port: 8080 scheme: HTTP initialDelaySeconds: 3 periodSeconds: 10 timeoutSeconds: 3 
resources: limits: memory: {{ .Values.chePluginRegistry.memoryLimit }} requests: memory: {{ .Values.chePluginRegistry.memoryRequests }} ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/templates/ingress.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Ingress metadata: name: plugin-registry annotations: kubernetes.io/ingress.class: {{ .Values.chePluginRegistry.ingress.class }} {{- range $key, $value := .Values.chePluginRegistry.ingress.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: rules: - host: {{ printf .Values.global.chePluginRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }} http: paths: - path: / backend: serviceName: plugin-registry servicePort: 8080 {{- if .Values.global.tls.enabled }} tls: - hosts: - {{ printf .Values.global.chePluginRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }} secretName: {{ .Values.global.tls.secretName }} {{- end -}} ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/templates/service.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: labels: app: che component: plugin-registry name: plugin-registry spec: ports: - protocol: TCP port: 8080 targetPort: 8080 selector: app: che component: plugin-registry ================================================ FILE: old-charts/che/custom-charts/che-plugin-registry/values.yaml ================================================ # # Copyright (c) 2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # chePluginRegistry: repository: quay.io/eclipse/che-plugin-registry imagePullPolicy: Always memoryLimit: 256Mi memoryRequests: 16Mi ingress: class: nginx annotations: {} ================================================ FILE: old-charts/che/custom-charts/che-postgres/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: old-charts/che/custom-charts/che-postgres/Chart.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 description: A Helm chart for postgresql, used by Che name: che-postgres version: 1.0.0 ================================================ FILE: old-charts/che/custom-charts/che-postgres/templates/deployment.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Deployment metadata: creationTimestamp: null labels: app: che component: postgres io.kompose.service: postgres name: postgres spec: replicas: 1 template: metadata: labels: app: che component: postgres io.kompose.service: postgres spec: securityContext: runAsUser: 26 fsGroup: 26 containers: - env: - name: POSTGRESQL_USER value: keycloak - name: POSTGRESQL_PASSWORD value: keycloak - name: POSTGRESQL_DATABASE value: keycloak - name: "CHE_POSTGRES_USERNAME" value: "pgche" - name: "CHE_POSTGRES_PASSWORD" value: "pgchepassword" - name: "CHE_POSTGRES_DATABASE" value: "dbche" {{- if .Values.global.postgresDebugLogs }} - name: "POSTGRESQL_LOG_DEBUG" value: "true" {{- end }} image: {{ .Values.repository }}:{{ .Values.global.cheImageTag }} securityContext: runAsUser: 26 imagePullPolicy: Always name: postgres livenessProbe: failureThreshold: 3 initialDelaySeconds: 30 periodSeconds: 10 successThreshold: 1 tcpSocket: port: 5432 timeoutSeconds: 1 readinessProbe: exec: command: - bash - -c - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d $POSTGRESQL_DATABASE -c "SELECT 1" failureThreshold: 10 initialDelaySeconds: 5 periodSeconds: 3 successThreshold: 1 timeoutSeconds: 1 ports: - containerPort: 5432 
resources: {} volumeMounts: - mountPath: /var/lib/pgsql/data name: postgres-data restartPolicy: Always volumes: - name: postgres-data persistentVolumeClaim: claimName: postgres-data status: {} ================================================ FILE: old-charts/che/custom-charts/che-postgres/templates/postgres-data-claim.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: PersistentVolumeClaim metadata: creationTimestamp: null labels: io.kompose.service: postgres-data name: postgres-data spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi status: {} ================================================ FILE: old-charts/che/custom-charts/che-postgres/templates/service.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: io.kompose.service: postgres name: postgres spec: ports: - name: "5432" port: 5432 targetPort: 5432 selector: io.kompose.service: postgres status: loadBalancer: {} ================================================ FILE: old-charts/che/custom-charts/che-postgres/values.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # # Default values for postgres. 
# This is a YAML-formatted file. # Declare variables to be passed into your templates. repository: eclipse/che-postgres ================================================ FILE: old-charts/che/requirements.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # dependencies: - name: che-devfile-registry repository: file://./custom-charts/che-devfile-registry/ version: 0.0.1 condition: cheDevfileRegistry.deploy - name: che-plugin-registry repository: file://./custom-charts/che-plugin-registry/ version: 0.0.1 condition: chePluginRegistry.deploy - name: che-postgres repository: file://./custom-charts/che-postgres/ version: 1.0.0 condition: global.multiuser - name: che-keycloak repository: file://./custom-charts/che-keycloak/ version: 1.0.0 condition: global.cheDedicatedKeycloak,global.multiuser - name: che-jaeger repository: file://./custom-charts/che-jaeger version: 1.0.0 condition: jaeger.deploy - name: prometheus repository: https://kubernetes-charts.storage.googleapis.com/ version: ^7.4.5 condition: prometheus.deploy - name: grafana repository: https://kubernetes-charts.storage.googleapis.com/ version: ^1.19.0 condition: grafana.deploy ================================================ FILE: old-charts/che/templates/NOTES.txt ================================================ You can access your che instance through {{ template "cheHost" . 
}} ================================================ FILE: old-charts/che/templates/_hostHelper.tpl ================================================ {{- define "cheHost" }} {{- if or (eq .Values.global.serverStrategy "default-host") (eq .Values.global.serverStrategy "single-host") }} {{- printf "%s" .Values.global.ingressDomain }} {{- else }} {{- printf "che-%s.%s" .Release.Namespace .Values.global.ingressDomain }} {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/_keycloakAuthUrlHelper.tpl ================================================ {{- define "keycloakAuthUrl" }} {{- if or (eq .Values.global.serverStrategy "default-host") (eq .Values.global.serverStrategy "single-host") }} {{- if .Values.global.tls.enabled }} {{- printf "https://%s/auth" .Values.global.ingressDomain }} {{- else }} {{- printf "http://%s/auth" .Values.global.ingressDomain }} {{- end }} {{- else }} {{- if .Values.global.tls.enabled }} {{- printf "https://keycloak-%s.%s/auth" .Release.Namespace .Values.global.ingressDomain }} {{- else }} {{- printf "http://keycloak-%s.%s/auth" .Release.Namespace .Values.global.ingressDomain }} {{- end }} {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/_keycloakHostHelper.tpl ================================================ {{- define "keycloakHost" }} {{- if or (eq .Values.global.serverStrategy "default-host") (eq .Values.global.serverStrategy "single-host") }} {{- printf "%s" .Values.global.ingressDomain }} {{- else }} {{- printf "keycloak-%s.%s" .Release.Namespace .Values.global.ingressDomain }} {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/_secretHelper.tpl ================================================ {{- define "imagePullSecret" }} {{- if .Values.registry }} {{- if and .Values.registry.password .Values.registry.username }} {{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" 
.Values.registry.host (printf "%s:%s" .Values.registry.username .Values.registry.password | b64enc) | b64enc }} {{- end }} {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/cluster-role-binding.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: {{ .Release.Namespace -}} -che-clusterrole-binding roleRef: kind: ClusterRole name: cluster-admin apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: che namespace: {{ .Release.Namespace }} ================================================ FILE: old-charts/che/templates/configmap.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: ConfigMap metadata: labels: app: che component: che name: che data: CHE_HOST: {{ template "cheHost" . }} CHE_PORT: "8080" {{- if and .Values.global.tls .Values.global.tls.enabled }} CHE_API: https://{{ template "cheHost" . }}/api CHE_WEBSOCKET_ENDPOINT: wss://{{ template "cheHost" . }}/api/websocket CHE_INFRA_KUBERNETES_BOOTSTRAPPER_BINARY__URL: https://{{ template "cheHost" . }}/agent-binaries/linux_amd64/bootstrapper/bootstrapper {{ else }} CHE_API: http://{{ template "cheHost" . }}/api CHE_WEBSOCKET_ENDPOINT: ws://{{ template "cheHost" . }}/api/websocket CHE_WEBSOCKET_ENDPOINT__MINOR: ws://{{ template "cheHost" . 
}}/api/websocket-minor CHE_INFRA_KUBERNETES_BOOTSTRAPPER_BINARY__URL: http://{{ template "cheHost" . }}/agent-binaries/linux_amd64/bootstrapper/bootstrapper {{- end }} CHE_DEBUG_SERVER: "true" CHE_INFRASTRUCTURE_ACTIVE: "kubernetes" CHE_INFRA_KUBERNETES_INGRESS_DOMAIN: {{ .Values.global.ingressDomain }} CHE_INFRA_KUBERNETES_MACHINE__START__TIMEOUT__MIN: "5" CHE_INFRA_KUBERNETES_MASTER__URL: "" {{- if and .Values.global.tls .Values.global.tls.enabled }} CHE_INFRA_KUBERNETES_TLS__ENABLED: {{ .Values.global.tls.enabled | quote}} CHE_INFRA_KUBERNETES_TLS__SECRET: {{ .Values.global.tls.secretName }} {{- else }} CHE_INFRA_KUBERNETES_TLS__ENABLED: "false" CHE_INFRA_KUBERNETES_TLS__SECRET: "" {{- end }} {{- if .Values.global.multiuser }} CHE_KEYCLOAK_CLIENT__ID: {{ .Values.cheKeycloakClientId | quote}} CHE_KEYCLOAK_AUTH__SERVER__URL: {{ template "keycloakAuthUrl" . }} CHE_KEYCLOAK_REALM: {{ .Values.cheKeycloakRealm }} {{- end }} {{- if (and .Values.global.multiuser .Values.customOidcProvider) }} CHE_KEYCLOAK_OIDC__PROVIDER: {{ .Values.customOidcProvider }} {{- end }} {{- if and .Values.global.multiuser .Values.customOidcUsernameClaim }} CHE_KEYCLOAK_USERNAME__CLAIM: {{ .Values.customOidcUsernameClaim }} {{- end }} CHE_INFRA_KUBERNETES_NAMESPACE: {{ .Values.global.cheWorkspacesNamespace | quote}} CHE_INFRA_KUBERNETES_SERVICE__ACCOUNT__NAME: {{ .Values.global.cheWorkspaceServiceAccount }} CHE_INFRA_KUBERNETES_TRUST__CERTS: "false" CHE_INFRA_KUBERNETES_PVC_STRATEGY: "common" CHE_INFRA_KUBERNETES_PVC_QUANTITY: {{ .Values.global.pvcClaim }} CHE_INFRA_KUBERNETES_PVC_PRECREATE__SUBPATHS: "true" CHE_INFRA_KUBERNETES_POD_SECURITY__CONTEXT_RUN__AS__USER: "{{ .Values.global.securityContext.runAsUser }}" CHE_INFRA_KUBERNETES_POD_SECURITY__CONTEXT_FS__GROUP: "{{ .Values.global.securityContext.fsGroup }}" CHE_LOCAL_CONF_DIR: /etc/conf CHE_LOGS_DIR: /data/logs CHE_LOG_LEVEL: "INFO" CHE_MULTIUSER: {{ .Values.global.multiuser | quote }} CHE_OAUTH_GITHUB_CLIENTID: {{ 
.Values.global.gitHubClientID | quote}} CHE_OAUTH_GITHUB_CLIENTSECRET: {{ .Values.global.gitHubClientSecret | quote}} JAVA_OPTS: "-XX:MaxRAMFraction=2 -XX:+UseParallelGC -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=20 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90 -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true -Xms20m " CHE_WORKSPACE_AUTO_START: "false" CHE_INFRA_KUBERNETES_INGRESS_ANNOTATIONS__JSON: '{"kubernetes.io/ingress.class": "{{ .Values.workspace.ingress.class }}" {{- range $key, $value := .Values.workspace.ingress.annotations }},{{ $key | quote }}: {{ $value | quote }}{{- end }}}' CHE_INFRA_KUBERNETES_INGRESS_PATH__TRANSFORM: '%s(.*)' CHE_INFRA_KUBERNETES_SERVER__STRATEGY: {{ .Values.global.serverStrategy }} CHE_LOGGER_CONFIG: {{ .Values.global.log.loggerConfig | quote}} CHE_LOGS_APPENDERS_IMPL: {{ .Values.global.log.appenderName }} CHE_WORKSPACE_HTTP__PROXY: {{ .Values.cheWorkspaceHttpProxy | quote}} CHE_WORKSPACE_HTTPS__PROXY: {{ .Values.cheWorkspaceHttpsProxy | quote}} CHE_WORKSPACE_NO__PROXY: {{ .Values.cheWorkspaceNoProxy | quote}} CHE_LIMITS_WORKSPACE_IDLE_TIMEOUT: '{{ .Values.global.workspaceIdleTimeout }}' {{- if .Values.workspaceDefaultRamRequest }} CHE_WORKSPACE_DEFAULT_MEMORY_REQUEST_MB: {{ .Values.workspaceDefaultRamRequest }} {{- end }} {{- if .Values.workspaceDefaultRamLimit }} CHE_WORKSPACE_DEFAULT_MEMORY_LIMIT_MB: {{ .Values.workspaceDefaultRamLimit }} {{- end }} {{- if .Values.che.workspace.devfileRegistryUrl }} CHE_WORKSPACE_DEVFILE__REGISTRY__URL: {{ .Values.che.workspace.devfileRegistryUrl | quote }} {{- else if .Values.cheDevfileRegistry.deploy }} {{- if .Values.global.tls.enabled }} CHE_WORKSPACE_DEVFILE__REGISTRY__URL: https://{{ printf .Values.global.cheDevfileRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }} {{- else }} CHE_WORKSPACE_DEVFILE__REGISTRY__URL: http://{{ printf .Values.global.cheDevfileRegistryUrlFormat .Release.Namespace 
.Values.global.ingressDomain }} {{- end }} {{- end }} {{- if .Values.che.workspace.pluginRegistryUrl }} CHE_WORKSPACE_PLUGIN__REGISTRY__URL: {{ .Values.che.workspace.pluginRegistryUrl | quote }} {{- else if .Values.chePluginRegistry.deploy }} {{- if .Values.global.tls.enabled }} CHE_WORKSPACE_PLUGIN__REGISTRY__URL: https://{{ printf .Values.global.chePluginRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }}/v3 {{- else }} CHE_WORKSPACE_PLUGIN__REGISTRY__URL: http://{{ printf .Values.global.chePluginRegistryUrlFormat .Release.Namespace .Values.global.ingressDomain }}/v3 {{- end }} {{- end }} {{- if .Values.workspaceSidecarDefaultRamLimit }} CHE_WORKSPACE_SIDECAR_DEFAULT__MEMORY__LIMIT__MB: {{ .Values.workspaceSidecarDefaultRamLimit }} {{- end }} {{- if .Values.global.cheWorkspaceClusterRole }} CHE_INFRA_KUBERNETES_CLUSTER__ROLE__NAME: {{ .Values.global.cheWorkspaceClusterRole }} {{- end }} CHE_CORS_ENABLED: "true" CHE_CORS_ALLOW__CREDENTIALS: "false" CHE_CORS_ALLOWED__ORIGINS: "*" CHE_WSAGENT_CORS_ENABLED: "true" CHE_WSAGENT_CORS_ALLOW__CREDENTIALS: "true" CHE_WSAGENT_CORS_ALLOWED__ORIGINS: "NULL" CHE_TRACING_ENABLED: {{ .Values.global.tracingEnabled | quote }} JAEGER_ENDPOINT: "http://jaeger-collector:14268/api/traces" JAEGER_SERVICE_NAME: "che-server" JAEGER_SAMPLER_MANAGER_HOST_PORT: "jaeger:5778" JAEGER_SAMPLER_TYPE: "const" JAEGER_SAMPLER_PARAM: "1" JAEGER_REPORTER_MAX_QUEUE_SIZE: "10000" CHE_METRICS_ENABLED: {{ .Values.global.metricsEnabled | quote }} CHE_WORKSPACE_JAVA__OPTIONS: "-Xmx2000m" CHE_WORKSPACE_MAVEN__OPTIONS: "-Xmx20000m" CHE_INFRA_KUBERNETES_WORKSPACE__START__TIMEOUT__MIN: "15" ================================================ FILE: old-charts/che/templates/deployment.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Deployment metadata: labels: app: che component: che name: che spec: replicas: 1 revisionHistoryLimit: 2 selector: matchLabels: app: che strategy: type: Recreate template: metadata: labels: app: che component: che spec: securityContext: fsGroup: {{ .Values.global.securityContext.fsGroup }} initContainers: {{- if .Values.global.multiuser }} - name: wait-for-postgres image: eclipse/che-endpoint-watcher:{{ .Values.global.cheImageTag}} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: ENDPOINT value: postgres {{- end }} #wait for keycloak if in multiuser mode and .Values.customOidcProvider was not defined {{- if (and .Values.global.multiuser (not .Values.customOidcProvider)) }} - name: wait-for-keycloak image: eclipse/che-endpoint-watcher:{{ .Values.global.cheImageTag}} env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: ENDPOINT value: keycloak {{- end }} {{- if not .Values.global.multiuser }} - name: fmp-volume-permission image: busybox command: ["chmod", "777", "/data"] volumeMounts: [{ "mountPath": "/data", "name": "che-data-volume" }] {{- end }} containers: - envFrom: - configMapRef: name: che env: - name: OPENSHIFT_KUBE_PING_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace {{- if .Values.global.tls.enabled }} # If self-signed-cert is used then configure Che Server with certificate content # to propagate it to trust store {{- if .Values.global.tls.useSelfSignedCerts }} - name: CHE_SELF__SIGNED__CERT valueFrom: secretKeyRef: key: ca.crt name: {{ .Values.global.tls.selfSignedCertSecretName }} optional: false {{- end }} # If workspaces are created in different namespace than Che Server's one # then configure Che Server to propagate TLS 
secret to workspaces' namespaces {{- if ne .Release.Namespace .Values.global.cheWorkspacesNamespace }} - name: "CHE_INFRA_KUBERNETES_TLS__CERT" valueFrom: secretKeyRef: key: tls.crt name: {{ .Values.global.tls.secretName }} optional: false - name: "CHE_INFRA_KUBERNETES_TLS__KEY" valueFrom: secretKeyRef: key: tls.key name: {{ .Values.global.tls.secretName }} optional: false {{- end }} {{- end }} image: {{ .Values.cheImage }}:{{ .Values.global.cheImageTag}} imagePullPolicy: {{ .Values.cheImagePullPolicy }} securityContext: runAsUser: {{ .Values.global.securityContext.runAsUser }} livenessProbe: httpGet: path: /api/system/state port: 8080 scheme: HTTP initialDelaySeconds: 120 timeoutSeconds: 10 name: che ports: - containerPort: 8080 name: http - containerPort: 8000 name: http-debug - containerPort: 8888 name: jgroups-ping - containerPort: 8087 name: http-metrics readinessProbe: httpGet: path: /api/system/state port: 8080 scheme: HTTP initialDelaySeconds: 15 timeoutSeconds: 60 resources: limits: memory: 600Mi requests: memory: 256Mi {{- if not .Values.global.multiuser }} volumeMounts: - mountPath: /data name: che-data-volume volumes: - name: che-data-volume persistentVolumeClaim: claimName: che-data-volume {{- end }} {{- if .Values.registry }} {{- if and .Values.registry.password .Values.registry.username }} imagePullSecrets: - name: registry-pull-secret {{- end }} {{- end }} serviceAccountName: che ================================================ FILE: old-charts/che/templates/exec-role.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if (.Values.global.cheWorkspacesNamespace) }} kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: exec namespace: {{ .Values.global.cheWorkspacesNamespace }} rules: - apiGroups: - "" attributeRestrictions: null resources: - pods/exec verbs: - create {{- end }} ================================================ FILE: old-charts/che/templates/ingress.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: extensions/v1beta1 kind: Ingress metadata: name: che-ingress annotations: kubernetes.io/ingress.class: {{ .Values.che.ingress.class }} {{- range $key, $value := .Values.che.ingress.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: {{- if .Values.global.tls.enabled }} tls: - hosts: - {{ template "cheHost" . }} secretName: {{ .Values.global.tls.secretName }} {{- end }} rules: {{- if ne .Values.global.serverStrategy "default-host" }} - host: {{ template "cheHost" . }} http: {{- else }} - http: {{- end }} paths: - path: / backend: serviceName: che-host servicePort: 8080 ================================================ FILE: old-charts/che/templates/metrics-ingress.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- define "prometheusHost" }} {{- printf "prometheus-%s.%s" .Release.Namespace .Values.global.ingressDomain }} {{- end }} {{- define "grafanaHost" }} {{- printf "grafana-%s.%s" .Release.Namespace .Values.global.ingressDomain }} {{- end }} {{ if or .Values.prometheus.deploy .Values.grafana.deploy }} apiVersion: extensions/v1beta1 kind: Ingress metadata: name: che-metrics-ingress annotations: kubernetes.io/ingress.class: "{{ .Values.metrics.ingress.class }}" {{- range $key, $value := .Values.metrics.ingress.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} spec: {{- if .Values.global.tls.enabled }} tls: - hosts: {{- if .Values.prometheus.deploy }} - {{ template "prometheusHost" . }} {{- end }} {{- if .Values.grafana.deploy }} - {{ template "grafanaHost" . }} {{- end }} secretName: {{ .Values.global.tls.secretName }} {{- end }} {{- if .Values.prometheus.deploy }} rules: - host: {{ template "prometheusHost" . }} http: paths: - path: / backend: serviceName: che-prometheus-server servicePort: 80 {{- end }} {{- if .Values.grafana.deploy }} - host: {{ template "grafanaHost" . }} http: paths: - path: / backend: serviceName: che-grafana servicePort: 80 {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/pvc.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if not .Values.global.multiuser }} apiVersion: v1 kind: PersistentVolumeClaim metadata: labels: app: che component: che name: che-data-volume spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi {{- end }} ================================================ FILE: old-charts/che/templates/registry-pull-secret.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if .Values.registry }} {{- if and .Values.registry.password .Values.registry.username }} # When creating a pod based on an image that resides in a private Docker registry (a secure registry protected by basic authentication), # one must specify an imagePullSecrets as part of the pod's spec. # (see https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) # This resource creates a docker registry pull secret that's based on values provided in values.yaml (it relies on a template defined in _secretHelper.tpl). The secret must be deployed to # the same namespace in which the pod will be created. apiVersion: v1 kind: Secret metadata: name: registry-pull-secret type: kubernetes.io/dockerconfigjson data: .dockerconfigjson: {{ template "imagePullSecret" . }} {{- end }} {{- end }} ================================================ FILE: old-charts/che/templates/service.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: Service metadata: labels: app: che component: che name: che-host spec: ports: - name: http port: 8080 protocol: TCP targetPort: 8080 - name: metrics port: 8087 protocol: TCP targetPort: 8087 selector: app: che component: che ================================================ FILE: old-charts/che/templates/serviceaccount.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # apiVersion: v1 kind: ServiceAccount metadata: labels: app: che component: che name: che ================================================ FILE: old-charts/che/templates/wildcard-certificate.yaml ================================================ {{- if .Values.global.tls.letsEncrypt.isDeployed }} apiVersion: cert-manager.io/v1alpha2 kind: Certificate metadata: name: che-wildcard-certificate spec: secretName: {{ .Values.global.tls.secretName }} dnsNames: - {{ .Values.global.ingressDomain }} - "*.{{ .Values.global.ingressDomain }}" issuerRef: kind: ClusterIssuer name: {{ .Values.global.tls.letsEncrypt.clusterIssuer }} commonName: "*.{{ .Values.global.ingressDomain }}" {{- end }} ================================================ FILE: old-charts/che/templates/workspace-exec-role-binding.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if (.Values.global.cheWorkspacesNamespace) }} kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: che-workspace-exec namespace: {{ .Values.global.cheWorkspacesNamespace }} roleRef: kind: Role name: exec apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ .Values.global.cheWorkspaceServiceAccount }} namespace: {{ .Values.global.cheWorkspacesNamespace }} {{- end }} ================================================ FILE: old-charts/che/templates/workspace-service-account.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if (.Values.global.cheWorkspacesNamespace) }} kind: ServiceAccount apiVersion: v1 metadata: name: {{ .Values.global.cheWorkspaceServiceAccount }} namespace: {{ .Values.global.cheWorkspacesNamespace }} {{- end }} ================================================ FILE: old-charts/che/templates/workspace-view-role-binding.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if (.Values.global.cheWorkspacesNamespace) }} kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: che-workspace-view namespace: {{ .Values.global.cheWorkspacesNamespace }} roleRef: kind: Role name: workspace-view apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: {{ .Values.global.cheWorkspaceServiceAccount }} namespace: {{ .Values.global.cheWorkspacesNamespace }} {{- end }} ================================================ FILE: old-charts/che/templates/workspace-view-role.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # {{- if (.Values.global.cheWorkspacesNamespace) }} kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: workspace-view namespace: {{ .Values.global.cheWorkspacesNamespace }} rules: - apiGroups: - "" attributeRestrictions: null resources: - pods - services verbs: - list {{- end }} ================================================ FILE: old-charts/che/tiller-rbac.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: tiller-role-binding roleRef: kind: ClusterRole name: cluster-admin apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: tiller namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: tiller namespace: kube-system #################################################### # after applying this resource, run this command: # helm init --service-account tiller # or if you already performed helm init, run this command: # kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' # see: https://github.com/kubernetes/helm/issues/2224, https://stackoverflow.com/a/45306258/2365824 #################################################### ================================================ FILE: old-charts/che/values/default-host.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # global: multiuser: true ingressDomain: 192.168.99.100 serverStrategy: default-host ================================================ FILE: old-charts/che/values/multi-user.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. 
# This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # global: multiuser: true ================================================ FILE: old-charts/che/values/tls.yaml ================================================ # # Copyright (c) 2012-2019 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # global: tls: enabled: true secretName: che-tls ================================================ FILE: old-charts/che/values.yaml ================================================ # # Copyright (c) 2012-2018 Red Hat, Inc. # This program and the accompanying materials are made # available under the terms of the Eclipse Public License 2.0 # which is available at https://www.eclipse.org/legal/epl-2.0/ # # SPDX-License-Identifier: EPL-2.0 # # the following section is for secure registries. 
when uncommented, a pull secret will be created # registry: # host: my-secure-private-registry.com # username: myUser # password: myPass cheWorkspaceHttpProxy: "" cheWorkspaceHttpsProxy: "" cheWorkspaceNoProxy: "" cheImage: eclipse/che-server cheImagePullPolicy: Always cheKeycloakRealm: "che" cheKeycloakClientId: "che-public" # customOidcUsernameClaim: "" # customOidcProvider: "" # workspaceDefaultRamRequest: "" # workspaceDefaultRamLimit: "" # workspaceSidecarDefaultRamLimit: "" global: ## ImageTag used for all che related images cheImageTag: nightly multiuser: false # This value can be passed if custom Oidc provider is used, and there is no need to deploy keycloak in multiuser mode # default (if empty) is true # cheDedicatedKeycloak: false ingressDomain: 192.168.99.100.nip.io # See --annotations-prefix flag (https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/cli-arguments.md) ingressAnnotationsPrefix: "nginx." # options: default-host, single-host, multi-host serverStrategy: multi-host ## Allows to enable and configure TLS tls: enabled: false ## Secret name that will be used in ALL Che Ingresses for setting TLS up ## Note the helm chart does not create this secret and ## it MUST be pre-created in the configured Che namespace as a wildcard certificate secretName: che-tls ## If isDeployed is set to `true` then a Certificate CRD will be created that will request ## a wildcard certificate from Let's Encrypt using your specified cluster-issuer and ingressDomain. ## The name of the secret will be the `secretName` specified above. 
letsEncrypt: isDeployed: false clusterIssuer: ## If self-signed certificate is enabled ## then certificate from `tls.selfSignedCertSecretName` will be propagated to Che components' trust stores useSelfSignedCerts: false selfSignedCertSecretName: self-signed-cert gitHubClientID: "" gitHubClientSecret: "" pvcClaim: "1Gi" cheWorkspacesNamespace: "" # Service account name that will be mounted to workspaces pods # Note that: # if `cheWorkspacesNamespace` is configured then service account with configured name will be created by helm chart during deploying Che # if `cheWorkspacesNamespace` is empty then Che Server creates new namespace for each workspace and ensures that configured SA exists there cheWorkspaceServiceAccount: "che-workspace" # If set, Che will bind the specified cluster role to the workspace service account when creating a workspace. cheWorkspaceClusterRole: "" workspaceIdleTimeout: "-1" log: loggerConfig: "" appenderName: "plaintext" tracingEnabled: false metricsEnabled: false # Run Che and Che workspaces as the same non-root user securityContext: runAsUser: 1724 fsGroup: 1724 postgresDebugLogs: false cheDevfileRegistryUrlFormat: "devfile-registry-%s.%s" chePluginRegistryUrlFormat: "plugin-registry-%s.%s" che: workspace: {} ingress: class: nginx annotations: nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600" # devfileRegistryUrl: "https://che-devfile-registry.openshift.io/" # pluginRegistryUrl: "https://che-plugin-registry.openshift.io/v3" workspace: ingress: class: nginx annotations: nginx.ingress.kubernetes.io/rewrite-target: /$1 nginx.ingress.kubernetes.io/proxy-connect-timeout: 3600 nginx.ingress.kubernetes.io/proxy-read-timeout: 3600 cheDevfileRegistry: deploy: true chePluginRegistry: deploy: true jaeger: deploy: false metrics: ingress: class: nginx annotations: nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" nginx.ingress.kubernetes.io/proxy-connect-timeout: "3600" prometheus: 
deploy: false alertmanager: enabled: false kubeStateMetrics: enabled: false nodeExporter: enabled: false pushgateway: enabled: false rbac: enabled: false serverFiles: prometheus.yml: scrape_configs: - job_name: che static_configs: - targets: - che-host:8087 grafana: deploy: false adminUser: admin adminPassword: admin datasources: datasources.yaml: apiVersion: 1 datasources: - name: che type: prometheus access: proxy url: http://che-prometheus-server isDefault: true dashboardProviders: dashboardproviders.yaml: apiVersion: 1 providers: - name: default editable: true type: file folder: '' orgId: 1 options: path: /var/lib/grafana/dashboards/default dashboards: default: chejvm: json: | { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": 1, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "che", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, "y": 0 }, "id": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "jvm_memory_used_bytes{job=\"che\"}", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{ id }}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "Che JVM Heap", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, 
"show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "refresh": false, "schemaVersion": 16, "style": "dark", "tags": [], "templating": { "list": [] }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "", "title": "Che JVM Heap", "uid": "IkmWvLLmk", "version": 1 } ================================================ FILE: pom.xml ================================================ 4.0.0 uk.co.haywood-associates charts 1.0.0-SNAPSHOT pom ================================================ FILE: test.yaml ================================================ # See https://github.com/helm/chart-testing#configuration remote: origin chart-dirs: - charts chart-repos: - bitnami=https://charts.bitnami.com/bitnami excluded-charts: - che helm-extra-args: --timeout 600s