main 5488282c3e3e cached
188 files
506.3 KB
147.1k tokens
13 symbols
1 requests
Download .txt
Showing preview only (555K chars total). Download the full file or copy to clipboard to get everything.
Repository: bgruening/docker-galaxy-stable
Branch: main
Commit: 5488282c3e3e
Files: 188
Total size: 506.3 KB

Directory structure:
gitextract__x213t1e/

├── .dive-ci
├── .editorconfig
├── .github/
│   └── workflows/
│       ├── compose.yml
│       ├── cvmfs.yml
│       ├── lint.yml
│       ├── pull-request.yml
│       ├── release.yml
│       ├── single.sh
│       ├── single_container.yml
│       └── update-site.yml
├── .gitignore
├── .travis.yml
├── Changelog.md
├── LICENSE
├── README.md
├── compose/
│   ├── README.md
│   ├── base-images/
│   │   ├── galaxy-cluster-base/
│   │   │   ├── Dockerfile
│   │   │   └── files/
│   │   │       ├── common_cleanup.sh
│   │   │       └── cvmfs/
│   │   │           ├── default.local
│   │   │           ├── domain.d/
│   │   │           │   └── galaxyproject.org.conf
│   │   │           └── keys/
│   │   │               └── galaxyproject.org/
│   │   │                   ├── data.galaxyproject.org.pub
│   │   │                   └── singularity.galaxyproject.org.pub
│   │   └── galaxy-container-base/
│   │       ├── Dockerfile
│   │       └── files/
│   │           └── common_cleanup.sh
│   ├── base_config.yml
│   ├── docker-compose.htcondor.yml
│   ├── docker-compose.k8s.yml
│   ├── docker-compose.pulsar.mq.yml
│   ├── docker-compose.pulsar.yml
│   ├── docker-compose.singularity.yml
│   ├── docker-compose.slurm.yml
│   ├── docker-compose.yml
│   ├── galaxy-configurator/
│   │   ├── Dockerfile
│   │   ├── customize.py
│   │   ├── run.sh
│   │   └── templates/
│   │       ├── galaxy/
│   │       │   ├── GALAXY_PROXY_PREFIX.txt.j2
│   │       │   ├── container_resolvers_conf.yml.j2
│   │       │   ├── dependency_resolvers_conf.xml.j2
│   │       │   ├── galaxy.yml.j2
│   │       │   ├── job_conf.xml.j2
│   │       │   └── job_metrics.xml.j2
│   │       ├── htcondor/
│   │       │   ├── executor.conf.j2
│   │       │   ├── galaxy.conf.j2
│   │       │   └── master.conf.j2
│   │       ├── kind/
│   │       │   ├── k8s_config/
│   │       │   │   ├── persistent_volumes.yml.j2
│   │       │   │   └── pv_claims.yml.j2
│   │       │   └── kind_config.yml.j2
│   │       ├── nginx/
│   │       │   └── nginx.conf.j2
│   │       ├── pulsar/
│   │       │   ├── app.yml.j2
│   │       │   └── server.ini.j2
│   │       └── slurm/
│   │           └── slurm.conf.j2
│   ├── galaxy-htcondor/
│   │   ├── Dockerfile
│   │   ├── start.sh
│   │   └── supervisord.conf
│   ├── galaxy-kind/
│   │   ├── Dockerfile
│   │   └── docker-entrypoint.sh
│   ├── galaxy-nginx/
│   │   ├── Dockerfile
│   │   └── start.sh
│   ├── galaxy-server/
│   │   ├── Dockerfile
│   │   └── files/
│   │       ├── common_cleanup.sh
│   │       ├── create_galaxy_user.py
│   │       └── start.sh
│   ├── galaxy-slurm/
│   │   ├── Dockerfile
│   │   └── start.sh
│   ├── galaxy-slurm-node-discovery/
│   │   ├── Dockerfile
│   │   └── run.sh
│   ├── pulsar/
│   │   ├── Dockerfile
│   │   ├── docker-entrypoint.sh
│   │   └── files/
│   │       └── common_cleanup.sh
│   └── tests/
│       ├── docker-compose.test.bioblend.yml
│       ├── docker-compose.test.selenium.yml
│       ├── docker-compose.test.workflows.yml
│       ├── docker-compose.test.yml
│       ├── galaxy-bioblend-test/
│       │   ├── Dockerfile
│       │   └── run.sh
│       ├── galaxy-selenium-test/
│       │   ├── Dockerfile
│       │   └── run.sh
│       └── galaxy-workflow-test/
│           ├── Dockerfile
│           └── run.sh
├── cvmfs/
│   ├── Dockerfile
│   ├── README.md
│   ├── ansible/
│   │   ├── playbook.yml
│   │   └── requirements.yml
│   └── docker-entrypoint.sh
├── docs/
│   ├── README.md
│   ├── Running_jobs_outside_of_the_container.md
│   ├── css/
│   │   └── landing_page.css
│   ├── js/
│   │   └── landing_page.js
│   └── src/
│       ├── generate_docs.py
│       └── requirements.txt
├── galaxy/
│   ├── Dockerfile
│   ├── ansible/
│   │   ├── condor.yml
│   │   ├── cvmfs_client.yml
│   │   ├── docker.yml
│   │   ├── files/
│   │   │   ├── 413.html
│   │   │   ├── 500.html
│   │   │   ├── 502.html
│   │   │   ├── nginx_sample.crt
│   │   │   ├── nginx_sample.key
│   │   │   └── production_b2drop.yml
│   │   ├── flower.yml
│   │   ├── galaxy_file_source_templates.yml
│   │   ├── galaxy_job_conf.yml
│   │   ├── galaxy_job_metrics.yml
│   │   ├── galaxy_object_store_templates.yml
│   │   ├── galaxy_scripts.yml
│   │   ├── galaxy_vault_config.yml
│   │   ├── gravity.yml
│   │   ├── group_vars/
│   │   │   └── all.yml
│   │   ├── k8s.yml
│   │   ├── nginx.yml
│   │   ├── pbs.yml
│   │   ├── postgresql.yml
│   │   ├── proftpd.yml
│   │   ├── provision.yml
│   │   ├── rabbitmq.yml
│   │   ├── redis.yml
│   │   ├── requirements.yml
│   │   ├── slurm.yml
│   │   ├── supervisor.yml
│   │   ├── templates/
│   │   │   ├── add_tool_shed.py.j2
│   │   │   ├── cgroupfs_mount.sh.j2
│   │   │   ├── check_database.py.j2
│   │   │   ├── configure_rabbitmq_users.yml.j2
│   │   │   ├── configure_slurm.py.j2
│   │   │   ├── container_resolvers_conf.yml.j2
│   │   │   ├── create_galaxy_user.py.j2
│   │   │   ├── export_user_files.py.j2
│   │   │   ├── file_source_templates.yml.j2
│   │   │   ├── gravity.yml.j2
│   │   │   ├── job_conf.xml.j2
│   │   │   ├── job_metrics_conf.yml.j2
│   │   │   ├── macros.xml.j2
│   │   │   ├── nginx/
│   │   │   │   ├── delegated_uploads.conf.j2
│   │   │   │   ├── flower_auth.conf.j2
│   │   │   │   ├── galaxy_common.conf.j2
│   │   │   │   ├── galaxy_http.j2
│   │   │   │   ├── galaxy_https.j2
│   │   │   │   ├── galaxy_redirect_ssl.j2
│   │   │   │   ├── htpasswd.j2
│   │   │   │   ├── interactive_tools_common.conf.j2
│   │   │   │   ├── interactive_tools_http.j2
│   │   │   │   ├── interactive_tools_https.j2
│   │   │   │   └── interactive_tools_redirect_ssl.j2
│   │   │   ├── object_store_templates.yml.j2
│   │   │   ├── rabbitmq.sh.j2
│   │   │   ├── startup_lite.sh.j2
│   │   │   ├── supervisor.conf.j2
│   │   │   ├── update_yaml_value.py.j2
│   │   │   └── vault_conf.yml.j2
│   │   └── tusd.yml
│   ├── bashrc
│   ├── cgroupfs_mount.sh
│   ├── common_cleanup.sh
│   ├── docker-compose.yaml
│   ├── install_tools_wrapper.sh
│   ├── run.sh
│   ├── sample_tool_list.yaml
│   ├── setup_postgresql.py
│   ├── startup.sh
│   ├── startup2.sh
│   ├── tool_conf_interactive.xml.sample
│   ├── tool_sheds_conf.xml
│   └── welcome.html
├── skills/
│   └── galaxy-docker/
│       ├── SKILL.md
│       └── references/
│           └── upgrade-25.1.md
└── test/
    ├── bioblend/
    │   ├── Dockerfile
    │   └── test.sh
    ├── container_resolvers_conf.ci.yml
    ├── cvmfs/
    │   └── test.sh
    ├── gridengine/
    │   ├── Dockerfile
    │   ├── act_qmaster
    │   ├── job_conf.xml.sge
    │   ├── master_script.sh
    │   ├── outputhostname/
    │   │   └── outputhostname.xml
    │   ├── outputhostname.tool.xml
    │   ├── setup_gridengine.sh
    │   ├── setup_tool.sh
    │   ├── test.sh
    │   ├── test_outputhostname.py
    │   └── tool_conf.xml
    └── slurm/
        ├── Dockerfile
        ├── configure_slurm.py
        ├── job_conf.xml
        ├── munge.conf
        ├── startup.sh
        ├── supervisor_slurm.conf
        └── test.sh

================================================
FILE CONTENTS
================================================

================================================
FILE: .dive-ci
================================================
# CI thresholds for `dive`, which analyzes Docker image layer efficiency.
# The build fails when any of the rules below is violated.
rules:
  # If the efficiency is measured below X%, mark as failed.
  # Expressed as a ratio between 0-1.
  lowestEfficiency: 0.95

  # If the amount of wasted space is X or larger, mark as failed.
  # Expressed in B, KB, MB, and GB.
  # highestWastedBytes: 20MB

  # If the amount of wasted space makes up X% or more of the image, mark as failed.
  # Note: the base image layer is NOT included in the total image size.
  # Expressed as a ratio between 0-1; fails if the threshold is met or crossed.
  highestUserWastedPercent: 0.10


================================================
FILE: .editorconfig
================================================
# EditorConfig settings for this repository (https://editorconfig.org).
# `root = true` stops editors from searching parent directories for more config.
root = true

# Defaults for all files: two-space indentation, UTF-8, clean whitespace.
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

# Python uses four-space indentation (PEP 8).
[*.py]
indent_size = 4


================================================
FILE: .github/workflows/compose.yml
================================================
name: build-and-test
on: [push]
jobs:
  build_container_base:
    if: false  # Temporarily disable workflow
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set image tag
        id: image_tag
        run: |
          if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
            echo "image_tag=latest" >> $GITHUB_OUTPUT;
          else
            echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
          fi
      - name: Docker Login
        run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: v0.17.1
      - name: Run Buildx
        env:
          image_name: galaxy-container-base
        run: |
          for i in {1..4}; do
            set +e
            docker buildx build  \
            --output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true" \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
            --build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
            $image_name && break || echo "Fail.. Retrying"
          done;
        shell: bash
        working-directory: ./compose/base-images
  build_cluster_base:
    needs: build_container_base
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set image tag
        id: image_tag
        run: |
          if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
            echo "image_tag=latest" >> $GITHUB_OUTPUT;
          else
            echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
          fi
      - name: Docker Login
        run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: v0.17.1
      - name: Run Buildx
        env:
          image_name: galaxy-cluster-base
        run: |
          for i in {1..4}; do
            set +e
            docker buildx build  \
            --output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true" \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
            --build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
            $image_name && break || echo "Fail.. Retrying"
          done;
        shell: bash
        working-directory: ./compose/base-images
  build:
    needs: build_cluster_base
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image:
          - name: galaxy-server
          - name: galaxy-nginx
          - name: galaxy-htcondor
          - name: galaxy-slurm
          - name: galaxy-slurm-node-discovery
          - name: galaxy-kind
          - name: pulsar
          - name: galaxy-configurator
          - name: galaxy-bioblend-test
            subdir: tests/
          - name: galaxy-workflow-test
            subdir: tests/
          - name: galaxy-selenium-test
            subdir: tests/
      fail-fast: false
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set image tag
        id: image_tag
        run: |
          if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
            echo "image_tag=latest" >> $GITHUB_OUTPUT;
          else
            echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
          fi
      - name: Docker Login
        run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: v0.17.1
      - name: Run Buildx
        run: |
          for i in {1..4}; do
            set +e
            docker buildx build \
            --output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/${{ matrix.image.name }}:${{ steps.image_tag.outputs.image_tag }},push=true" \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
            --build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
            --build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy \
            ${{ matrix.image.subdir }}${{ matrix.image.name }} && break || echo "Fail.. Retrying"
          done;
        shell: bash
        working-directory: ./compose
  test:
    needs: [build]
    runs-on: ubuntu-latest
    strategy:
      matrix:
        infrastructure:
          - name: galaxy-base
            files: -f docker-compose.yml
          - name: galaxy-proxy-prefix
            files: -f docker-compose.yml
            env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND="not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path"
            exclude_test:
              - selenium
          - name: galaxy-htcondor
            files: -f docker-compose.yml -f docker-compose.htcondor.yml
          - name: galaxy-slurm
            files: -f docker-compose.yml -f docker-compose.slurm.yml
            env: SLURM_NODE_COUNT=3
            options: --scale slurm_node=3
          - name: galaxy-pulsar
            files: -f docker-compose.yml -f docker-compose.pulsar.yml
            exclude_test:
              - workflow_quality_control
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
          - name: galaxy-pulsar-mq
            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml
            exclude_test:
              - workflow_quality_control
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
          - name: galaxy-k8s
            files: -f docker-compose.yml -f docker-compose.k8s.yml
          - name: galaxy-singularity
            files: -f docker-compose.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
          - name: galaxy-pulsar-mq-singularity
            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver"
            exclude_test:
              - workflow_quality_control
          - name: galaxy-slurm-singularity
            files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
          - name: galaxy-htcondor-singularity
            files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
        test:
          - name: bioblend
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml
            exit-from: galaxy-bioblend-test
            timeout: 60
            second_run: "true"
          - name: workflow_ard
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: sklearn/ard/ard.ga
            timeout: 60
            second_run: "true"
          - name: workflow_quality_control
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: training/sequence-analysis/quality-control/quality_control.ga
            timeout: 60
          - name: workflow_example1
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: example1/wf3-shed-tools.ga
            timeout: 60
          - name: selenium
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml
            exit-from: galaxy-selenium-test
            timeout: 60
      fail-fast: false
    steps:
      # Hand-rolled `exclude`, since GitHub Actions currently does not support
      # excluding/including dicts in matrices
      - name: Check if test should be run
        id: run_check
        if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true
        run: echo "run=true" >> $GITHUB_OUTPUT
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set image tag in env
        run: echo "IMAGE_TAG=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV
      - name: Master branch - Set image tag to 'latest'
        if: github.ref == 'refs/heads/master'
        run: echo "IMAGE_TAG=latest" >> $GITHUB_ENV
      - name: Set WORKFLOWS env for workflows-test
        if: matrix.test.workflow
        run: echo "WORKFLOWS=${{ matrix.test.workflow }}" >> $GITHUB_ENV
      - name: Install Docker Compose
        run: |
          sudo apt-get update -qq && sudo apt-get install docker-compose -y
      - name: Run tests for the first time
        if: steps.run_check.outputs.run
        run: |
          export DOCKER_REGISTRY=${{ secrets.docker_registry }}
          export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}
          export ${{ matrix.infrastructure.env }}
          export TIMEOUT=${{ matrix.test.timeout }}
          docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} config
          env
          for i in {1..4}; do
            echo "Running test - try \#$i"
            echo "Removing export directory if existent";
            sudo rm -rf export
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} pull
            set +e
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
            test_exit_code=$?
            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
              echo "Test failed..";
              continue;
            else
              exit $test_exit_code;
            fi
          done;
          exit 1
        shell: bash
        working-directory: ./compose
        continue-on-error: false
      - name: Fix file names before saving artifacts
        if: failure()
        run: |
          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
      - name: Allow upload-artifact read access
        if: failure()
        run: sudo chmod -R +r ./compose/export/galaxy/database
      - name: Save artifacts for debugging a failed test
        uses: actions/upload-artifact@v6
        if: failure()
        with:
          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run
          path: ./compose/export/galaxy/database
      - name: Clean up after first run
        if: matrix.test.second_run == 'true'
        run: |
          sudo rm -rf export/postgres
          sudo rm -rf export/galaxy/database
        working-directory: ./compose
      - name: Run tests a second time
        if: matrix.test.second_run == 'true' && steps.run_check.outputs.run
        run: |
          export DOCKER_REGISTRY=${{ secrets.docker_registry }}
          export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}
          export ${{ matrix.infrastructure.env }}
          export TIMEOUT=${{ matrix.test.timeout }}
          for i in {1..4}; do
            echo "Running test - try \#$i"
            echo "Removing export directory if existent";
            sudo rm -rf export
            set +e
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
            test_exit_code=$?
            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
              echo "Test failed..";
              continue;
            else
              exit $test_exit_code;
            fi
          done;
          exit 1
        shell: bash
        working-directory: ./compose
        continue-on-error: false
      - name: Fix file names before saving artifacts
        if: failure() && matrix.test.second_run == 'true'
        run: |
          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
      - name: Allow upload-artifact read access
        if: failure() && matrix.test.second_run == 'true'
        run: sudo chmod -R +r ./compose/export/galaxy/database
      - name: Save artifacts for debugging a failed test
        uses: actions/upload-artifact@v6
        if: failure() && matrix.test.second_run == 'true'
        with:
          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run
          path: ./compose/export/galaxy/database


================================================
FILE: .github/workflows/cvmfs.yml
================================================
# Build, test, and publish the CVMFS sidecar image.
# Tests run on pull requests touching CVMFS-related paths and on pushes;
# the image is built and pushed to quay.io only on pushes (branch or tag)
# when CVMFS files changed or a tag was pushed.
name: cvmfs-sidecar
on:
  push:
    branches:
      - '**'
    tags:
      - '*'
  pull_request:
    paths:
      - 'cvmfs/**'
      - 'test/cvmfs/**'
      - '.github/workflows/cvmfs.yml'

jobs:
  build_test_publish:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      # Gate the expensive steps below on whether CVMFS-related paths changed.
      - name: Detect CVMFS changes
        id: changes
        uses: dorny/paths-filter@v3
        with:
          filters: |
            cvmfs:
              - 'cvmfs/**'
              - 'test/cvmfs/**'
              - '.github/workflows/cvmfs.yml'

      - name: Run CVMFS sidecar tests
        if: github.event_name == 'pull_request' || steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/')
        run: bash test/cvmfs/test.sh

      # Image version: the tag name for tag pushes, otherwise
      # "<branch-name-with-slashes-replaced-by-dashes>-<7-char-sha>".
      - name: Set image version
        id: version
        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
        run: |
          set -euo pipefail
          if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
            version="${GITHUB_REF_NAME}"
          else
            ref="${GITHUB_REF_NAME//\//-}"
            version="${ref}-${GITHUB_SHA::7}"
          fi
          echo "version=$version" >> "$GITHUB_OUTPUT"

      - name: Set up Docker Buildx
        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
        uses: docker/setup-buildx-action@v3

      - name: Login to Quay IO
        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: '$oauthtoken'
          password: ${{ secrets.QUAY_OAUTH_TOKEN }}

      - name: Build and push CVMFS image
        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
        uses: docker/build-push-action@v6
        with:
          # Build context is the cvmfs/ subdirectory of the repository.
          context: "{{defaultContext}}:cvmfs"
          push: true
          tags: quay.io/bgruening/cvmfs:${{ steps.version.outputs.version }}
          cache-from: type=gha
          cache-to: type=gha,mode=max


================================================
FILE: .github/workflows/lint.yml
================================================
# Lint shell scripts and Dockerfiles on every push, reporting findings
# as GitHub check annotations via reviewdog.
name: Lint
on: [push]
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout
      uses: actions/checkout@v6
    # - name: Cleanup to only use compose
    #   run: rm -R docs galaxy test
    # shellcheck: static analysis of every *.sh file, warning level and above.
    - name: Run shellcheck with reviewdog
      uses: reviewdog/action-shellcheck@v1.27.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        reporter: github-check
        level: warning
        pattern: "*.sh"
    # hadolint: Dockerfile best-practice linting.
    - name: Run hadolint with reviewdog
      uses: reviewdog/action-hadolint@v1.46.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        reporter: github-check


================================================
FILE: .github/workflows/pull-request.yml
================================================
name: pr-test
on: pull_request
jobs:
  test:
    if: false  # Temporarily disable workflow
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        infrastructure:
          - name: galaxy-base
            files: -f docker-compose.yml
          - name: galaxy-proxy-prefix
            files: -f docker-compose.yml
            env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND="not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path"
            exclude_test:
              - selenium
          - name: galaxy-htcondor
            files: -f docker-compose.yml -f docker-compose.htcondor.yml
          - name: galaxy-slurm
            files: -f docker-compose.yml -f docker-compose.slurm.yml
            env: SLURM_NODE_COUNT=3
            options: --scale slurm_node=3
          - name: galaxy-pulsar
            files: -f docker-compose.yml -f docker-compose.pulsar.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
            exclude_test:
              - workflow_quality_control
          - name: galaxy-pulsar-mq
            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
            exclude_test:
              - workflow_quality_control
          - name: galaxy-k8s
            files: -f docker-compose.yml -f docker-compose.k8s.yml
          - name: galaxy-singularity
            files: -f docker-compose.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
          - name: galaxy-pulsar-mq-singularity
            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver"
            exclude_test:
              - workflow_quality_control
          - name: galaxy-slurm-singularity
            files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
          - name: galaxy-htcondor-singularity
            files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml
            env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
        test:
          - name: bioblend
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml
            exit-from: galaxy-bioblend-test
            timeout: 60
            second_run: "true"
          - name: workflow_ard
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: sklearn/ard/ard.ga
            timeout: 60
            second_run: "true"
          - name: workflow_quality_control
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: training/sequence-analysis/quality-control/quality_control.ga
            timeout: 60
          - name: workflow_example1
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
            exit-from: galaxy-workflow-test
            workflow: example1/wf3-shed-tools.ga
            timeout: 60
          - name: selenium
            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml
            exit-from: galaxy-selenium-test
            timeout: 60
      fail-fast: false
    steps:
      # Hand-rolled `exclude`, since GitHub Actions currently does not support
      # excluding/including dicts in matrices
      - name: Check if test should be run
        id: run_check
        if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true
        run: echo "run=true" >> $GITHUB_OUTPUT
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set WORKFLOWS env for workflows-test
        if: matrix.test.workflow
        run: echo "WORKFLOWS=${{ matrix.test.workflow }}" >> $GITHUB_ENV
      - name: Build galaxy-container-base
        env:
          image_name: galaxy-container-base
        run: |
          docker buildx build  \
            --output "type=image,name=quay.io/bgruening/$image_name:ci-testing" \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --build-arg IMAGE_TAG=ci-testing \
            $image_name
        working-directory: ./compose/base-images
      - name: Build galaxy-cluster-base
        env:
          image_name: galaxy-cluster-base
        run: |
          docker buildx build  \
            --output "type=image,name=quay.io/bgruening/$image_name:ci-testing" \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            --build-arg IMAGE_TAG=ci-testing \
            $image_name
        working-directory: ./compose/base-images
      - name: Install Docker Compose
        run: |
          sudo apt-get update -qq && sudo apt-get install docker-compose -y
      - name: Run tests for the first time
        if: steps.run_check.outputs.run
        run: |
          export IMAGE_TAG=ci-testing
          export COMPOSE_DOCKER_CLI_BUILD=1
          export DOCKER_BUILDKIT=1
          export ${{ matrix.infrastructure.env }}
          export TIMEOUT=${{ matrix.test.timeout }}
          docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} config
          env
          for i in {1..4}; do
            echo "Running test - try \#$i"
            echo "Removing export directory if existent";
            sudo rm -rf export
            set +e
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} build --build-arg IMAGE_TAG=ci-testing --build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
            test_exit_code=$?
            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
              echo "Test failed..";
              continue;
            else
              exit $test_exit_code;
            fi
          done;
          exit 1
        shell: bash
        working-directory: ./compose
        continue-on-error: false
      - name: Fix file names before saving artifacts
        if: failure()
        run: |
          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
      - name: Allow upload-artifact read access
        if: failure()
        run: sudo chmod -R +r ./compose/export/galaxy/database
      - name: Save artifacts for debugging a failed test
        uses: actions/upload-artifact@v6
        if: failure()
        with:
          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run
          path: ./compose/export/galaxy/database
      - name: Clean up after first run
        if: matrix.test.second_run == 'true'
        run: |
          sudo rm -rf export/postgres
          sudo rm -rf export/galaxy/database
        working-directory: ./compose
      - name: Run tests a second time
        if: matrix.test.second_run == 'true' && steps.run_check.outputs.run
        run: |
          export IMAGE_TAG=ci-testing
          export COMPOSE_DOCKER_CLI_BUILD=1
          export DOCKER_BUILDKIT=1
          export ${{ matrix.infrastructure.env }}
          export TIMEOUT=${{ matrix.test.timeout }}
          for i in {1..4}; do
            echo "Running test - try \#$i"
            echo "Removing export directory if existent";
            sudo rm -rf export
            set +e
            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
            test_exit_code=$?
            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
              echo "Test failed..";
              continue;
            else
              exit $test_exit_code;
            fi
          done;
          exit 1
        shell: bash
        working-directory: ./compose
        continue-on-error: false
      - name: Fix file names before saving artifacts
        if: failure() && matrix.test.second_run == 'true'
        run: |
          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
      - name: Allow upload-artifact read access
        if: failure() && matrix.test.second_run == 'true'
        run: sudo chmod -R +r ./compose/export/galaxy/database
      - name: Save artifacts for debugging a failed test
        uses: actions/upload-artifact@v6
        if: failure() && matrix.test.second_run == 'true'
        with:
          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run
          path: ./compose/export/galaxy/database


================================================
FILE: .github/workflows/release.yml
================================================
name: release-CI

on:
  release:
    types: [published]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  build_and_publish:
    runs-on: ubuntu-latest

    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v6
          
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        
      - name: Login to Quay IO
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: '$oauthtoken'
          password: ${{ secrets.QUAY_OAUTH_TOKEN }}
    
      - name: Build docker image and push to quay.io
        uses: docker/build-push-action@v6
        with:
          context: "{{defaultContext}}:galaxy"
          push: true
          tags: quay.io/bgruening/galaxy:${{ github.event.release.tag_name }}
          cache-from: type=gha
          cache-to: type=gha,mode=max



================================================
FILE: .github/workflows/single.sh
================================================
#!/bin/bash
# CI test script for the single-container Galaxy image: builds the image,
# starts it, and runs functional tests (cluster submission, HTTPS, FTP/SFTP,
# CVMFS, BioBlend, tool installation, data persistence).
set -ex

# Log Docker client/daemon details for debugging CI failures.
docker --version
docker info

# These credentials match the image's built-in default admin account.
export GALAXY_HOME=/home/galaxy
export GALAXY_USER=admin@example.org
export GALAXY_USER_EMAIL=admin@example.org
export GALAXY_USER_PASSWD=password
export BIOBLEND_GALAXY_API_KEY=fakekey
export BIOBLEND_GALAXY_URL=http://localhost:8080
# Ephemeris image used by galaxy_wait below; overridable from the environment.
export EPHEMERIS_IMAGE=${EPHEMERIS_IMAGE:-quay.io/biocontainers/ephemeris:0.10.11--pyhdfd78af_0}
export GALAXY_WAIT_TIMEOUT=${GALAXY_WAIT_TIMEOUT:-600}

# Optional tests are skipped when their helper tools are missing
# (only relevant for local, non-CI runs).
SKIP_SFTP=false
SKIP_DIVE=false

# On CI, install sshpass for the SFTP test; locally, skip that test if absent.
if [[ "${CI:-}" == "true" ]]; then
    sudo apt-get update -qq
    #sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::="--force-confmiss" -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew"
    sudo apt-get install sshpass --no-install-recommends -y
else
    if ! command -v sshpass >/dev/null 2>&1; then
        echo "sshpass not found; skipping SFTP test."
        SKIP_SFTP=true
    fi
fi

# On CI, fetch and install the latest dive release for image-size analysis;
# locally, skip the analysis when dive is not already installed.
if [[ "${CI:-}" == "true" ]]; then
    DIVE_VERSION=$(curl -sL "https://api.github.com/repos/wagoodman/dive/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
    curl -OL https://github.com/wagoodman/dive/releases/download/v${DIVE_VERSION}/dive_${DIVE_VERSION}_linux_amd64.deb
    sudo apt install ./dive_${DIVE_VERSION}_linux_amd64.deb
    rm ./dive_${DIVE_VERSION}_linux_amd64.deb
else
    if ! command -v dive >/dev/null 2>&1; then
        echo "dive not found; skipping image analysis."
        SKIP_DIVE=true
    fi
fi

galaxy_wait() {
    # Block until the Galaxy web endpoint responds or the timeout expires.
    # $1 (optional) - timeout in seconds; defaults to $GALAXY_WAIT_TIMEOUT.
    local wait_timeout="${1:-$GALAXY_WAIT_TIMEOUT}"
    docker run --rm --link galaxy:galaxy \
        "${EPHEMERIS_IMAGE}" galaxy-wait -g http://galaxy --timeout "$wait_timeout"
}

# start building this repo
# Open up /tmp so the container's unprivileged user can write to the
# bind-mounted /tmp (UID 1450 is presumably the in-container galaxy
# user -- TODO confirm against the Dockerfile).
if [[ "${CI:-}" == "true" ]]; then
    sudo chown 1450 /tmp && sudo chmod a=rwx /tmp
fi

## define a container size check function, first parameter is the container name, second the max allowed size in MB
container_size_check () {

    # check that the image size is not growing too much between releases
    # the 19.05 monolithic image was around 1.500 MB
    # BUGFIX: the original used "${docker image inspect ...}" (parameter
    # expansion), which is a runtime "bad substitution" error in bash --
    # command substitution $(...) is required here.
    size=$(docker image inspect "$1" --format='{{.Size}}')
    size_in_mb=$((size / (1024 * 1024)))
    if [[ $size_in_mb -ge $2 ]]
    then
        # Only warn and pause; the hard failure is intentionally disabled.
        echo "The new compiled image ($1) is larger than allowed. $size_in_mb vs. $2"
        sleep 2
        #exit
    fi
}

# Working directory: the checkout root on GitHub CI, else the current dir.
export WORKING_DIR=${GITHUB_WORKSPACE:-$PWD}

export DOCKER_RUN_CONTAINER="quay.io/bgruening/galaxy"
SAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml
# Optionally bind-mount a CI-specific container resolvers config, if present.
GALAXY_EXTRA_MOUNTS=()
if [ -f "$WORKING_DIR/test/container_resolvers_conf.ci.yml" ]; then
    GALAXY_EXTRA_MOUNTS+=(-v "$WORKING_DIR/test/container_resolvers_conf.ci.yml:/etc/galaxy/container_resolvers_conf.yml:ro")
fi
# Build the image with GitHub Actions layer caching; --load makes the result
# available to the local Docker daemon.
cd "$WORKING_DIR"
docker buildx build \
    --load \
    --cache-from type=gha \
    --cache-to type=gha,mode=max \
    -t quay.io/bgruening/galaxy \
    galaxy/
#container_size_check   quay.io/bgruening/galaxy  1500

# Remove leftovers from previous runs, then start the container under test
# with web (80), FTP (21) and SFTP (22) ports published on the host.
docker rm -f galaxy httpstest || true
mkdir -p local_folder
docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \
    --name galaxy \
    --privileged=true \
    -v "$(pwd)/local_folder:/export/" \
    "${GALAXY_EXTRA_MOUNTS[@]}" \
    -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
    -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
    -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
    -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
    -v /tmp/:/tmp/ \
    quay.io/bgruening/galaxy

# Give the container a head start, then show its boot log.
sleep 30
docker logs galaxy
# Define start functions
# Execute a command inside the already-running "galaxy" container.
docker_exec() {
    cd "$WORKING_DIR"
    docker exec galaxy "$@"
}

# Run a command in a fresh, throwaway container of the built image.
docker_exec_run() {
    cd "$WORKING_DIR"
    docker run quay.io/bgruening/galaxy "$@"
}

# Plain `docker run` wrapper that first resets the working directory.
docker_run() {
    cd "$WORKING_DIR"
    docker run "$@"
}

docker ps

# Test submitting jobs to an external slurm cluster
cd "${WORKING_DIR}/test/slurm/" && bash test.sh && cd "$WORKING_DIR"

# Test submitting jobs to an external gridengine cluster
# (paths quoted for consistency with the SLURM test above; `|| exit 1`
# keeps the failure explicit even though `set -e` is active)
cd "$WORKING_DIR/test/gridengine/" && bash test.sh || exit 1 && cd "$WORKING_DIR"

echo "SLURM and SGE tests have finished."

# Galaxy can take a long time on first boot (DB migration, config
# generation), so poll readiness in short rounds and dump recent logs
# between attempts for easier CI debugging.
docker ps
echo 'Waiting for Galaxy to come up.'
galaxy_wait_timeout=$GALAXY_WAIT_TIMEOUT
galaxy_wait_interval=30
galaxy_wait_end=$((SECONDS + galaxy_wait_timeout))
while [ $SECONDS -lt $galaxy_wait_end ]; do
    # Each round probes for at most 30s; stop as soon as Galaxy answers.
    if galaxy_wait 30; then
        break
    fi
    echo "Galaxy still starting, tailing logs..."
    docker logs --tail 200 galaxy || true
    sleep $galaxy_wait_interval
done
# Overall deadline exceeded: dump a larger log window and fail the build.
if [ $SECONDS -ge $galaxy_wait_end ]; then
    echo "Galaxy did not become ready within ${galaxy_wait_timeout}s."
    docker logs --tail 400 galaxy || true
    exit 1
fi

# Basic API smoke test over plain HTTP.
curl -v --fail $BIOBLEND_GALAXY_URL/api/version

# Test self-signed HTTPS
docker_run -d --name httpstest -p 443:443 -e "USE_HTTPS=True" $DOCKER_RUN_CONTAINER
sleep 30
docker logs httpstest

# Allow generous extra startup time before probing the HTTPS endpoint.
sleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version
# Verify the served (self-signed) certificate was issued for localhost.
echo | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost

docker rm -f httpstest || true

# Test FTP Server upload
date > time.txt
# FIXME passive mode does not work, it would require the container to run with --net=host
#curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true
# Test FTP Server get
#curl -v --fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD

# Test SFTP Server
if [[ "$SKIP_SFTP" != "true" ]]; then
    sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o "StrictHostKeyChecking no" localhost <<< $'put time.txt'
fi

# Test FTP Server from within the container (avoids host NAT/passive issues)
docker_exec python - <<'PY'
import ftplib

ftp = ftplib.FTP()
ftp.connect("localhost", 21, timeout=30)
ftp.login("admin@example.org", "password")
ftp.retrlines("LIST")
ftp.quit()
PY

# Test CVMFS
docker_exec bash -c "service autofs start"
docker_exec bash -c "cvmfs_config chksetup"
docker_exec bash -c "ls /cvmfs/data.galaxyproject.org/byhand"

# Run a ton of BioBlend test against our servers.
cd "$WORKING_DIR/test/bioblend/" && . ./test.sh && cd "$WORKING_DIR/"

# Test without install-repository wrapper
# BUGFIX: the original omitted `-X`, so curl treated "POST" as a second URL
# ("Could not resolve host: POST") instead of setting the request method.
curl -v --fail -X POST -H "Content-Type: application/json" -H "x-api-key: fakekey" -d \
    '{
        "tool_shed_url": "https://toolshed.g2.bx.psu.edu",
        "name": "cut_columns",
        "owner": "devteam",
        "changeset_revision": "cec635fab700",
        "new_tool_panel_section_label": "BEDTools"
    }' \
"http://localhost:8080/api/tool_shed_repositories"


# Test the 'new' tool installation script
docker_exec install-tools "$SAMPLE_TOOLS"
# Test the Conda installation
docker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'

# Test if data persistence works
# Replace the container entirely, keeping only the /export bind mount, to
# verify that state (installed tools, database) survives.
docker stop galaxy
docker rm -f galaxy

cd "$WORKING_DIR"
docker run -d -p 8080:80 \
    --name galaxy \
    --privileged=true \
    -v "$(pwd)/local_folder:/export/" \
    "${GALAXY_EXTRA_MOUNTS[@]}" \
    -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
    -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
    -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
    -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
    -v /tmp/:/tmp/ \
    quay.io/bgruening/galaxy

echo 'Waiting for Galaxy to come up.'
galaxy_wait "$GALAXY_WAIT_TIMEOUT"

# Test if the tool installed previously is available
curl -v --fail 'http://localhost:8080/api/tools/toolshed.g2.bx.psu.edu/repos/devteam/cut_columns/Cut1/1.0.2'

# analyze image using dive tool
# CI=true runs dive non-interactively so it can pass/fail the build.
if [[ "$SKIP_DIVE" == "true" ]]; then
    echo "Skipping dive image analysis (dive not installed)."
else
    CI=true dive quay.io/bgruening/galaxy
fi

# Final cleanup of the test container and image.
docker stop galaxy
docker rm -f galaxy
docker rmi -f $DOCKER_RUN_CONTAINER || true


================================================
FILE: .github/workflows/single_container.yml
================================================
# Builds the monolithic Galaxy image and runs the full single-container
# test suite (.github/workflows/single.sh) on every push and pull request.
name: Single Container Test
on: [push, pull_request]
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10']
    steps:
    - name: Checkout
      uses: actions/checkout@v6
    # Move the Docker data root to /mnt, which has more free space than the
    # default root partition on GitHub-hosted runners.
    - name: Configure Docker data-root
      run: |
        sudo mkdir -p /mnt/docker
        if [ ! -f /etc/docker/daemon.json ]; then
          echo '{}' | sudo tee /etc/docker/daemon.json
        fi
        sudo jq '."data-root"="/mnt/docker"' /etc/docker/daemon.json > /tmp/docker_daemon.json
        sudo mv /tmp/docker_daemon.json /etc/docker/daemon.json
        sudo systemctl daemon-reload
        sudo systemctl restart docker
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3
    - uses: actions/setup-python@v6
      with:
        python-version: ${{ matrix.python-version }}
    - name: Build and Test
      run: bash .github/workflows/single.sh


================================================
FILE: .github/workflows/update-site.yml
================================================
# Regenerates the HTML documentation from README.md and publishes it to
# GitHub Pages whenever the README changes on the main branch.
name: Deploy Documentation

on:
  push:
    branches:
      - main
    paths:
      - 'README.md'

jobs:
  deploy_docs:
    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        uses: actions/checkout@v6
        with:
          # The deploy step uses its own token; no need to persist credentials.
          persist-credentials: false

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.12"
          cache: "pip"

      - name: Install python dependencies
        run: pip install -r docs/src/requirements.txt

      - name: Generate documentation
        run: python docs/src/generate_docs.py

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./docs


================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Rope
.ropeproject

# Django stuff:
*.log
*.pot

# Sphinx documentation
docs/_build/

# Export folder for docker-compose setup
compose/export
compose-v2/export

.DS_Store


================================================
FILE: .travis.yml
================================================
sudo: required

language: python
python: 3.10

services:
  - docker

env:
  matrix:
    - TOX_ENV=py310
  global:
    - secure: "SEjcKJQ0NGXdpFxFhLVlyJmiBvgiLtR5Uufg90Vm3owKlMy0NSfIrOR+2dwNniqOp7QI3eVepnqjid/Ka0QStzVqMCe55OLkJ/TbTHnMLpbtY63mpGfogVRvxMMAVpzLpcQqtJFORZmO/MIWSLlBiXMMzOg3+tbXvQXmL17Rbmw="

matrix:
  allow_failures:
    - env: KUBE=True

git:
  submodules: false

before_install:
  - set -e
  - export GALAXY_HOME=/home/galaxy
  - export GALAXY_USER=admin@example.org
  - export GALAXY_USER_EMAIL=admin@example.org
  - export GALAXY_USER_PASSWD=password
  - export BIOBLEND_GALAXY_API_KEY=fakekey
  - export BIOBLEND_GALAXY_URL=http://localhost:8080

  - sudo apt-get update -qq
  - sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::="--force-confmiss" -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew"
  - sudo apt-get install sshpass --no-install-recommends -y
  - pip install ephemeris

  - docker --version
  - docker info


  # start building this repo
  - sudo chown 1450 /tmp && sudo chmod a=rwx /tmp
  - export WORKING_DIR="$TRAVIS_BUILD_DIR"
  - export DOCKER_RUN_CONTAINER="quay.io/bgruening/galaxy"
  - export INSTALL_REPO_ARG=""
  - export SAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml
  - travis_wait 30 cd "$WORKING_DIR" && docker build -t quay.io/bgruening/galaxy galaxy/
  - |
    ## define a container size check function, first parameter is the container name, second the max allowed size in MB
    container_size_check () {

        # check that the image size is not growing too much between releases
        # the 19.05 monolithic image was around 1.500 MB
        size=`docker image inspect $1 --format='{{.Size}}'`
        size_in_mb=$(($size/(1024*1024)))
        if [[ $size_in_mb -ge $2 ]]
        then
            echo "The new compiled image ($1) is larger than allowed. $size_in_mb vs. $2"
            sleep 2
            #exit
        fi
    }
    container_size_check   quay.io/bgruening/galaxy  1500

    mkdir local_folder
    docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \
        --name galaxy \
        --privileged=true \
        -v `pwd`/local_folder:/export/ \
        -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
        -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
        -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
        -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
        -v /tmp/:/tmp/ \
        quay.io/bgruening/galaxy

    sleep 30
    docker logs galaxy
    # Define start functions
    docker_exec() {
      cd $WORKING_DIR
      docker exec -t -i galaxy "$@"
    }
    docker_exec_run() {
      cd $WORKING_DIR
      docker run quay.io/bgruening/galaxy "$@"
    }
    docker_run() {
      cd $WORKING_DIR
      docker run "$@"
    }

  - docker ps

script:
  - set -e
  # Test submitting jobs to an external slurm cluster
  - cd $TRAVIS_BUILD_DIR/test/slurm/ && bash test.sh && cd $WORKING_DIR
  # Test submitting jobs to an external gridengine cluster
  # TODO 19.05, need to enable this again!
  # - cd $TRAVIS_BUILD_DIR/test/gridengine/ && bash test.sh && cd $WORKING_DIR

  - echo 'Waiting for Galaxy to come up.'
  - galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 600

  - curl -v --fail $BIOBLEND_GALAXY_URL/api/version

  # Test self-signed HTTPS
  - docker_run -d --name httpstest -p 443:443 -e "USE_HTTPS=True" $DOCKER_RUN_CONTAINER

  - sleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version
  - echo | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost

  - docker logs httpstest && docker stop httpstest && docker rm httpstest

  # Test FTP Server upload
  - date > time.txt && curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true
  # Test FTP Server get
  - curl -v --fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD

  # Test CVMFS
  - docker_exec bash -c "service autofs start"
  - docker_exec bash -c "cvmfs_config chksetup"
  - docker_exec bash -c "ls /cvmfs/data.galaxyproject.org/byhand"

  # Test SFTP Server
  - sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o "StrictHostKeyChecking no" localhost <<< $'put time.txt'

  # Run a ton of BioBlend test against our servers.
  - cd $TRAVIS_BUILD_DIR/test/bioblend/ && . ./test.sh && cd $WORKING_DIR/

  # not working anymore in 18.01
  # executing: /galaxy_venv/bin/uwsgi --yaml /etc/galaxy/galaxy.yml --master --daemonize2 galaxy.log --pidfile2 galaxy.pid  --log-file=galaxy_install.log --pid-file=galaxy_install.pid
  # [uWSGI] getting YAML configuration from /etc/galaxy/galaxy.yml
  # /galaxy_venv/bin/python: unrecognized option '--log-file=galaxy_install.log'
  # getopt_long() error
  # cat: galaxy_install.pid: No such file or directory
  # tail: cannot open ‘galaxy_install.log’ for reading: No such file or directory
  #- |
  #  if [ "${COMPOSE_SLURM}" ] || [ "${KUBE}" ] || [ "${COMPOSE_CONDOR_DOCKER}" ] || [ "${COMPOSE_SLURM_SINGULARITY}" ]
  #  then
  #      # Test without install-repository wrapper
  #      sleep 10
  #      docker_exec_run bash -c 'cd $GALAXY_ROOT_DIR && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools'
  #  fi


  # Test the 'new' tool installation script
  - docker_exec install-tools "$SAMPLE_TOOLS"
  # Test the Conda installation
  - docker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'


after_success:
  - |
    if [ "$TRAVIS_PULL_REQUEST" == "false" -a "$TRAVIS_BRANCH" == "master" ]
    then
        cd ${TRAVIS_BUILD_DIR}
        echo "Generate and deploy html documentation"
        ./docs/bin/deploy_docs
    fi


notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/559f5480ac7a4ef238af
    on_success: change
    on_failure: always
    on_start: never


================================================
FILE: Changelog.md
================================================
# Changelog

## 0.1: Initial release!
    - with Apache2, PostgreSQL and Tool Shed integration
## 0.2: complete new Galaxy stack.
   - with nginx, uwsgi, proftpd, docker, supervisord and SLURM
## 0.3: Add Interactive Environments
   - IPython in docker in Galaxy in docker
   - advanced logging
## 0.4:
   - base the image on toolshed/requirements with all required Galaxy dependencies
   - use Ansible roles to build large parts of the image
   - export the supervisord web interface on port 9002
   - enable Galaxy reports webapp
## 15.07:
  - `install-biojs` can install BioJS visualisations into Galaxy
  - `add-tool-shed` can be used to activate third party Tool Sheds in child Dockerfiles
  - many documentation improvements
  - RStudio is now part of Galaxy and this Image
  - configurable postgres UID/GID by @chambm
  - smarter starting of postgres during Tool installations by @shiltemann
## 15.10:
  - new Galaxy 15.10 release
  - fix https://github.com/bgruening/docker-galaxy-stable/issues/94
## 16.01:
  - enable Travis testing for all builds and PR
  - offer new [yaml based tool installations](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample)
  - enable dynamic UWSGI processes and threads with `-e UWSGI_PROCESSES=2` and `-e UWSGI_THREADS=4`
  - enable dynamic Galaxy handlers `-e GALAXY_HANDLER_NUMPROCS=2`
  - Addition of a new `lite` mode contributed by @kellrott
  - first release with Jupyter integration
## 16.04:
  - include a Galaxy-bare mode, enable with `-e BARE=True`
  - first release with [HTCondor](https://research.cs.wisc.edu/htcondor/) installed and pre-configured
## 16.07:
  - documentation and tests updates for SLURM integration by @mvdbeek
  - first version with initial Docker compose support (proftpd ✔️)
  - SFTP support by @zfrenchee
## 16.10:
   - [HTTPS support](https://github.com/bgruening/docker-galaxy-stable/pull/240 ) by @zfrenchee and @mvdbeek
## 17.01:
  - enable Conda dependency resolution by default
  - [new Galaxy version](https://docs.galaxyproject.org/en/master/releases/17.01_announce.html)
  - more compose work (slurm, postgresql)
## 17.05:
   - add PROXY_PREFIX variable to enable automatic configuration of Galaxy running under some prefix (@abretaud)
   - enable quota by default (just the functionality, not any specific value)
   - HT-Condor is now supported in compose with semi-autoscaling and BioContainers
   - Galaxy Docker Compose is completely under Travis testing and available with SLURM and HT-Condor
   - using Docker `build-arg`s for GALAXY_RELEASE and GALAXY_REPO
## 17.09:
   - much improved documentation about using Galaxy Docker and an external cluster (@rhpvorderman)
   - CVMFS support - mounting in 4TB of pre-build reference data (@chambm)
   - Singularity support and tests (compose only)
   - more work on K8s support and testing (@jmchilton)
   - using .env files to configure the compose setup for SLURM, Condor, K8s, SLURM-Singularity, Condor-Docker
## 18.01:
   - tracking the Galaxy release_18.01 branch
   - uwsgi work to adopt to changes for 18.01
   - remove nodejs-legacy & npm from Dockerfile and install latest version from ansible-extras
   - initial galaxy.ini → galaxy.yml integration
   - grafana and influxdb container (compose)
   - Galaxy telegraf integration to push to influxdb (compose)
   - added some documentation (compose)
## 18.05:
   - Nothing very special, but an awesome Galaxy release as usual
## 18.09:
   - new and more powerful orchestration build script (build-orchestration-images.sh) by @pcm32
   - a lot of bug-fixes to the compose setup by @abretaud
## 19.01:
   - This is featuring the latest and greatest from the Galaxy community
   - Please note that this release will be the last release which is based on `ubuntu:14.04` and PostgreSQL 9.3.
     We will migrate to `ubuntu:18.04` and a newer PostgreSQL version in `19.05`. Furthermore, we will not
     support old Galaxy tool dependencies.
## 19.05:
   - The image is now based on `ubuntu:18.04` (instead of ubuntu:14.04 previously) and PostgreSQL 11.5 (9.3 previously).
     See [migration documentation](#Postgresql-migration) to migrate the postgresql database from 9.3 to 11.5.
   - We no longer support old Galaxy tool dependencies.
## 20.05:
   - Featuring Galaxy 20.05
   - Completely reworked compose setup
   - The default admin password and apikey (`GALAXY_DEFAULT_ADMIN_PASSWORD` and `GALAXY_DEFAULT_ADMIN_KEY`) have changed: the password is now `password` (instead of `admin`) and the apikey `fakekey` (instead of `admin`).
## 20.09:
   - Featuring Galaxy 20.09
## 24.1:
   - Deprecating the `compose` setup.
   - Complete new setup, adjusting to the latest Galaxy stack.
   - Base Ubuntu Image: Upgraded from version 18.04 to 22.04
   - Galaxy: Upgraded from version 20.09 to 24.1
   - PostgreSQL: Upgraded from version 11 to 15
   - Python3: Upgraded from version 3.7 to 3.10 (Python 3.10 is set as the default interpreter)
   - The dockerfile now uses a multi-stage build to reduce the final image size and include only necessary files.
   - New Service Support:
     - Gunicorn: Replaces uWSGI as the web server for Galaxy. Installed by default inside Galaxy's virtual environment. Configured Nginx to proxy Gunicorn enabled on port 4001.
     - Celery: Installed by default inside Galaxy's virtual environment. Enabled Celery for distributed task queues and Celery Beat for periodic task running. RabbitMQ serves as the broker for Celery (if RabbitMQ is disabled, it defaults to PostgreSQL database connection).
     - Redis is used as the backend for Celery (if Redis is disabled, it defaults to a SQLite database). Flower service is added for monitoring and debugging Celery.
     - RabbitMQ Management: Enabled the RabbitMQ management plugin on port 15672 for managing and monitoring the RabbitMQ server. The dashboard is exposed via Nginx and is accessible at the /rabbitmq/ path. The default access credentials are admin:admin.
     - Flower: Added Flower service on port 5555 for monitoring and debugging Celery. The dashboard is exposed via Nginx and is available at the /flower/ path. The default access credentials are admin:admin.
     - TUSd: Added TUSd server on port 1080 to support fault-tolerant uploads; Nginx is configured to proxy TUSd.
     - gx-it-proxy: Added gx-it-proxy service on port 4002 to support Interactive Tools.
   - Ansible Playbooks:
     - Migrated from galaxyextras git submodule to using maintained ansible roles.
     - Added configure_rabbitmq_users.yml Ansible playbook, which removes the default guest user and adds admin, galaxy, and flower users for RabbitMQ during container startup.
   - Environment Variables:
     - Added `GUNICORN_WORKERS` and `CELERY_WORKERS` magic environment variables to set the number of Gunicorn and Celery workers, respectively, during container startup.
   - Configuration Changes:
     - Replaced the Galaxy Reports sample configuration file.
     - Removed galaxy_web, handlers, reports, and ie_proxy services from Supervisor.
     - Added Gravity for managing Galaxy services such as Gunicorn, Celery, gx-it-proxy, TUSd, reports, and handlers. It uses Supervisor as the process manager, with the configuration file located at /etc/galaxy/gravity.yml.
     - Added support for dynamic handlers (set as the default handler type).
     - Redis and Flower services are now managed by Supervisor.
     - Since Galaxy Interactive Environments are deprecated, they have been replaced by Interactive Tools (ITs). The sample configuration file tools_conf_interactive.xml.sample is placed inside GALAXY_CONFIG_DIR. Nginx is also configured to support both domain and path-based ITs.
     - Switched to using the cvmfs-config.galaxyproject.org repository for automatic configuration and updates of Galaxy project CVMFS repositories. Updated tool data table config path to include CVMFS locations from data.galaxyproject.org in --privileged mode.
     - Enabled IPv6 support in Nginx for ports 80 and 443.
     - Added Subject Alternative Name (SAN) extension (DNS:localhost and IP:127.0.0.1) while generating a self-signed SSL certificate.
     - Ensured the Nginx SSL certificate is trusted system-wide by adding it to the CA store.
     - Updated Galaxy extra dependencies.
     - Added docker_net, docker_auto_rm, and docker_set_user parameters for Docker-enabled job destinations.
     - Added update_yaml_value.py script to update nested key values in a YAML file.
     - Replaced ie_proxy with gx-it-proxy.
     - Replaced nginx_upload_module with TUSd for delegated uploads.
   - CI Tests
     - Added dive tool for analyzing the docker image
     - Added test for check data persistence


================================================
FILE: LICENSE
================================================
The MIT License (MIT)

Copyright (c) 2014 Björn Grüning

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
[![DOI](https://zenodo.org/badge/5466/bgruening/docker-galaxy-stable.svg)](https://zenodo.org/badge/latestdoi/5466/bgruening/docker-galaxy-stable)
[![Build Status](https://travis-ci.org/bgruening/docker-galaxy-stable.svg?branch=master)](https://travis-ci.org/bgruening/docker-galaxy-stable)
[![Docker Repository on Quay](https://quay.io/repository/bgruening/galaxy/status "Docker Repository on Quay")](https://quay.io/repository/bgruening/galaxy)
[![Gitter](https://badges.gitter.im/bgruening/docker-galaxy-stable.svg)](https://gitter.im/bgruening/docker-galaxy-stable?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
![docker pulls](https://img.shields.io/docker/pulls/bgruening/galaxy-stable.svg) ![docker stars](https://img.shields.io/docker/stars/bgruening/galaxy-stable.svg)
[![docker image stats](https://images.microbadger.com/badges/image/bgruening/galaxy-stable.svg)](https://microbadger.com/images/bgruening/galaxy-stable "Get your own image badge on microbadger.com")

Galaxy Docker Image
===================

The [Galaxy](http://www.galaxyproject.org) [Docker](http://www.docker.io) Image is an easy distributable full-fledged Galaxy installation, that can be used for testing, teaching and presenting new tools and features.

One of the main goals is to make access to entire tool suites as easy as possible. Usually,
this includes the setup of a publicly available web service that needs to be maintained, or requires the tool user to either set up a Galaxy server on their own or to have admin access to a local Galaxy server.
With docker, tool developers can create their own Image with all dependencies and the user only needs to run it within docker.

The Image is based on [Ubuntu 24.04 LTS](http://releases.ubuntu.com/24.04/) and all recommended Galaxy requirements are installed. The following chart should illustrate the [Docker](http://www.docker.io) image hierarchy we have built to make it as easy as possible to build on different layers of our stack and create many exciting Galaxy flavors.

![Docker hierarchy](https://raw.githubusercontent.com/bgruening/docker-galaxy-stable/master/chart.png)

Breaking changes
================

:information_source: After a long pause, due to interesting times at the beginning of the "golden 2020s", we are finally back with release `24.1`. Many things have changed in Galaxy.
It is deployed completely differently and gained many new features with many new dependencies. We recommend starting with a fresh `/export` folder and contacting us if you encounter any problems. 

# Table of Contents <a name="toc" />

- [Usage](#Usage)
  - [Upgrading images](#Upgrading-images)
    - [PostgreSQL migration](#Postgresql-migration)
  - [Enabling Interactive Tools in Galaxy](#Enabling-Interactive-Tools-in-Galaxy)
  - [Using passive mode FTP or SFTP](#Using-passive-mode-FTP-or-SFTP)
  - [Using Parent docker](#Using-Parent-docker)
  - [RabbitMQ Management](#RabbitMQ-Management)
  - [Flower Webapp](#Flower-Webapp)
  - [Galaxy's config settings](#Galaxys-config-settings)
  - [Configuring Galaxy's behind a proxy](#Galaxy-behind-proxy)
  - [On-demand reference data with CVMFS](#cvmfs)
  - [Personalize your Galaxy](#Personalize-your-Galaxy)
  - [Deactivating services](#Deactivating-services)
  - [Restarting Galaxy](#Restarting-Galaxy)
  - [Advanced Logging](#Advanced-Logging)
  - [Running on an external cluster (DRM)](#Running-on-an-external-cluster-(DRM))
    - [Basic setup for the filesystem](#Basic-setup-for-the-filesystem)
    - [Using an external Slurm cluster](#Using-an-external-Slurm-cluster)
    - [Using an external Grid Engine cluster](#Using-an-external-Grid-Engine-cluster)
    - [Tips for Running Jobs Outside the Container](#Tips-for-Running-Jobs-Outside-the-Container)
- [Enable Galaxy to use BioContainers (Docker)](#auto-exec-tools-in-docker)
- [Magic Environment variables](#Magic-Environment-variables)
- [HTTPS Support](#HTTPS-Support)
- [Lite Mode](#Lite-Mode)
- [Extending the Docker Image](#Extending-the-Docker-Image)
  - [List of Galaxy flavours](#List-of-Galaxy-flavours)
- [Integrating non-Tool Shed tools into the container](#Integrating-non-Tool-Shed-tools-into-the-container)
- [Users & Passwords](#Users-Passwords)
- [Development](#Development)
- [Requirements](#Requirements)
- [Changelog](./Changelog.md)
- [Support & Bug Reports](#Support-Bug-Reports)


# Usage <a name="Usage" /> [[toc]](#toc)
This chapter explains how to launch the container manually.

At first you need to install docker. Please follow the [very good instructions](https://docs.docker.com/installation/) from the Docker project.

After the successful installation, all you need to do is:

```sh
docker run -d -p 8080:80 -p 8021:21 -p 8022:22 quay.io/bgruening/galaxy
```

I will shortly explain the meaning of all the parameters. For a more detailed description please consult the [docker manual](http://docs.docker.io/), it's really worth reading.

Let's start:
- `docker run` will run the Image/Container for you.

    In case you do not have the Container stored locally, docker will download it for you.

- `-p 8080:80` will make the port 80 (inside of the container) available on port 8080 on your host. Same holds for port 8021 and 8022, that can be used to transfer data via the FTP or SFTP protocol, respectively.

    Inside the container a nginx Webserver is running on port 80 and that port can be bound to a local port on your host computer. With this parameter you can access your Galaxy
    instance via `http://localhost:8080` immediately after executing the command above. If you work with the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) on Mac or Windows,
    you need to connect to the machine generated by 'Docker Quickstart'. You get its IP address from `docker-machine ls` or from the first line in the terminal, e.g.: `docker is configured to use the default machine with IP 192.168.99.100`.

- `quay.io/bgruening/galaxy` is the Image/Container name, that directs docker to the correct path in the [docker index](https://quay.io/repository/bgruening/galaxy?tab=tags).
- `-d` will start the docker container in daemon mode.

For an interactive session, you can execute:

```sh
docker run -i -t -p 8080:80 \
    quay.io/bgruening/galaxy \
    /bin/bash
```

and run the `startup` script by yourself, to start PostgreSQL, nginx and Galaxy.

Docker images are "read-only", all your changes inside one session will be lost after restart. This mode is useful to present Galaxy to your colleagues or to run workshops with it. To install Tool Shed repositories or to save your data you need to export the calculated data to the host computer.

Fortunately, this is as easy as:

```sh
docker run -d -p 8080:80 \
    -v /home/user/galaxy_storage/:/export/ \
    quay.io/bgruening/galaxy
```

With the additional `-v /home/user/galaxy_storage/:/export/` parameter, Docker will mount the local folder `/home/user/galaxy_storage` into the Container under `/export/`. A `startup.sh` script, that is usually starting nginx, PostgreSQL and Galaxy, will recognize the export directory with one of the following outcomes:

- In case of an empty `/export/` directory, it will move the [PostgreSQL](http://www.postgresql.org/) database, the Galaxy database directory, Shed Tools and Tool Dependencies and various config scripts to /export/ and symlink back to the original location.
- In case of a non-empty `/export/`, for example if you continue a previous session within the same folder, nothing will be moved, but the symlinks will be created.

This enables you to have different export folders for different sessions - means real separation of your different projects.

To detect when the Galaxy distribution in the image changes, the container writes a marker at
`/export/.galaxy_export_marker`. You can override the marker value with `GALAXY_EXPORT_MARKER` if you
need deterministic export refresh behavior.

You can also collect and store `/export/` data of Galaxy instances in a dedicated docker [Data  volume Container](https://docs.docker.com/engine/userguide/dockervolumes/) created by:

```sh
docker create -v /export \
    --name galaxy-store \
    quay.io/bgruening/galaxy \
    /bin/true
```

To mount this data volume in a Galaxy container, use the  `--volumes-from` parameter:

```sh
docker run -d -p 8080:80 \
    --volumes-from galaxy-store \
    quay.io/bgruening/galaxy
```

This also allows for data separation, but keeps everything encapsulated within the docker engine (e.g. on OS X within your `$HOME/.docker` folder) - easy to back up, archive and restore. This approach, albeit at the expense of disk space, avoids the problems with permissions [reported](https://github.com/bgruening/docker-galaxy-stable/issues/68) for data export on non-Linux hosts.


## Upgrading images <a name="Upgrading-images" /> [[toc]](#toc)

We will release a new version of this image concurrent with every new Galaxy release. For upgrading an image to a new version we have assembled a few hints for you. Please take into account that upgrading may vary depending on your Galaxy installation and the changes in new versions. Use this example carefully!

* Create a test instance with only the database and configuration files. This will allow testing to ensure that things run but won't require copying all of the data.
* New unmodified configuration files are always stored in a hidden directory called `.distribution_config`. Use this folder to diff your configurations with the new configuration files shipped with Galaxy. This prevents needing to go through the change log files to find out which new files were added or which new features you can activate.

Here are 2 suggested upgrade methods, a quick one, and a safer one.

### The quick upgrade method

This method involves less data copying, which makes the process quicker, but makes it impossible to downgrade in case of problems.

If you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).


1. Stop the old Galaxy container

```sh
docker stop <old_container_name>
docker pull quay.io/bgruening/galaxy
```

2. Run the container with the updated image

```sh
docker run -p 8080:80 -v /data/galaxy-data:/export --name <new_container_name> quay.io/bgruening/galaxy
```

3. Use diff to find changes in the config files (only if you changed any config file).

```sh
cd /data/galaxy-data/.distribution_config
for f in *; do echo $f; diff $f ../galaxy/config/$f; read; done
```

4. Upgrade the database schema

```sh
docker exec -it <new_container_name> bash
galaxyctl stop
sh manage_db.sh upgrade
exit
```
5. Restart Galaxy

```sh
docker exec -it <new_container_name> galaxyctl start
```

(Alternatively, restart the whole container)


### The safe upgrade method

With this method, you keep a backup in case you decide to downgrade, but it requires some potentially long data copying.

* Note that copying database and datasets can be expensive if you have many GB of data.
* If you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).

1. Download newer version of the Galaxy image

  ```
  $ sudo docker pull quay.io/bgruening/galaxy
  ```
2. Stop and rename the current galaxy container

  ```
  $ sudo docker stop galaxy-instance
  $ sudo docker rename galaxy-instance galaxy-instance-old
  ```
3. Rename the data directory (the one that is mounted to /export in the docker)

  ```
  $ sudo mv /data/galaxy-data /data/galaxy-data-old
  ```
4. Run a new Galaxy container using newer image and wait while Galaxy generates the default content for /export

  ```
  $ sudo docker run -p 8080:80 -v /data/galaxy-data:/export --name galaxy-instance quay.io/bgruening/galaxy
  ```
5. Stop the Galaxy container

  ```
  $ sudo docker stop galaxy-instance
  ```
6. Replace the content of the postgres database by the old db data

  ```
  $ sudo rm -r /data/galaxy-data/postgresql/
  $ sudo rsync -var /data/galaxy-data-old/postgresql/  /data/galaxy-data/postgresql/
  ```
7. Use diff to find changes in the config files (only if you changed any config file).

  ```
  $ cd /data/galaxy-data/.distribution_config
  $ for f in *; do echo $f; diff $f ../../galaxy-data-old/galaxy/config/$f; read; done
  ```
8. Copy all the users' datasets to the new instance

  ```
  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/files/* /data/galaxy-data/galaxy/database/files/
  ```
9. Copy all the installed tools

  ```
  $ sudo rsync -var /data/galaxy-data-old/tool_deps/* /data/galaxy-data/tool_deps/
  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/shed_tools/* /data/galaxy-data/galaxy/database/shed_tools/
  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/config/* /data/galaxy-data/galaxy/database/config/
  ```
10. Copy the welcome page and all its files.

  ```
  $ sudo rsync -var /data/galaxy-data-old/welcome* /data/galaxy-data/
  ```
11. Create an auxiliary docker in interactive mode and upgrade the database.

  ```
  $ sudo docker run -it --rm -v /data/galaxy-data:/export quay.io/bgruening/galaxy /bin/bash
  # Startup all processes
  > startup &
  #Upgrade the database to the most recent version
  > sh manage_db.sh upgrade
  #Logout
  > exit
  ```
12. Start the docker and test

  ```
  $ sudo docker start galaxy-instance
  ```
13. Clean the old container and image


### Postgresql migration <a name="Postgresql-migration" /> [[toc]](#toc)

In the 19.05 version, Postgresql was updated from version 9.3 to version 11.5. If you are upgrading from a version <19.05, you will need to migrate the database.
You can do it the following way (based on the "The quick upgrade method" above):

1. Stop Galaxy in the old container

```sh
docker exec -it <old_container_name> galaxyctl stop
```

2. Dump the old database

```sh
docker exec -it <old_container_name> bash
su postgres
pg_dumpall --clean > /export/postgresql/9.3dump.sql
exit
exit
```

3. Update the container (= step 1 of the "The quick upgrade method" above)

```sh
docker stop <old_container_name>
docker pull quay.io/bgruening/galaxy
```

4. Run the container with the updated image (= step 2 of the "The quick upgrade method" above)

```sh
docker run -p 8080:80 -v /data/galaxy-data:/export --name <new_container_name> quay.io/bgruening/galaxy
```

5. Restore the dump to the new postgres version

Wait for the startup process to finish (Galaxy should be accessible)

```sh
docker exec -it <new_container_name> bash
galaxyctl stop
su postgres
psql -f /export/postgresql/9.3dump.sql postgres
exit
exit
```

6. Use diff to find changes in the config files (only if you changed any config file). (= step 3 of the "The quick upgrade method" above)

```sh
cd /data/galaxy-data/.distribution_config
for f in *; do echo $f; diff $f ../galaxy/config/$f; read; done
```

7. Upgrade the database schema (= step 4 of the "The quick upgrade method" above)

```sh
docker exec -it <new_container_name> bash
galaxyctl stop
sh manage_db.sh upgrade
exit
```

8. Restart Galaxy (= step 5 of the "The quick upgrade method" above)

```sh
docker exec -it <new_container_name> galaxyctl start
```

(Alternatively, restart the whole container)

9. Clean old files

If you are *very* sure that everything went well, you can delete `/export/postgresql/9.3dump.sql` and `/export/postgresql/9.3/` to save some space.

## Enabling Interactive Tools in Galaxy <a name="Enabling-Interactive-Tools-in-Galaxy" /> [[toc]](#toc)

Interactive Tools (IT) are sophisticated ways to extend Galaxy with powerful services, like [Jupyter](http://jupyter.org/), in a secure and reproducible way.

For this we need to be able to launch Docker containers inside our Galaxy Docker container.

```sh
docker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \
    --privileged=true \
    -v /home/user/galaxy_storage/:/export/ \
    quay.io/bgruening/galaxy
```

The port 4002 is the proxy port that is used to handle Interactive Tools. `--privileged` is needed to start docker containers inside docker.

Additionally, you can set the `GALAXY_DOMAIN` environment variable to specify the domain name for your Galaxy instance to ensure that domain-based ITs work correctly. By default, it is set to `localhost`. If you have your own domain, you can specify it instead.

If you're using the default job configuration, set the `GALAXY_DESTINATIONS_DEFAULT` environment variable to a Docker-enabled destination. By default, this is set to `slurm_cluster`, so you'll need to update it accordingly. Alternatively, you can also provide your own job configuration file. 

```sh
docker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \
    --privileged=true \
    -v /home/user/galaxy_storage/:/export/ \
    -e "GALAXY_DOMAIN=your.domain.com" \
    -e "GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker" \
    quay.io/bgruening/galaxy
```


## Using passive mode FTP or SFTP <a name="Using-passive-mode-FTP-or-SFTP" /> [[toc]](#toc)

By default, FTP servers running inside of docker containers are not accessible via passive mode FTP, due to not being able to expose extra ports. To circumvent this, you can use the `--net=host` option to allow Docker to directly open ports on the host server:

```sh
docker run -d \
    --net=host \
    -v /home/user/galaxy_storage/:/export/ \
    quay.io/bgruening/galaxy
```

Note that there is no need to specifically bind individual ports (e.g., `-p 80:80`) if you use `--net`.

An alternative to FTP and its shortcomings is to use the SFTP protocol via port 22. Start your Galaxy container with a port binding to 22.

```sh
docker run -i -t -p 8080:80 -p 8022:22 \
    -v /home/user/galaxy_storage/:/export/ \
    quay.io/bgruening/galaxy
```

And use for example [Filezilla](https://filezilla-project.org/) or the `sftp` program to transfer data:

```sh
sftp -v -P 8022 -o User=admin@example.org localhost <<< $'put <YOUR FILE HERE>'
```


## Using Parent docker <a name="Using-Parent-docker" /> [[toc]](#toc)

On some Linux distributions, Docker-In-Docker can run into issues (such as running out of loopback interfaces). If this is an issue, you can use a 'legacy' mode that uses a docker socket from the parent docker installation mounted inside the container. To engage, set the environment variable `DOCKER_PARENT`:

```sh
docker run -p 8080:80 -p 8021:21 \
    --privileged=true -e DOCKER_PARENT=True \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /home/user/galaxy_storage/:/export/ \
    quay.io/bgruening/galaxy
```

## RabbitMQ Management <a name="RabbitMQ-Management" /> [[toc]](#toc)

RabbitMQ is used as the broker for services like Celery. RabbitMQ provides a dedicated web interface for managing message queues, accessible at `http://localhost:8080/rabbitmq/`. This interface allows you to monitor queues, exchanges, bindings, and more. By default, it is password protected with `admin:admin`, but the credentials can be changed after logging in.

To completely disable RabbitMQ, you can set the `NONUSE` environment variable during container startup.

```sh
docker run -p 8080:80 \
    -e "NONUSE=rabbitmq" \
    quay.io/bgruening/galaxy
```

## Flower Webapp <a name="Flower-Webapp" /> [[toc]](#toc)

Flower is a web-based tool for monitoring and administering Celery. It is accessible at `http://localhost:8080/flower`. By default, this site is password protected with `admin:admin`. You can change this by providing a `common_htpasswd` file in `/home/user/galaxy_storage/`.

The Flower Webapp will only be available if both Celery and RabbitMQ are enabled, meaning the environment variable `NONUSE` does not include `celery` and `rabbitmq`. To completely disable the Flower Webapp, you can set the `NONUSE` environment variable during container startup.

```sh
docker run -p 8080:80 \
    -e "NONUSE=flower" \
    quay.io/bgruening/galaxy
```

## Galaxy's config settings <a name="Galaxys-config-settings" /> [[toc]](#toc)

Every Galaxy configuration parameter in `config/galaxy.yml` can be overwritten by passing an environment variable to the `docker run` command during startup. The name of the environment variable has to be:
`GALAXY_CONFIG`+ *the_original_parameter_name_in_capital_letters*
For example, you can set the Galaxy session timeout to 5 minutes and set your own Galaxy brand by invoking the `docker run` like this:

```sh
docker run -p 8080:80 \
    -e "GALAXY_CONFIG_BRAND='My own Galaxy flavour'" \
    -e "GALAXY_CONFIG_SESSION_DURATION=5" \
    quay.io/bgruening/galaxy
```

Note, that if you would like to run any of the [cleanup scripts](https://galaxyproject.org/admin/config/performance/purge-histories-and-datasets/), you will need to add the following to `/export/galaxy/config/galaxy.yml`:

```
database_connection = postgresql://galaxy:galaxy@localhost:5432/galaxy
file_path = /export/galaxy/database/files
```

## Security Configuration

*By default* the `admin_users` and `bootstrap_admin_api_key` variables are set to:

```
admin_users: admin@example.org
bootstrap_admin_api_key: HSNiugRFvgT574F43jZ7N9F3
```

Additionally, Galaxy encodes various internal values that can be part of output using a secret string configurable as `id_secret` in the config file (use 5-65 bytes long string).
This prevents 'guessing' of Galaxy's internal database sequences. Example:

```
id_secret: d5c910cc6e32cad08599987ab64dcfae
```

You should manually change all three configuration variables above in `/export/galaxy/config/galaxy.yml`.

Alternatively, you can pass the security configuration when running the image but please note that it is a security problem.
E.g. if a tool exposes all `env`'s your secret API key will also be exposed.

In addition with 24.2 we enabled Galaxy Vault configuration. This enables users to store secrets in a user-owned password safe, called vault.
It is highly recommended to change the pre-configured key under `$GALAXY_CONFIG_DIR/vault_conf.yml` following the instructions inside the file.


## Configuring Galaxy's behind a proxy <a name="Galaxy-behind-proxy" /> [[toc]](#toc)

If your Galaxy docker instance is running behind an HTTP proxy server, and if you're accessing it with a specific path prefix (e.g. http://www.example.org/some/prefix/), you need to make Galaxy aware of it. There is an environment variable available to do so:

```
PROXY_PREFIX=/some/prefix
```

You can and should overwrite these during launching your container:

```sh
docker run -p 8080:80 \
    -e "PROXY_PREFIX=/some/prefix" \
    quay.io/bgruening/galaxy
```

## On-demand reference data with CVMFS <a name="cvmfs" /> [[toc]](#toc)
By default, Galaxy instances launched with this image will have on-demand access to approximately 4TB of
reference genomes and indexes. These are the same reference data available on the main Galaxy server.
This is achieved by connecting to Galaxy's CernVM filesystem (CVMFS) at `cvmfs-config.galaxyproject.org` repository, which provides automatic configuration for all galaxyproject.org CVMFS repositories, including `data.galaxyproject.org`, and ensures they remain up to date.
The CVMFS capability doesn't add to the size of the Docker image, but when running, CVMFS maintains
a cache to keep the most recently used data on the local disk.

*Note*: for CVMFS directories to be mounted-on-demand with `autofs`, you must launch Docker as `--privileged`.
If privileged mode is not an option, use the optional CVMFS sidecar in `galaxy/docker-compose.yaml`:

```sh
cd galaxy
CVMFS_MOUNT_DIR=/cvmfs EXPORT_DIR=./export docker compose --profile cvmfs up
```

This starts a dedicated CVMFS container that mounts the repositories and shares `/cvmfs` with the Galaxy
container. The CVMFS cache is persisted in `${EXPORT_DIR}/cvmfs-cache`.


## Personalize your Galaxy <a name="Personalize-your-Galaxy" /> [[toc]](#toc)

The Galaxy welcome screen can be changed by providing a `welcome.html` page in `/home/user/galaxy_storage/`. All files starting with `welcome` will be copied during startup and served as introduction page. If you want to include images or other media, name them `welcome_*` and link them relative to your `welcome.html` ([example](https://github.com/bgruening/docker-galaxy-stable/blob/master/galaxy/welcome.html)).


## Deactivating services <a name="Deactivating-services" /> [[toc]](#toc)

Non-essential services can be deactivated during startup. Set the environment variable `NONUSE` to a comma separated list of services. Currently, `postgres`, `cron`, `proftp`, `nodejs`, `condor`, `slurmd`, `slurmctld`, `celery`, `rabbitmq`, `redis`, `flower` and `tusd` are supported.

```sh
docker run -d -p 8080:80 -p 9002:9002 \
    -e "NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd" \
    quay.io/bgruening/galaxy
```

A graphical user interface for starting/stopping services is available on port `9002` if you map it (e.g. `-p 9002:9002`).
This is the Supervisor web UI and it is unauthenticated by default, so only expose it on trusted networks or adjust the
Supervisor credentials in the image build.


## Restarting Galaxy <a name="Restarting-Galaxy" /> [[toc]](#toc)

If you want to restart Galaxy without restarting the entire Galaxy container you can use `docker exec` (docker > 1.3).

```sh
docker exec <container name> galaxyctl restart
```

To restart only web workers or handlers:

```sh
docker exec <container name> galaxyctl restart gunicorn
docker exec <container name> galaxyctl restart handler
```

Use `galaxyctl --help` for service names available in your configuration.

In addition, you can start/stop every supervisord process using a web interface on port `9002`. Start your container with:

```sh
docker run -p 9002:9002 quay.io/bgruening/galaxy
```


## Advanced Logging <a name="Advanced-Logging" /> [[toc]](#toc)

You can set the environment variable $GALAXY_LOGGING to FULL to access all logs from supervisor. For example start your container with:

```sh
docker run -d -p 8080:80 -p 8021:21 \
    -e "GALAXY_LOGGING=full" \
    quay.io/bgruening/galaxy
```

Then, you can access the supervisord web interface on port `9002` and get access to log files. To do so, start your container with:

```sh
docker run -d -p 8080:80 -p 8021:21 -p 9002:9002 \
    -e "GALAXY_LOGGING=full" \
    quay.io/bgruening/galaxy
```

Alternatively, you can access the container directly using the following command:

```sh
docker exec -it <container name> bash
```

Once connected to the container, log files are available in `/home/galaxy/logs`.

A volume can also be used to map this directory to one external to the container - for instance if logs need to be persisted for auditing reasons (security, debugging, performance testing, etc.):

```sh
mkdir gx_logs
docker run -d -p 8080:80 -p 8021:21 -e "GALAXY_LOGGING=full" -v `pwd`/gx_logs:/home/galaxy/logs quay.io/bgruening/galaxy
```

## Running on an external cluster (DRM)  <a name="Running-on-an-external-cluster-(DRM)" />[[toc]](#toc)

### Basic setup for the filesystem  <a name="Basic-setup-for-the-filesystem" /> [[toc]](#toc)

#### The easy way
The easiest way is to create a `/export` mount point on the cluster and mount the container with `/export:/export`.

#### Not using the /export mount point on the cluster.
The docker container sets up all its files on the /export directory, but this directory may not exist on the cluster filesystem. This can be solved with symbolic links on the cluster filesystem but it can also be solved within the container itself.

In this example configuration the cluster file system has a directory `/cluster_storage/galaxy_data` which is accessible for the galaxy user in the container (UID 1450) and the user starting the container.

The container should be started with the following settings configured:
```bash
docker run -d -p 8080:80 -p 8021:21
-v /cluster_storage/galaxy_data/galaxy_export:/export # This makes sure all galaxy files are on the cluster filesystem
-v /cluster_storage/galaxy_data:/cluster_storage/galaxy_data # This ensures the links within the docker container and on the cluster fs are the same
# The following settings make sure that each job is configured with the paths on the cluster fs instead of /export
-e GALAXY_CONFIG_TOOL_DEPENDENCY_DIR="/cluster_storage/galaxy_data/galaxy_export/tool_deps"
-e GALAXY_CONFIG_TOOL_DEPENDENCY_CACHE_DIR="/cluster_storage/galaxy_data/galaxy_export/tool_deps/_cache"
-e GALAXY_CONFIG_FILE_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/database/files"
-e GALAXY_CONFIG_TOOL_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tools"
-e GALAXY_CONFIG_TOOL_DATA_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data"
-e GALAXY_CONFIG_SHED_TOOL_DATA_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data"
# The following settings are for directories that can be anywhere on the cluster fs.
-e GALAXY_CONFIG_JOB_WORKING_DIRECTORY="/cluster_storage/galaxy_data/galaxy_export/galaxy/database/job_working_directory" #IMPORTANT: needs to be created manually. Can also be placed elsewhere, but is originally located here
-e GALAXY_CONFIG_NEW_FILE_PATH="/cluster_storage/galaxy_data/tmp" # IMPORTANT: needs to be created manually. This needs to be writable by UID=1450 and have its sticky bit set (chmod 1777 for world-writable with sticky bit)
-e GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=False # Writes Job scripts, stdout and stderr to job_working_directory.
-e GALAXY_CONFIG_RETRY_JOB_OUTPUT_COLLECTION=5 #IF your cluster fs uses nfs this may introduce latency. You can set galaxy to retry if a job output is not yet created.
# Conda settings. IMPORTANT!
-e GALAXY_CONFIG_CONDA_PREFIX="/cluster_storage/galaxy_data/_conda" # Can be anywhere EXCEPT cluster_storage/galaxy/galaxy_export!
# Conda uses $PWD to determine where the virtual environment is. If placed inside the export directory conda will determine $PWD to be a subdirectory of the /export folder which does not exist on the cluster!
-e GALAXY_CONFIG_CONDA_AUTO_INIT=True # When the necessary environment can not be found a new one will automatically be created
```
### Setting up a Python virtual environment on the cluster  <a name="Setting-up-a-python-virtual-environment-on-the-cluster" />[[toc]](#toc)
The Python environment in the container is not accessible from the cluster. So it needs to be created beforehand.
In this example configuration the Python virtual environment is created on  `/cluster_storage/galaxy_data/galaxy_venv` and the export folder on `/cluster_storage/galaxy_data/galaxy_export`. To create the virtual environment:
1. Create the virtual environment `virtualenv /cluster_storage/galaxy_data/galaxy_venv`
2. Activate the virtual environment `source /cluster_storage/galaxy_data/galaxy_venv/bin/activate`
3. Install the galaxy requirements `pip install --index-url https://wheels.galaxyproject.org/simple --only-binary all -r /cluster_storage/galaxy_data/galaxy/lib/galaxy/dependencies/pinned-requirements.txt`
  * Make sure to upgrade the environment with the new requirements when a new version of galaxy is released.

To make the Python environment usable on the cluster, create your custom `job_conf.xml` file and put it in `/cluster_storage/galaxy_data/galaxy_export/galaxy/config`.
In the destination section the following code should be added:
```xml
<destinations default="cluster">
  <destination id="cluster" runner="your_cluster_runner">
    <env file="/cluster_storage/galaxy_data/galaxy_venv/bin/activate"/>
    <env id="GALAXY_ROOT_DIR">/cluster_storage/galaxy_data/galaxy_export/galaxy</env>
    <env id="GALAXY_LIB">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>
    <env id="PYTHONPATH">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>
    <param id="embed_metadata_in_job">True</param>
  </destination>
</destinations>
```
In this way, Python tools on the cluster are able to use the Galaxy libraries.

More information can be found [here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#managing-dependencies-manually)
and
[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#galaxy-job-handlers).

### Using an external Slurm cluster <a name="Using-an-external-Slurm-cluster" /> [[toc]](#toc)

It is often convenient to configure Galaxy to use a high-performance cluster for running jobs. To do so, two files are required:

1. munge.key
2. slurm.conf

These files from the cluster must be copied to the `/export` mount point (i.e., `/cluster_storage/galaxy_data/galaxy_export/` on the host if using below command) accessible to Galaxy before starting the container. This must be done regardless of which Slurm daemons are running within Docker. At start, symbolic links will be created to these files to `/etc` within the container, allowing the various Slurm functions to communicate properly with your cluster. In such cases, there's no reason to run `slurmctld`, the Slurm controller daemon, from within Docker, so specify `-e "NONUSE=slurmctld"`. Unless you would like to also use Slurm (rather than the local job runner) to run jobs within the Docker container, then alternatively specify `-e "NONUSE=slurmctld,slurmd"`.

Importantly, Slurm relies on a shared filesystem between the Docker container and the execution nodes. To allow things to function correctly, checkout the basic filesystem setup above.

A brief note is in order regarding the version of Slurm installed. This Docker image uses Ubuntu 14.04 as its base image. The version of Slurm in the Ubuntu 14.04 repository is 2.6.5 and that is what is installed in this image. If your cluster is using an incompatible version of Slurm then you will likely need to modify this Docker image.

The following is an example for how to specify a destination in `job_conf.xml` that uses a custom partition ("work", rather than "debug") and 4 cores rather than 1:

```
<destination id="slurm4threads" runner="slurm">
    <param id="embed_metadata_in_job">False</param>
    <param id="nativeSpecification">-p work -n 4</param>
</destination>
```

The usage of `-n` can be confusing. Note that it will specify the number of cores, not the number of tasks (i.e., it's not equivalent to `srun -n 4`).

### Using an external Grid Engine cluster <a name="Using-an-external-Grid-Engine-cluster"/> [[toc]](#toc)

Set up the filesystem on the cluster as mentioned above.
To use Grid Engine (Sun Grid Engine, Open Grid Scheduler), one configuration file and an environment variable are required:


1. create an `act_qmaster` file in the /export folder.
  * The ***act_qmaster*** file should contain something like this:

  ```
  YOUR_GRIDENGINE_MASTER_HOST
  ```
  * this file will automatically be installed in the container's `/var/lib/gridengine` folder.
2. set the environment variable `SGE_ROOT`
  * By default
  ```
  -e SGE_ROOT=/var/lib/gridengine
  ```
3. Make sure that YOUR_GRIDENGINE_MASTER_HOST can be pinged from the docker container. If this is not the case you can put the qmaster's hostname and ip in the containers `/etc/hosts`
Your Grid Engine needs to accept job submissions from inside the container. If your container is already on a host that can submit jobs, set the hostname of the container to be exactly the same as the host. (The hostname can be changed by using the --hostname flag when starting the container).


 Alternatively, you can add the container's hostname (default=galaxy-docker) to the /etc/hosts file on the gridengine head node, and set the container's hostname as a submit host.


### Tips for Running Jobs Outside the Container <a name="Tips-for-Running-Jobs-Outside-the-Container"/> [[toc]](#toc)

In its default state Galaxy assumes both the Galaxy source code and
various temporary files are available on shared file systems across the
cluster. When using Condor or SLURM (as described above) to run jobs outside
of the Docker container one can take steps to mitigate these assumptions.

The `embed_metadata_in_job` option on job destinations in `job_conf.xml`
forces Galaxy to collect metadata inside the container instead of on the
cluster:

```
<param id="embed_metadata_in_job">False</param>
```

This has performance implications and may not scale as well as performing
these calculations on the remote cluster - but this should not be a problem
for most Galaxy instances.

# Enable Galaxy to use BioContainers (Docker) <a name="auto-exec-tools-in-docker"/> [[toc]](#toc)
This is a very cool feature where Galaxy automatically detects that your tool has an associated docker image, pulls it and runs it for you. These images (when available) have been generated using [mulled](https://docs.galaxyproject.org/en/latest/admin/special_topics/mulled_containers.html).
To test, install the [IUC bedtools](https://toolshed.g2.bx.psu.edu/repository?repository_id=8d84903cc667dbe7&changeset_revision=7b3aaff0d78c) from the toolshed. When you try to execute *ClusterBed*, for example, you may get a missing dependency error for *bedtools*. However, bedtools has an associated docker image on [quay.io](https://quay.io/). Now configure Galaxy as follows:

- Add this environment variable to `docker run`: `-e GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS=True`
- Persist mulled Singularity caches by mounting `/export` and reusing `/export/container_cache/singularity/mulled` across runs.
- In `job_conf.xml` configure a Docker enabled destination as follows:

```xml
<destination id="docker_local" runner="local">
    <param id="docker_enabled">true</param>
    <param id="docker_volumes">$galaxy_root:ro,$galaxy_root/database/tmp:rw,$tool_directory:ro,$job_directory:ro,$working_directory:rw,$default_file_path:rw</param>
    <param id="docker_sudo">false</param>
</destination>
```

When you execute the tool again, Galaxy will pull the image from Biocontainers ([quay.io/biocontainers](https://quay.io/organization/biocontainers)), run the tool inside of this container to produce the desired output.

# Magic Environment variables <a name="Magic-Environment-variables"/> [[toc]](#toc)

| Name   | Description   |
|---|---|
| `ENABLE_TTS_INSTALL`  | Enables the Test Tool Shed during container startup. This change is not persistent. (`ENABLE_TTS_INSTALL=True`)  |
| `GALAXY_LOGGING` | Enables for verbose logging at Docker stdout. (`GALAXY_LOGGING=full`)  |
| `BARE` | Disables all default Galaxy tools. (`BARE=True`)  |
| `NONUSE` |  Disable services during container startup. (`NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd`) |
| `GUNICORN_WORKERS` | Set the number of gunicorn workers (`GUNICORN_WORKERS=2`) |
| `CELERY_WORKERS` | Set the number of celery workers (`CELERY_WORKERS=2`) |
| `GALAXY_DOCKER_ENABLED` | Enable Galaxy to use Docker containers if annotated in tools (`GALAXY_DOCKER_ENABLED=False`) |
| `GALAXY_DOCKER_VOLUMES` | Specify volumes that should be mounted into tool containers (`GALAXY_DOCKER_VOLUMES=""`) |
| `GALAXY_HANDLER_NUMPROCS` | Set the number of Galaxy handlers (`GALAXY_HANDLER_NUMPROCS=2`) |
| `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` | Installing optional dependencies into the Galaxy virtual environment |
| `LOAD_PYTHON_DEV_DEPENDENCIES` | Installation of Galaxy's dev dependencies. Needs `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` as well |
| `GALAXY_AUTO_UPDATE_DB` | Run the Galaxy database migration script during startup |
| `GALAXY_EXPORT_MARKER` | Override the export marker used to refresh `/export/galaxy`. |


# HTTPS Support <a name="HTTPS-Support"/> [[toc]](#toc)

It's possible to automatically configure your container with HTTPS, either with
certificates of your own or by automatically requesting an HTTPS certificate from
Letsencrypt with the following environment variables:

| Name   | Description   |
|---|---|
| `USE_HTTPS` | Set `USE_HTTPS=True` to set up HTTPS via self-signed certificates (CN is set to the value of `GALAXY_DOMAIN` variable, defaulting to `localhost` if no value is provided). If you have your own certificates, copy them to `/export/{server.key,server.crt}`. |
| `USE_HTTPS_LETSENCRYPT` | Set `USE_HTTPS_LETSENCRYPT=True` to automatically set up HTTPS using Letsencrypt as a certificate authority. (Requires you to also set `GALAXY_DOMAIN`) Note: only set one of `USE_HTTPS` and `USE_HTTPS_LETSENCRYPT` to true. |
| `GALAXY_DOMAIN` | Set `GALAXY_DOMAIN=<your_domain>` so that Letsencrypt can verify that you own the domain you claim to own in order to issue your HTTPS cert. |


# Lite Mode <a name="Lite-Mode" /> [[toc]](#toc)

The lite mode will only start postgresql and a single Galaxy process, without nginx, gunicorn or any other special feature from the normal mode. In particular there is no support for the export folder or any Magic Environment variables.

```sh
docker run -i -t -p 8080:8080 quay.io/bgruening/galaxy startup_lite
```

This will also use the standard `job_conf.xml.sample_basic` shipped by Galaxy. If you want to use the regular one from the normal mode you can pass `-j` to the `startup_lite` script.


# Extending the Docker Image <a name="Extending-the-Docker-Image" /> [[toc]](#toc)

If the desired tools are already included in the Tool Shed, building your own personalised Galaxy docker Image (Galaxy flavour) can be done using the following steps:

1. Create a file named `Dockerfile`
2. Include `FROM quay.io/bgruening/galaxy` at the top of the file. This means that you use the Galaxy Docker Image as base Image and build your own extensions on top of it.
3. Supply the list of desired tools in a file (`my_tool_list.yml` below). See [this page](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample) for the file format requirements.
4. Execute `docker build -t my-docker-test .`
4a. (if behind proxy). Add the ENV http_proxy and https_proxy variables as IPs (to avoid nameserver resolution problems) as in the example below.
5. Run your container with `docker run -p 8080:80 my-docker-test`
6. Open your web browser on `http://localhost:8080`

For a working example, have a look at these Dockerfiles:
- [deepTools](http://deeptools.github.io/) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools/Dockerfile)
- [ChemicalToolBox](https://github.com/bgruening/galaxytools/tree/master/chemicaltoolbox) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox/Dockerfile)

```
# Galaxy - deepTools
#
# VERSION       0.2

FROM quay.io/bgruening/galaxy

MAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com

ENV GALAXY_CONFIG_BRAND deepTools
# The following two lines are optional and can be given during runtime
# with the -e http_proxy='http://yourproxyIP:8080' parameter
ENV http_proxy 'http://yourproxyIP:8080'
ENV https_proxy 'http://yourproxyIP:8080'

WORKDIR /galaxy

RUN add-tool-shed --url 'http://testtoolshed.g2.bx.psu.edu/' --name 'Test Tool Shed'

# Install Visualisation
RUN install-biojs msa

# Adding the tool definitions to the container
ADD my_tool_list.yml $GALAXY_ROOT_DIR/my_tool_list.yml

# Install deepTools
RUN install-tools $GALAXY_ROOT_DIR/my_tool_list.yml

# Mark folders as imported from the host.
VOLUME ["/export/", "/data/", "/var/lib/docker"]

# Expose port 80 (webserver), 21 (FTP server)
EXPOSE :80
EXPOSE :21

# Autostart script that is invoked during container start
CMD ["/usr/bin/startup"]
```

or the [RNA-workbench](https://github.com/bgruening/galaxy-rna-workbench/blob/master/Dockerfile).
The RNA-workbench has advanced examples about:

- populating Galaxy data libraries

  ```bash
    setup-data-libraries -i $GALAXY_ROOT_DIR/library_data.yaml -g http://localhost:8080
        -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD
  ```

The actual data is referenced in a YAML file similar to this [one](https://github.com/bgruening/galaxy-rna-workbench/blob/master/library_data.yaml).

- installing workflows

  ```bash
      workflow-install --workflow_path $GALAXY_HOME/workflows/ -g http://localhost:8080
          -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD
  ```

All Galaxy workflows need to be in one directory, here the `$GALAXY_HOME/workflows/`.

- running Galaxy data-managers to create indices or download data

  ```bash
      run-data-managers -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD -g http://localhost:8080
          --config data_manager_rna_seq.yaml
  ```

The data-managers can be configured and specified in a YAML file similar to this [one](https://github.com/galaxyproject/training-material/blob/master/RNA-Seq/docker/data_manager_rna_seq.yaml).


If you host your flavor on GitHub, consider testing your build with Travis-CI. This project will help you:
https://github.com/bgruening/galaxy-flavor-testing

## Test matrix <a name="Test-matrix" /> [[toc]](#toc)

The project includes local test scripts and CI workflows. Use the matrix below to decide what to run.

| Area | Script / Workflow | Requires | Notes |
| --- | --- | --- | --- |
| Image build | `docker build -t galaxy:test galaxy/` | Docker | Baseline image build. |
| Startup sanity | `docker run --rm --privileged galaxy:test /usr/bin/startup2` | Privileged | Confirms services start and CVMFS messaging is sane. |
| Bioblend | `test/bioblend/test.sh` | Running Galaxy container | Uses a Bioblend test image against Galaxy. |
| Slurm | `test/slurm/test.sh` | Docker, Slurm test image | Uses external Slurm container; set `GALAXY_IMAGE=galaxy:test` if needed. |
| SGE (Grid Engine) | `test/gridengine/test.sh` | Docker, SGE test image | Uses ephemeris container to wait for Galaxy. |
| CVMFS sidecar | `test/cvmfs/test.sh` | Privileged | Builds and validates mount propagation from sidecar. |
| FTP/SFTP | `.github/workflows/single.sh` | Docker, sshpass (CI) | FTP and SFTP checks run in CI; local run skips SFTP if `sshpass` is missing. |
| /export persistence | `startup.sh` / `startup2.sh` | `/export` volume | Export and cache relocation happens during startup; exercised by CI runs. |
| HTTPS/TLS | `.github/workflows/single.sh` | Docker | Uses `curl` and `openssl s_client` against port 443. |
| Tool install smoke | `.github/workflows/single.sh` | Docker | Installs sample tools and verifies tool availability. |
| Container resolvers | `test/container_resolvers_conf.ci.yml` | Galaxy container | CI uses a minimal resolver config for toolbox resolution tests. |
| Image analysis (optional) | `.github/workflows/single.sh` | `dive` | Runs only when `dive` is installed. |
| Single-container CI | `.github/workflows/single_container.yml` | CI | Full container test (privileged). |
| Multi-test CI | `.github/workflows/single.sh` | CI | Builds image + runs SLURM, SGE, Bioblend; uses buildx cache. |

Notes:
- If `/tmp` is small in CI, set `TMPDIR=/var/tmp` for test scripts.
- CVMFS sidecar CI builds/pushes on tags; branch pushes run tests only when CVMFS paths change.




## List of Galaxy flavours <a name="List-of-Galaxy-flavours" /> [[toc]](#toc)

- [Aurora Galaxy](https://github.com/statonlab/aurora-galaxy-tools)
- [SNP analysis Workflows on Docker (sniplay)](https://github.com/ValentinMarcon/docker-galaxy-sniplay)
- [NCBI-Blast](https://github.com/bgruening/docker-galaxy-blast)
- [ChemicalToolBox](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox)
- [ballaxy](https://github.com/anhi/docker-scripts/tree/master/ballaxy)
- [NGS-deepTools](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools)
- [Galaxy ChIP-exo](https://github.com/gregvonkuster/docker-galaxy-ChIP-exo)
- [Galaxy Proteomics](https://github.com/bgruening/docker-galaxyp)
- [Imaging](https://github.com/bgruening/docker-galaxy-imaging)
- [Constructive Solid Geometry](https://github.com/gregvonkuster/docker-galaxy-csg)
- [Galaxy for metagenomics](https://github.com/bgruening/galaxy-metagenomics)
- [Galaxy with the Language Application Grid tools](https://github.com/lappsgrid-incubator/docker-galaxy-lappsgrid)
- [RNAcommender](https://github.com/gianlucacorrado/galaxy-RNAcommender)
- [OpenMoleculeGenerator](https://github.com/bgruening/galaxy-open-molecule-generator)
- [Workflow4Metabolomics](https://github.com/workflow4metabolomics/w4m-docker)
- [HiC-Explorer](https://github.com/maxplanck-ie/docker-galaxy-hicexplorer)
- [SNVPhyl](https://github.com/phac-nml/snvphyl-galaxy)
- [GraphClust](https://github.com/BackofenLab/docker-galaxy-graphclust)
- [RNA workbench](https://github.com/bgruening/galaxy-rna-workbench)
- [Cancer Genomics Toolkit](https://github.com/morinlab/tools-morinlab/tree/master/docker)
- [Clustered Heatmaps for Interactive Exploration of Molecular Profiling Data](http://cancerres.aacrjournals.org/content/77/21/e23)

# Integrating non-Tool Shed tools into the container <a name="Integrating-non-Tool-Shed-tools-into-the-container" /> [[toc]](#toc)

We recommend to use the [Main Galaxy Tool Shed](https://toolshed.g2.bx.psu.edu/) for all your tools and workflows that you would like to share.
In rare situations where you cannot share your tools but still want to include them into your Galaxy Docker instance, please follow the next steps.

- Get your tools into the container.

    Mount your tool directory into the container with a separate `-v /home/user/my_galaxy_tools/:/local_tools`.

- Create a `tool_conf.xml` file for your tools.

    This should look similar to the main [`tool_conf.xml`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/tool_conf.xml.sample) file, but references your tools from the new directory. In other words a tool entry should look like this `<tool file="/local_tools/application_foo/foo.xml" />`.
    Your `tool_conf.xml` should be available from inside of the container. We assume you have it stored under `/local_tools/my_tools.xml`.

- Add the new tool config file to the Galaxy configuration.

    To make Galaxy aware of your new tool configuration file you need to add the path to `tool_config_file`, which is set to `/etc/galaxy/tool_conf.xml`. You can do this during container start by setting the environment variable `-e GALAXY_CONFIG_TOOL_CONFIG_FILE=/etc/galaxy/tool_conf.xml,/local_tools/my_tools.xml`.


# Users & Passwords <a name="Users-Passwords" /> [[toc]](#toc)

The Galaxy Admin User has the username `admin@example.org` and the password `password`.
The PostgreSQL username is `galaxy`, the password is `galaxy` and the database name is `galaxy` (I know I was really creative ;)).
If you want to create new users, please make sure to use the `/export/` volume. Otherwise your user will be removed after your docker session is finished.

The proftpd server is configured to use the main galaxy PostgreSQL user to access the database and select the username and password. If you want to run the
docker container in production, please do not forget to change the user credentials in `/etc/proftpd/proftpd.conf` too.

The Flower Webapp is `htpasswd` protected with username and password set to `admin`.

RabbitMQ is configured with:
  - Admin username: `admin`
  - Admin password: `admin`
  - Galaxy vhost: `galaxy`
  - Galaxy username: `galaxy`
  - Galaxy password: `galaxy`
  - Flower username: `flower`
  - Flower password: `flower`


# Development <a name="Development" /> [[toc]](#toc)

You can clone this repository with:

```sh
git clone https://github.com/bgruening/docker-galaxy-stable.git
```

This repository uses various [Ansible](http://www.ansible.com/) roles as specified in [requirements.yml](galaxy/ansible/requirements.yml) to manage configurations and dependencies. You can install these roles with the following command:

```sh
cd galaxy/ansible/ && ansible-galaxy install -r requirements.yml -p roles
```

If you simply want to change the Galaxy repository and/or the Galaxy branch from which the container is built, you can do this with Docker `--build-arg` during the `docker build` step. For example, you can use these parameters during container build:

```
 --build-arg GALAXY_RELEASE=install_workflow_and_tools
 --build-arg GALAXY_REPO=https://github.com/manabuishii/galaxy
```

To keep docker images lean and optimize storage, we recommend using [Dive](https://github.com/wagoodman/dive). It provides an interactive UI that lets you explore each layer of the image, helping you quickly identify files and directories that take up significant space. To install Dive, follow the installation instructions provided in the [Dive GitHub repository](https://github.com/wagoodman/dive?tab=readme-ov-file#installation). After building your docker image, use Dive to analyze it:

```bash
dive <your-docker-image-name>
```

# Requirements <a name="Requirements" /> [[toc]](#toc)

- [Docker](https://www.docker.io/gettingstarted/#h_installation)


# Support & Bug Reports <a name="Support-Bug-Reports" /> [[toc]](#toc)

You can file a [GitHub issue](https://github.com/bgruening/docker-galaxy-stable/issues) or ask
us on the [Galaxy development list](http://lists.bx.psu.edu/listinfo/galaxy-dev).

If you like this service please fill out this survey: https://www.surveymonkey.de/r/denbi-service?sc=rbc&tool=galaxy-docker


================================================
FILE: compose/README.md
================================================

⚠️ 

The `compose` version of this project is currently not maintained. We update the files and versions as we have time, but it's not a priority at the moment.
We will concentrate on the single-container version. If you want to deploy a composable version of Galaxy please have a look at https://github.com/galaxyproject/galaxy-helm or take over the maintainership of this version here :)

⚠️

# Galaxy Docker Compose

This setup is built on the idea of using a basic docker-compose file and extending it
for additional use cases. Therefore the `docker-compose.yml` is the base of the
whole setup. By concatenating additional files, you can extend it to use, for
example, HTCondor (see [Usage](#usage)).

All working data (database, virtual environment, etc.) is exported in the
`EXPORT_DIR`, which defaults to ./export.


## Usage
### First startup
When starting the setup for the first time, the Galaxy container will copy
a bunch of files into the `EXPORT_DIR`. This might take quite some time
to finish (even 20 minutes or more). Please don't interrupt the setup in
this period, as this might result in a broken state of the `EXPORT_DIR`
(see [Killing while first start up](#killing-while-first-start-up)).

### Basic setup
Simply run

> docker-compose up

to start Galaxy. In the basic setup, Galaxy together with Nginx as the proxy,
Postgres as the DB, and RabbitMQ as the message queue is run.

The default username and password is "admin", "password" (API key "fakekey").
Those credentials are set at first run and can be tweaked using the environment
variables `GALAXY_DEFAULT_ADMIN_USER`, `GALAXY_DEFAULT_ADMIN_EMAIL`,
`GALAXY_DEFAULT_ADMIN_PASSWORD`, and `GALAXY_DEFAULT_ADMIN_KEY` in the
`docker-compose.yml` file. If you want to change the email address of an admin,
remember to update the `admin_users` setting of the Galaxy config (also
see [Configuration](#configuration) to learn how to configure Galaxy).

### Running in background
If you want to run the setup in the background, use the detach option (`-d`):

> docker-compose up -d

### Upgrading to a newer Galaxy version
When not setting `IMAGE_TAG` to a specific version, Docker-Compose will always
fetch the newest image and therefore Galaxy version available. Depending
on the magnitude of the upgrade, you may need to delete the virtual
environment of Galaxy (EXPORT_PATH/galaxy/.venv) before you start the
setup again. The DB migration depends on the `database_auto_migrate`
setting for Galaxy (which is not
set on default and will therefore be `false` normally).


## Extending the setup
Beyond the basic usage, extending the setup is as easy as adding an additional
docker-compose extension file. This is done by the [standard docker-compose syntax](https://docs.docker.com/compose/extends/):
`docker-compose -f docker-compose.yml -f docker-compose.EXTENSION.yml`. Simply
concatenate the extensions you want to use. The rest should be handled for you.

### Running a HTCondor cluster
The `docker-compose.htcondor.yml` file is responsible to build up
an HTCondor cluster. Simply run:

> docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up

This will bring up a "cluster" with one master and one executor. Galaxy
acts like the submit node. To scale
the cluster, run the up statement with a `--scale htcondor-executor=n` option.
The setup ships with a basic configuration for HTCondor (see the
`base_config.yml` file). To customize the settings, set the appropriate
`HTCONDOR_MASTER_CONFIG_`, `HTCONDOR_EXECUTOR_CONFIG_`, `HTCONDOR_GALAXY_CONFIG`
environment variables (see [Configuration](#configuration)).

### Running a SLURM cluster
Append the `docker-compose.slurm.yml` file to your `docker-compose up` command. This
will spin up a small Slurm cluster and configure Galaxy to schedule jobs there.
To scale the cluster, run the up statement with a `--scale slurm_node=n` option.
As all nodes need to be defined in the slurm.conf file, you will also need to
set the env variable `SLURM_NODE_COUNT` to the correct node count.
Here is an example for scaling to three nodes:
`SLURM_NODE_COUNT=3 docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up --scale slurm_node=3`.

Some background info about the slurm.conf configuration: As said earlier, Slurm
expects to have all nodes be defined in the conf file, together with valid
hostnames. Therefore `galaxy-configurator` automatically adds references
(the names of the slurm_node-containers) to the nodes by utilizing `SLURM_NODE_COUNT`.
As the docker-compose containers can contain underscores, the names are not
valid as hostnames (even though they are resolvable from inside the containers).
To cope with this problem, the `galaxy-slurm-node-discovery`-container
uses the Docker API to fetch the correct hostnames and replaces them on the
fly inside the slurm.conf file.

### Running a Kubernetes Cluster (with kind)
It is possible to start a small Kubernetes (k8s) cluster using [kind](https://kind.sigs.k8s.io)
(Kubernetes in Docker) and let Galaxy run your jobs there. For this use the
`docker-compose.k8s.yml` file. Note that this extension is only meant
to run individually (so no Pulsar, HTCondor etc.).

The `galaxy-kind` container is responsible for starting up your local Kubernetes
cluster and applying all the configuration the Galaxy-Configurator created. You can
find these files under `galaxy-configurator/templates/kind`. The `kind_config.yml`
file is used to configure Kind itself (also see https://kind.sigs.k8s.io/docs/user/configuration/),
while the files in the `k8s_config` are the configs that will be applied to
Kubernetes using `kubectl apply -f <k8s_config>`. By default, k8s is configured
to add some persistent volumes (PV) and persistent volume claims (PVC) so jobs
can access all the needed files from Galaxy.
It is relatively easy to add your own k8s_configs: Simply place your files into the
template folder (remember to add the `.j2` extension!) and mention it in the
`kind_configs` variable in the run.sh file of the galaxy-configurator
(see [Extend the Galaxy-Configurator](#extend-the-galaxy-configurator)).

While Kind is starting up the cluster, it blocks Galaxy from starting itself.
This is needed as Galaxy will parse the KUBECONFIG (that is created after k8s has started)
only once on startup. So don't be surprised if Galaxy is quiet for some time :)

Note that the cluster is being rebuilt on every start (to be more precise,
a `kind delete cluster` is called on shut down), so manual changes will
be overwritten if they are not defined in the k8s_config!

### Using Singularity for dependency resolution
Conda is used as the default dependency resolution. To switch to using
Singularity containers, add the `docker-compose.singularity.yml` file.
This will advise Galaxy to - if possible - stick with Singularity
for the dependency resolution. See the
[Galaxy documentation](https://docs.galaxyproject.org/en/master/admin/special_topics/mulled_containers.html)
for more information.

### Configuration
The `galaxy-configurator` is the central place for configuration
and is used to configure Galaxy and its
additional services (currently Nginx, and Slurm). For this, it utilizes
environment variables (set in the docker-compose file) for common configs,
and the `base_config.yml` file, used for base-configuration that does not
change often. For environment variables, there are two categories of
configuration: The ones that contain a `_CONFIG_`
(like `GALAXY_CONFIG_ADMIN_USERS`) and the ones that don't (like
`GALAXY_PROXY_PREFIX`). The first category contains configuration
options within the tools itself and they are simply mapped to the
corresponding config-file one-to-one (see for example
[galaxy.yml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/galaxy.yml.sample)
for reference). The other category contains options that have some
logic within the docker-compose setup. `GALAXY_PROXY_PREFIX`, for example,
touches multiple Galaxy and Nginx options, so you don't have to.

The configurations are based on [Jinja2](https://jinja.palletsprojects.com/en/2.11.x/)
templates, located at `galaxy-configurator/templates`.
The `galaxy-configurator` renders these
templates on startup and saves them in the export-folder to be
used by the other containers. A diff is created to surface changes
that will be applied. To disable the configurator, simply remove the
corresponding `*_OVERWRITE_CONFIG` environment variable
(like `GALAXY_OVERWRITE_CONFIG`) or set it to `false`.

All options are discussed under [configuration reference](#configuration-reference).

### Use specific Galaxy version or Docker images
The `IMAGE_TAG` environment variable allows to use specific versions of the
setup. Say, you want to stay with Galaxy v24.1 for now:

> export IMAGE_TAG=24.1
> docker-compose up

Without setting this variable, you will always get updated to the newest
version available.

### Restarting
To restart the setup (for example after a configuration change), you can simply
kill (CTRL-C) Docker Compose and re-run `docker-compose ... up`. Your data will
not be lost, as long as you keep the `export`-folder.

### Using prefix
It is possible to host Galaxy under a prefix like example.com/galaxy. For that,
set the env variable in the `galaxy-configurator` part to
`GALAXY_PROXY_PREFIX=/your/wanted/prefix` (like `/galaxy`)
and remember to also update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly.

## More advanced stuff
### "SSH"ing into a container
When facing a bug it may be helpful to have command-line control over a
container. This is as simple as running `docker exec -it CONTAINER_NAME /bin/bash`.
For the galaxy-server container that would mean:

> docker exec -it compose_galaxy-server_1 /bin/bash

Note that not all containers have bash shipped with them. In this case replace
it by `/bin/sh`.

### Build containers locally
When developing locally, you may come to the point where you need to build
images yourself. In most cases adding a `--build` to the docker-compose statement
should be enough. It's
recommended to build the images using custom tags, so it's easy to switch between
versions. Simply set `IMAGE_TAG` to something other than `latest`:

> export IMAGE_TAG=bugfix1
> docker-compose up --build

Maybe you found a bug in Galaxy itself and you want to test it now. For this,
you can set the `GALAXY_REPO` and `GALAXY_RELEASE` build arguments to your
own fork and branch.

> docker build galaxy-server -t quay.io/bgruening/galaxy-server:$IMAGE_TAG --build-arg GALAXY_REPO=https://github.com/YOUR-USERNAME/galaxy --build-arg GALAXY_RELEASE=my_custom_branch

Some containers use base-images that share some common dependencies (like
Docker that is not only used for Galaxy, but also Pulsar, HTCondor, or Slurm).
After re-building these images yourself, you may also need to add
`--build-arg IMAGE_TAG=your_base_image_tag` and `SETUP_REPO` if your
base-images are tagged differently or are stored in a different repository.

### Extend the Galaxy-Configurator
It is possible to extend the usage of the configurator, both in extending the
Jinja2 templates, but also in adding additional files.

All environment variables of the `galaxy-configurator` are accessible
within the templates. Additionally,
the configurator parses specific `*_CONFIG_*`
variables and makes them accessible as a dict (for example `galaxy` or
`gravity`). It may be helpful to understand the current use cases
within the templates and how the `customize.py` file (actually just an
extension of the [J2cli](https://github.com/kolypto/j2cli)) parses env
variables.

To add more template files, have a look into the `run.sh` file. For example
adding a configuration file for Galaxy is as simple as adding an entry
into the `galaxy_configs` array.

### Adding additional containers or configurations
So you want to extend the setup to - for example - support a new
Workload Manager for Galaxy? Or you have a specific configuration
of Galaxy in mind that goes out of the scope of the basic
`docker-compose.yml` file? Awesome!
Let's have a look at two examples for how you can create a custom
extension:
**HTCondor**:
The `docker-compose.htcondor.yml` file is a good example of what
the idea of extensions are in the context of this setup.
The HTCondor "cluster" is based on a single image (`galaxy-htcondor`)
and, depending on the container's purpose, it gets exposed to
different volumes. As Galaxy needs some additional files, one volume
is added to its container. The `galaxy-configurator` part
overwrites a single
environment variable and sets a new one. The neat thing about this
approach is that if you don't need
to run HTCondor, the base setup will work just fine without
much additional ballast. However, adding HTCondor isn't a hassle
either.

**Singularity**
Changing a bunch of variables all the time, just to be able to switch
between different setups can become a hassle quickly. The
`docker-compose.singularity.yml` file is a good example of how you
can avoid that. In normal cases, Galaxy should run jobs in the
shell directly; changing that to Singularity requires some
different settings. The file is a good example of how you can
quickly overwrite settings and be able to reuse it for different
occasions (remember that by concatenating this file behind
HTCondor, Slurm, or Pulsar enables Singularity the same way). Another
example would be to create a custom `docker-compose.debug.yml` file
that could be used to enable some debug flags or
setting `GALAXY_CONFIG_CLEANUP_JOB=never`.

### Running the CI pipeline on your own fork
The GitHub Actions workflow used to build, test and deploy this setup
is independent of any specific username or Docker Registry. To run
the workflow on your fork, simply
[set the following secrets](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets):
* `docker_registry`: The Registry the images should be pushed
to (`docker.io`, for example)
* `docker_registry_username`: Your username
* `docker_registry_password`: Your password


## Troubleshooting
### Killing while first start up
If you kill (CTRL-C) Docker Compose while Galaxy is performing the first
startup, you may come into the situation where not all files have been properly
exported. As the exporting is only done for the first start, this can result in
missing dependencies. In this case it is good to remove the whole
`export`-folder (or at least Galaxy related files - the `postgres` folder can
stay, if wanted).

### Resetting the setup
To start from the beginning, you of course need to delete the `export`-folder.
But remember to also do a `docker-compose -f <COMPOSE-FILES..> down`, as this
will shut down and remove all containers. If you forget this, while still
deleting the `export`-folder, the Galaxy container may have problems with
exporting all necessary files, as they are usually deleted within the container
after the first proper startup.

## Testing
The setup provides a bunch of different integration tests to run against Galaxy.
Have a look inside the `tests` folder. There you find the containers that run
the tests and their docker-compose files. The containers are essentially just
a wrapper around the test tools to simplify using them. Running a test
is the same as extending
any other part of the setup: Just concatenate the test file at the end.
To run, for example, some Planemo Workflow tests against a Galaxy installation that
is connected to a HTCondor cluster using Singularity, just enter:
`docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml
-f docker-compose.singularity.yml -f tests/docker-compose.test.yml
-f tests/docker-compose.test.workflows.yml up`. To stop the setup when a test
has finished, you may want to add the option `--exit-code-from galaxy-workflow-test`.
This returns the exit code of the test container (should be 0 if successful),
which you could use for further automation.

The tests are run using GitHub Actions on every commit. So feel free to inspect
the `.github/workflows/compose.yml` file for more test cases and get inspired
by them :)

### Planemo workflow tests
Like the name suggests, this runs [Planemo](https://planemo.readthedocs.io/en/latest/)
workflow tests. The container uses the tests from [UseGalaxy.eu](https://github.com/usegalaxy-eu/workflow-testing),
but you can mount any test you could think of inside the container at the `/src` path.
By default, it will run some select workflows, but you can choose your own
by setting the `WORKFLOWS` env variable to a comma separated list of paths to some tests
(e.g. `WORKFLOWS=test1/test1.ga,test2/test2.ga docker-compose ...`).

### Selenium tests
The Selenium tests simulate a real user that is accessing Galaxy through the
browser to perform some actions. For that it uses a headless Chrome to run the
tests from the [Galaxy repo](https://github.com/galaxyproject/galaxy/tree/dev/lib/galaxy_test/selenium).
The GitHub Actions currently just run a few of those. To select more tests,
set the env variable `TESTS` to a comma separated list (like `TESTS=navigates_galaxy.py,login.py`).
Note that you don't need to append the `test_` prefix for every
single file!

### BioBlend tests
BioBlend has some tests that run against Galaxy. We are using some of them to test
our setup too. Have a look into the `run.sh` file of the container to see
which tests we have excluded (at least for now).


## Configuration reference
Tool specific configuration can be applied via `base_config.yml` or the following
environment variables:
* `GALAXY_CONFIG_`
* `GRAVITY_CONFIG_`
* `NGINX_CONFIG_`
* `PULSAR_CONFIG_`
* `HTCONDOR_MASTER_CONFIG_`
* `HTCONDOR_EXECUTOR_CONFIG_`
* `HTCONDOR_GALAXY_CONFIG_`
* `SLURM_CONFIG_`

The following are settings specific to this docker-compose setup:
### Galaxy
| Variable                  | Description                                                                                                        |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `GALAXY_OVERWRITE_CONFIG` | Enable Galaxy-configurator, which may result in overwriting manual config changes done in `EXPORT_DIR/galaxy/config`.                                                                                                        |
| `GALAXY_PROXY_PREFIX`     | Host Galaxy under a prefix (like example.com/galaxy). Note that you also need to update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly.                                                                                      |
| `GALAXY_JOB_DESTINATION`  | The name of the preferred job destination (local, condor, slurm, singularity..) defined in `job_conf.xml`. Generally, this does not need to be changed, as the docker-compose extensions are already taking care of that. |
| `GALAXY_JOB_RUNNER`       | The job runner Galaxy will use to process jobs. Can be `local`, `condor`, `slurm`, `pulsar_rest`, `pulsar_mq`, or `k8s`. |
| `GALAXY_DEPENDENCY_RESOLUTION` | Determines how Galaxy should resolve dependencies. You can choose between Conda (`conda`) or running them inside a Singularity container (`singularity`). |
| `GALAXY_PULSAR_URL`       | The URL Galaxy will communicate with Pulsar, when choosing the `pulsar_rest` job runner. |
| `GALAXY_JOB_METRICS_*`    | Enable the corresponding job metrics. Can be `CORE`, `CPUINFO` (`true` or `verbose`), `MEMINFO`, `UNAME`, and `ENV`, also see [job_metrics.xml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/job_metrics_conf.xml.sample) for reference.

### Nginx
| Variable                  | Description                                                                                                        |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `NGINX_OVERWRITE_CONFIG`  | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `NGINX_PROXY_READ_TIMEOUT` | Determines how long Nginx will wait (in seconds) for Galaxy to respond to a request until it times out. Defaults to 180 seconds. |

### Pulsar
| Variable                  | Description                                                                                                        |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `PULSAR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `PULSAR_JOB_RUNNER`       | The job runner Pulsar will use to process jobs. Currently, only `local` is supported, but this will be extended to HTCondor and Slurm in the future. |
| `PULSAR_NUM_CONCURRENT_JOBS` | The number of jobs Pulsar will run concurrently. Defaults to 1. |
| `PULSAR_GALAXY_URL`       | The URL Pulsar will use to send results back to Galaxy. Defaults to `http://nginx:80`. |
| `PULSAR_HOSTNAME`         | The hostname Pulsar will listen to for requests. Defaults to `pulsar`. |
| `PULSAR_PORT`             | The port Pulsar will listen to for requests. Defaults to 8913. |
| `PULSAR_LOG_LEVEL`        | The log level (like `DEBUG` or `INFO`) of Pulsar. Defaults to `INFO`. |

### Kind (Kubernetes in Docker)
| Variable                  | Description                                                                                                        |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `KIND_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `KIND_NODE_COUNT`       | The number of Kubernetes nodes kind should start. Defaults to 1. |
| `KIND_PV_STORAGE_SIZE`  | The size limit (in Gi) of a Kubernetes Persistent Volume. Defaults to 100.  |
| `GALAXY_KUBECONFIG`     | The path to the KUBECONFIG that Galaxy will use to connect to Kubernetes. Defaults to the one created with galaxy-kind. |
| `GALAXY_K8S_PVC`        | The PVCs a job pod should mount. Defaults to `galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps`. |
| `GALAXY_K8S_DOCKER_REPO_DEFAULT` | The Docker Repo/Registry to use if the resolver could not resolve the proper image for a job. Defaults to `docker.io`. |
| `GALAXY_K8S_DOCKER_OWNER_DEFAULT` | The Owner/Username to use if the resolver could not resolve the proper image for a job. Is not set by default. |
| `GALAXY_K8S_DOCKER_IMAGE_DEFAULT` | The Image to use if the resolver could not resolve the proper image for a job. Defaults to `ubuntu`. |
| `GALAXY_K8S_DOCKER_TAG_DEFAULT` | The Image Tag to use if the resolver could not resolve the proper image for a job. Defaults to `22.04`. |

### HTCondor
| Variable                    | Description                                                                                                        |
|-----------------------------|--------------------------------------------------------------------------------------------------------------------|
| `HTCONDOR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |

### Slurm
| Variable                  | Description                                                                                                        |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `SLURM_OVERWRITE_CONFIG`  | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `SLURM_NODE_COUNT`        | The number of Slurm nodes running. This needs to be changed when scaling the setup (eg. `docker-compose up --scale slurm_node=n`) to let the Slurm controller know of all available nodes. |
| `SLURM_NODE_CPUS`         | Number of CPUs per node. Defaults to 1. |
| `SLURM_NODE_MEMORY`       | Amount of memory per node. Defaults to 1024. |
| `SLURM_NODE_HOSTNAME`     | Docker Compose adds a prefix in front of the container names by default. Change this value to the name of your setup and `_slurm_node` (e.g. `compose_slurm_node`) to ensure a correct mapping of the Slurm nodes. |

### Github Workflow Tests (Branch 24.1)
| Setup                  | bioblend           | workflow ard       | workflow quality_control | workflow wf3-shed-tools (example1) | selenium           |
|------------------------|--------------------|--------------------|--------------------------|------------------------------------|--------------------|
| Galaxy Base            | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| Galaxy Proxy Prefix    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :x:                |
| HTCondor               | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| Slurm                  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| Pulsar-REST            | :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |
| Pulsar-MQ              | :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |
| k8s                    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| Singularity            | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| Pulsar-MQ + Singularity| :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |
| Slurm + Singularity    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |
| HTCondor + Singularity | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |


Implemented: :heavy_check_mark:   
Not Implemented: :x:



================================================
FILE: compose/base-images/galaxy-cluster-base/Dockerfile
================================================
# Base image for the Galaxy "cluster" containers (HTCondor and Slurm nodes).
# Extends galaxy-container-base with the HTCondor and Slurm clients plus a
# read-only CVMFS client for the Galaxy project repositories.
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest

FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG

# Base dependencies (gnupg2 and curl are needed below to add apt keys/repos).
RUN apt update && apt install --no-install-recommends gnupg2 curl -y \
    && /usr/bin/common_cleanup.sh

# Install HTCondor from the official wisc.edu repository.
# DEBIAN_FRONTEND=noninteractive suppresses debconf prompts during install.
# The packaged condor_config.local is removed because the runtime
# configuration is mounted/rendered from outside the image.
ENV DEBIAN_FRONTEND=noninteractive
RUN curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \
    && echo "deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main" >> /etc/apt/sources.list \
    && apt update && apt install --no-install-recommends htcondor -y \
    && rm -f /etc/condor/condor_config.local \
    && /usr/bin/common_cleanup.sh

# Install Slurm client (plus munge for authentication and slurm-drmaa so
# Galaxy can submit via the DRMAA library).
# The munge user gets a fixed UID/GID — presumably so it matches across
# the other containers sharing munge credentials; TODO confirm.
ENV MUNGE_USER=munge \
    MUNGE_UID=1200 \
    MUNGE_GID=1200
RUN groupadd -r $MUNGE_USER -g $MUNGE_GID \
    && useradd -u $MUNGE_UID -r -g $MUNGE_USER $MUNGE_USER \
    && echo "deb http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main" >> /etc/apt/sources.list \
    && echo "deb-src http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main" >> /etc/apt/sources.list \
    && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8DE68488997C5C6BA19021136F2CC56412788738 \
    && apt update \
    && apt install --no-install-recommends python3-distutils slurm-client slurmd slurmctld slurm-drmaa1 -y \
    && apt --no-install-recommends install munge libmunge-dev -y \
    && ln -s /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \
    && /usr/bin/common_cleanup.sh

# Install the CVMFS client; /srv/cvmfs is the cache base configured in
# files/cvmfs/default.local (copied below).
RUN apt update \
    && apt install wget lsb-release -y \
    && wget https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest_all.deb \
    && dpkg -i cvmfs-release-latest_all.deb \
    && rm -f cvmfs-release-latest_all.deb \
    && apt update \
    && apt install --no-install-recommends cvmfs -y \
    && mkdir /srv/cvmfs \
    && /usr/bin/common_cleanup.sh
COPY files/cvmfs /etc/cvmfs


================================================
FILE: compose/base-images/galaxy-cluster-base/files/common_cleanup.sh
================================================
#!/bin/sh
# Image-size cleanup helper, invoked at the end of each Dockerfile RUN layer.

set -x

# This usually drastically reduces the container size
# at the cost of the startup time of your application
# (e.g. Python bytecode is recompiled on first use).
find / -name '*.pyc' -delete

# Drop log files, per-user caches and apt package lists/caches.
# NOTE(review): `find -delete` cannot remove non-empty directories, so
# populated `.cache` directories survive (the error is printed but ignored
# since there is no `set -e`).
find / -name '*.log' -delete
find / -name '.cache' -delete
rm -rf /var/lib/apt/lists/*
rm -rf /var/cache/*

# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles
# `|| true` keeps the layer build succeeding when a glob matches nothing.
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true


================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/default.local
================================================
# CVMFS client configuration for the Galaxy project repositories.
CVMFS_REPOSITORIES="data.galaxyproject.org,singularity.galaxyproject.org"
# "DIRECT" means: talk to the Stratum-1 servers without a local squid proxy.
CVMFS_HTTP_PROXY="DIRECT"
# Soft quota of the local cache in megabytes.
CVMFS_QUOTA_LIMIT="4000"
CVMFS_CACHE_BASE="/srv/cvmfs/cache"


================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/domain.d/galaxyproject.org.conf
================================================
# Domain-level CVMFS settings for the *.galaxyproject.org repositories.
# Semicolon-separated Stratum-1 mirror list; @fqrn@ is replaced by the
# fully-qualified repository name. With CVMFS_USE_GEOAPI the client orders
# the mirrors by geographic proximity.
CVMFS_SERVER_URL="http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-mel0.gvl.org.au/cvmfs/@fqrn@;http://cvmfs1-ufr0.galaxyproject.eu/cvmfs/@fqrn@"
CVMFS_KEYS_DIR=/etc/cvmfs/keys/galaxyproject.org
CVMFS_USE_GEOAPI="yes"


================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub
================================================
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE
3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj
GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH
maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs
NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT
owIDAQAB
-----END PUBLIC KEY-----

================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/singularity.galaxyproject.org.pub
================================================
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE
3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj
GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH
maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs
NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT
owIDAQAB
-----END PUBLIC KEY-----

================================================
FILE: compose/base-images/galaxy-container-base/Dockerfile
================================================
# Multi-stage build: compile Apptainer (needs a Go toolchain) in a
# throwaway builder stage, then install it into a slim Ubuntu runtime
# image that also ships the Docker CLI/daemon client.
FROM buildpack-deps:22.04 as build_apptainer

COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh

# Install Go (only needed for building apptainer)
ENV GO_VERSION=1.22.7
RUN apt update && apt install --no-install-recommends cryptsetup-bin uuid-dev libseccomp-dev libfuse-dev libfuse3-dev -y \
    && wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz \
    && tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz \
    && rm go${GO_VERSION}.linux-amd64.tar.gz \
    && /usr/bin/common_cleanup.sh

# Build Apptainer from the upstream source tarball; the build tree stays
# in /apptainer/builddir and is installed in the final stage below.
ENV PATH=/usr/local/go/bin:${PATH}
ENV APPTAINER_VERSION=1.3.4
RUN wget https://github.com/apptainer/apptainer/releases/download/v${APPTAINER_VERSION}/apptainer-${APPTAINER_VERSION}.tar.gz \
    && mkdir -p apptainer \
    && tar -xzf apptainer-${APPTAINER_VERSION}.tar.gz --strip-components=1 -C apptainer \
    && cd apptainer \
    && ./mconfig --with-suid \
    && make -C builddir \
    && /usr/bin/common_cleanup.sh


# --- Final image ---
FROM ubuntu:22.04 as final

COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh

# Base dependencies
RUN apt update && apt install --no-install-recommends ca-certificates python3-distutils squashfs-tools tzdata -y \
    && /usr/bin/common_cleanup.sh

# Install Docker
RUN apt update \
    && apt install --no-install-recommends docker.io -y \
    && /usr/bin/common_cleanup.sh

# Install Apptainer from the builder stage: `make install` needs make,
# which is removed again afterwards to keep the image small. The
# /etc/localtime bind entry is commented out of apptainer.conf —
# presumably because containers here may lack that file; TODO confirm.
COPY --from=build_apptainer /apptainer /apptainer
RUN apt update && apt install --no-install-recommends make -y \
    && make -C /apptainer/builddir install \
    && apt remove make -y \
    && rm -rf /apptainer \
    && sed -e '/bind path = \/etc\/localtime/s/^/#/g' -i /usr/local/etc/apptainer/apptainer.conf \
    && /usr/bin/common_cleanup.sh


================================================
FILE: compose/base-images/galaxy-container-base/files/common_cleanup.sh
================================================
#!/bin/sh
# Image-size cleanup helper, invoked at the end of each Dockerfile RUN layer.

set -x

# This usually drastically reduces the container size
# at the cost of the startup time of your application
# (e.g. Python bytecode is recompiled on first use).
find / -name '*.pyc' -delete

# Drop log files, per-user caches and apt package lists/caches.
# NOTE(review): `find -delete` cannot remove non-empty directories, so
# populated `.cache` directories survive (the error is printed but ignored
# since there is no `set -e`).
find / -name '*.log' -delete
find / -name '.cache' -delete
rm -rf /var/lib/apt/lists/*
rm -rf /var/cache/*

# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles
# `|| true` keeps the layer build succeeding when a glob matches nothing.
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true


================================================
FILE: compose/base_config.yml
================================================
# Gravity (process manager) settings for the Galaxy container:
# gunicorn web workers, celery workers, and job/workflow handlers.
gravity:
  process_manager: supervisor
  galaxy_root: /galaxy
  virtualenv: /galaxy/.venv
  gunicorn:
    enable: True
    bind: 0.0.0.0:5555
    workers: 2
  celery:
    enable: true
    enable_beat: true
    concurrency: 2
  handlers:
    handler:
      processes: 2
      pools:
        - job-handlers
        - workflow-schedulers

# Options merged into galaxy.yml.
galaxy:
  tool_dependency_dir: /tool_deps
  # Tool data tables come from the read-only CVMFS mounts.
  tool_data_table_config_path: /cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml
  tus_upload_store: /tus_upload_store
  enable_celery_tasks: true
  celery_conf:
    result_backend: redis://redis:6379/0

# Options for Pulsar's application config.
pulsar:
  conda_auto_init: True
  conda_auto_install: True
  tool_dependency_dir: dependencies
  dependency_resolution:
    resolvers:
      - type: conda
        auto_init: true
        auto_install: true
      # Fallback resolver that ignores the requested package version.
      - type: conda
        versionless: true

# slurm.conf key/value pairs, rendered verbatim into the Slurm config.
# Probably needs more polishing, but at least it works..
slurm:
  SlurmctldHost: "slurmctld"
  # munge provides authentication between the Slurm daemons.
  AuthType: "auth/munge"
  CryptoType: "crypto/munge"
  MpiDefault: "none"
  ProctrackType: "proctrack/pgid"
  ReturnToService: "1"
  SlurmctldPidFile: "/var/run/slurmctld.pid"
  SlurmctldPort: "6817"
  SlurmdPidFile: "/var/run/slurmd.pid"
  SlurmdPort: "6818"
  SlurmdSpoolDir: "/tmp/slurmd"
  SlurmUser: "slurm"
  StateSaveLocation: "/tmp/slurm"
  SwitchType: "switch/none"
  TaskPlugin: "task/none"
  InactiveLimit: "0"
  KillWait: "30"
  MinJobAge: "300"
  SlurmctldTimeout: "120"
  SlurmdTimeout: "300"
  Waittime: "0"
  SchedulerType: "sched/backfill"
  SelectType: "select/cons_res"
  SelectTypeParameters: "CR_Core_Memory"
  AccountingStorageType: "accounting_storage/none"
  AccountingStoreFlags: "job_comment"
  ClusterName: "Cluster"
  JobCompType: "jobcomp/none"
  JobAcctGatherFrequency: "30"
  JobAcctGatherType: "jobacct_gather/none"
  SlurmctldDebug: info
  SlurmdDebug: info

# HTCondor configuration for the Galaxy container: a submit-only node
# (MASTER + SCHEDD) that joins the pool run by htcondor-master.
# The ALLOW_* wildcards fully open the pool; authentication is enforced
# via the shared pool password instead.
htcondor_galaxy:
  CONDOR_HOST: "htcondor-master"
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  ALLOW_NEGOTIATOR: "*"
  DAEMON_LIST: "MASTER, SCHEDD"
  UID_DOMAIN: "galaxy"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  TRUST_UID_DOMAIN: "true"
  # Pool-password authentication between the daemons.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"

# HTCondor configuration for the central manager: MASTER, COLLECTOR,
# NEGOTIATOR and SCHEDD all run on this host ($(FULL_HOSTNAME) makes it
# its own pool head). The ALLOW_* wildcards fully open the pool;
# authentication is enforced via the shared pool password instead.
htcondor_master:
  BASE_CGROUP: ""
  CONDOR_HOST: "$(FULL_HOSTNAME)"
  DAEMON_LIST: "MASTER, COLLECTOR, NEGOTIATOR, SCHEDD"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  # Fix: this key was previously listed twice ("True" and "TRUE") —
  # duplicate mapping keys are invalid YAML and ambiguous across parsers.
  # HTCondor booleans are case-insensitive, so one entry suffices.
  TRUST_UID_DOMAIN: "True"
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_NEGOTIATOR: "*"
  ALLOW_NEGOTIATOR_SCHEDD: "*"
  ALLOW_WRITE_COLLECTOR: "*"
  ALLOW_WRITE_STARTD: "*"
  ALLOW_READ_COLLECTOR: "*"
  ALLOW_READ_STARTD: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  DOCKER_IMAGE_CACHE_SIZE: "20"
  UID_DOMAIN: "galaxy"
  # Pool-password authentication between the daemons.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"

# HTCondor configuration for worker nodes: MASTER + STARTD advertising a
# single slot (NUM_SLOTS/NUM_SLOTS_TYPE_1). The ALLOW_* wildcards fully
# open the pool; authentication is enforced via the shared pool password.
htcondor_executor:
  CONDOR_HOST: "htcondor-master"
  DAEMON_LIST: "MASTER, STARTD"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  TRUST_UID_DOMAIN: "true"
  NUM_SLOTS: "1"
  NUM_SLOTS_TYPE_1: "1"
  BASE_CGROUP: ""
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  ALLOW_NEGOTIATOR_SCHEDD: "*"
  ALLOW_WRITE_COLLECTOR: "*"
  ALLOW_WRITE_STARTD: "*"
  ALLOW_READ_COLLECTOR: "*"
  ALLOW_READ_STARTD: "*"
  UID_DOMAIN: "galaxy"
  SCHED_NAME: "htcondor-master"
  # Pool-password authentication between the daemons.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"


================================================
FILE: compose/docker-compose.htcondor.yml
================================================
# Extend Galaxy to run jobs using HTCondor.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up`
services:
  galaxy-configurator:
    environment:
      # Switch Galaxy's job runner to condor and have the configurator
      # render the HTCondor config files into the export folder.
      - GALAXY_JOB_RUNNER=condor
      - HTCONDOR_OVERWRITE_CONFIG=true
    volumes:
      # Rendered HTCondor config, shared with the containers below.
      - ${EXPORT_DIR:-./export}/htcondor:/htcondor
  htcondor-master:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}
    build: galaxy-htcondor
    hostname: htcondor-master
    environment:
      - HTCONDOR_TYPE=master
      # NOTE(review): demo pool password hard-coded in the compose file;
      # must match the executor below — change it for real deployments.
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    volumes:
      - ${EXPORT_DIR:-./export}/htcondor:/config
    networks:
      - galaxy
  htcondor-executor:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}
    build: galaxy-htcondor
    # Privileged + host Docker socket — presumably needed to run
    # containerized jobs on the host daemon; TODO confirm.
    privileged: true
    environment:
      - HTCONDOR_TYPE=executor
      - CONDOR_HOST=htcondor-master
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    volumes:
      # The executor needs Galaxy's database, tools, venv and tool
      # dependencies mounted at the same paths as in the Galaxy container.
      - ${EXPORT_DIR:-./export}/htcondor:/config
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv
      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy
  galaxy-server:
    volumes:
      # Galaxy mounts the rendered HTCondor config read from the export dir.
      - ${EXPORT_DIR:-./export}/htcondor:/htcondor_config


================================================
FILE: compose/docker-compose.k8s.yml
================================================
# Extend Galaxy to run jobs on Kubernetes.
# This will set up Kubernetes using kind (https://kind.sigs.k8s.io).
# Note that this extension is not compatible with others like Pulsar, HTCondor, Singularity, etc.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.k8s.yml up`
services:
  galaxy-configurator:
    environment:
      - KIND_OVERWRITE_CONFIG=true
      - GALAXY_JOB_RUNNER=k8s
      # KUBECONFIG written by the galaxy-kind container (shared /kind volume).
      - GALAXY_KUBECONFIG=/kind/.kube/config_in_docker
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
  galaxy-server:
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
    networks:
      # Joined so Galaxy can reach the kind cluster's API server.
      - kind
  galaxy-kind:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-kind:${IMAGE_TAG:-latest}
    build: galaxy-kind
    # Privileged + host Docker socket — kind starts the cluster nodes via
    # the host's Docker daemon.
    privileged: true
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy
      - kind
networks:
  # Fixed-name network shared with the kind node containers.
  kind:
    name: kind


================================================
FILE: compose/docker-compose.pulsar.mq.yml
================================================
# Extend Pulsar to use RabbitMQ (Message Queue) instead of the REST API
# for communicating with Galaxy.
# Requirements: `docker-compose.pulsar.yml`
# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=pulsar_mq
      - PULSAR_CONFIG_MESSAGE_QUEUE_URL=amqp://pulsar:8jfqi9uo2i30fqoifqfo09@pulsar-rabbitmq/pulsar
      - PULSAR_GALAXY_URL=http://nginx:80
  pulsar-rabbitmq:
    image: rabbitmq:alpine
    container_name: pulsar-rabbitmq
    hostname: pulsar-rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=pulsar
      - RABBITMQ_DEFAULT_PASS=8jfqi9uo2i30fqoifqfo09
      - RABBITMQ_DEFAULT_VHOST=pulsar
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar_rabbitmq:/var/lib/rabbitmq:delegated
    networks:
      - galaxy


================================================
FILE: compose/docker-compose.pulsar.yml
================================================
# Extend Galaxy to run jobs using Pulsar. With this setup, you
# don't need to share the `/galaxy/database` path with Galaxy.
# Galaxy will send all the needed files for Pulsar, and Pulsar
# will handle the rest locally on its side.
# This docker-compose file enables for Galaxy and Pulsar to
# communicate over HTTP. To enable the MQ, concatenate the
# docker-compose.pulsar.mq.yml after this one.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=pulsar_rest
      - GALAXY_PULSAR_TRANSPORT=${GALAXY_PULSAR_TRANSPORT:-curl}
      - PULSAR_OVERWRITE_CONFIG=true
      - PULSAR_JOB_RUNNER=local
      - PULSAR_CONFIG_PRIVATE_TOKEN=changemeinproduction
      - GALAXY_PULSAR_URL=http://pulsar:8913
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config
  pulsar:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/pulsar:${IMAGE_TAG:-latest}
    build: pulsar
    hostname: pulsar
    privileged: true
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config
      - ${EXPORT_DIR:-./export}/pulsar/dependencies:/pulsar/dependencies
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
    networks:
      - galaxy


================================================
FILE: compose/docker-compose.singularity.yml
================================================
# Extend Galaxy to use Singularity for dependency resolution.
# This is working with the base Galaxy, but also in combination
# with different job runners, like HTCondor, or Slurm
# (Pulsar is still WIP).
# Examples:
#  * `docker-compose -f docker-compose.yml -f docker-compose.singularity.yml up`
#  * `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_DEPENDENCY_RESOLUTION=singularity
      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=false


================================================
FILE: compose/docker-compose.slurm.yml
================================================
# Extend Galaxy to run jobs using Slurm.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=slurm
      - SLURM_OVERWRITE_CONFIG=true
      - SLURM_NODE_COUNT=${SLURM_NODE_COUNT:-1}
      - SLURM_NODE_HOSTNAME=compose_slurm_node
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
  galaxy-server:
    volumes:
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
  slurmctld:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}
    build: galaxy-slurm
    command: ["slurmctld"]
    hostname: slurmctld
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
    networks:
      - galaxy
  slurm_node_discovery:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm-node-discovery:${IMAGE_TAG:-latest}
    build: galaxy-slurm-node-discovery
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - /var/run/docker.sock:/var/run/docker.sock
  slurm_node:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}
    build: galaxy-slurm
    command: ["slurmd"]
    privileged: true
    labels:
      slurm_node: "true"
    volumes:
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv
      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy


================================================
FILE: compose/docker-compose.yml
================================================
services:
  galaxy-server:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-server:${IMAGE_TAG:-latest}
    build: galaxy-server
    environment:
      - GALAXY_DEFAULT_ADMIN_USER=admin
      - GALAXY_DEFAULT_ADMIN_EMAIL=admin@galaxy.org
      - GALAXY_DEFAULT_ADMIN_PASSWORD=password
      - GALAXY_DEFAULT_ADMIN_KEY=fakekey
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    hostname: galaxy-server
    privileged: true
    volumes:
      # This is the directory where all your files from Galaxy will be stored
      # on your host system
      - ${EXPORT_DIR:-./export}/:/export/:delegated
      - ${EXPORT_DIR:-./export}/tus_upload_store:/tus_upload_store:delegated
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - postgres
      - rabbitmq
      - redis
      - rustus
    networks:
      - galaxy
  # The galaxy-configurator is responsible for the whole configuration of
  # your setup and should be the central place of configuration.
  galaxy-configurator:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-configurator:${IMAGE_TAG:-latest}
    build: galaxy-configurator
    environment:
      - EXPORT_DIR=${EXPORT_DIR:-./export}
      - HOST_PWD=$PWD
      - GALAXY_OVERWRITE_CONFIG=true
      - GALAXY_DEPENDENCY_RESOLUTION=conda
      - GALAXY_JOB_RUNNER=local
      - GALAXY_CONFIG_ADMIN_USERS=admin@galaxy.org
      - GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:chaopagoosaequuashie@postgres/galaxy
      - GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL:-http://localhost}
      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=true
      - GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION=amqp://galaxy:vaiJa3ieghai2ief0jao@rabbitmq/galaxy
      - GALAXY_PROXY_PREFIX=${GALAXY_PROXY_PREFIX:-}
      - GALAXY_CONFIG_CLEANUP_JOB=onsuccess
      - NGINX_OVERWRITE_CONFIG=true
    volumes:
      - ${EXPORT_DIR:-./export}/galaxy/config:/galaxy/config
      - ${EXPORT_DIR:-./export}/nginx:/etc/nginx
      - ./base_config.yml:/base_config.yml
      - ./galaxy-configurator/templates:/templates
  # The database for Galaxy
  postgres:
    image: postgres:15
    hostname: postgres
    environment:
      - POSTGRES_PASSWORD=chaopagoosaequuashie
      - POSTGRES_USER=galaxy
      - POSTGRES_DB=galaxy
    volumes:
      - ${EXPORT_DIR:-./export}/postgres/:/var/lib/postgresql/data:delegated
    networks:
      - galaxy
  # The proxy server. All web-traffic is going through here, so we can
  # offload static file serving
  # (https://docs.galaxyproject.org/en/master/admin/production.html#using-a-proxy-server)
  nginx:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-nginx:${IMAGE_TAG:-latest}
    build: galaxy-nginx
    ports:
      - 80:80
    volumes:
      - ${EXPORT_DIR:-./export}/nginx:/config:ro
      - ${EXPORT_DIR:-./export}/galaxy/static:/export/galaxy/static:ro
      - ${EXPORT_DIR:-./export}/galaxy/config/plugins:/galaxy/config/plugins:ro
    depends_on:
      - galaxy-server
    networks:
      - galaxy
  # Message queue for better performance
  rabbitmq:
    image: rabbitmq:alpine
    container_name: galaxy-rabbitmq
    hostname: rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=galaxy
      - RABBITMQ_DEFAULT_PASS=vaiJa3ieghai2ief0jao
      - RABBITMQ_DEFAULT_VHOST=galaxy
    volumes:
      - ${EXPORT_DIR:-./export}/rabbitmq:/var/lib/rabbitmq:delegated
    networks:
      - galaxy
  # Backend for Celery
  redis:
    image: redis:alpine
    container_name: galaxy-redis
    hostname: redis
    volumes:
      - ${EXPORT_DIR:-./export}/redis:/data:delegated
    networks:
      - galaxy
  # For file uploads
  rustus:
    image: s3rius/rustus:0.7.6-alpine
    container_name: galaxy-rustus
    hostname: rustus
    environment:
      - RUSTUS_STORAGE=file-storage
      - RUSTUS_DATA_DIR=/data/
      - RUSTUS_URL=${GALAXY_PROXY_PREFIX:-}/api/upload/resumable_upload
      - RUSTUS_HOOKS_HTTP_URLS=http://nginx${GALAXY_PROXY_PREFIX:-}/api/upload/hooks
      - RUSTUS_HOOKS_HTTP_PROXY_HEADERS=X-Api-Key,Cookie
      - RUSTUS_HOOKS=pre-create
      - RUSTUS_HOOKS_FORMAT=tusd
      - RUSTUS_INFO_STORAGE=redis-info-storage
      - RUSTUS_INFO_DB_DSN=redis://redis:6379/1
      - RUSTUS_MAX_BODY_SIZE=20000000
      - RUSTUS_BEHIND_PROXY=true
    volumes:
      - ${EXPORT_DIR:-./export}/tus_upload_store:/data:delegated
    depends_on:
      - redis
    networks:
      - galaxy
networks:
  galaxy:


================================================
FILE: compose/galaxy-configurator/Dockerfile
================================================
FROM alpine:3.17

# bash: run.sh is a bash script; j2cli[yaml] provides the `j2` template
# renderer; jinja2-ansible-filters adds Ansible filters (e.g. to_nice_yaml)
# that the templates rely on via customize.py.
RUN apk add --no-cache bash python3 py3-pip \
    && pip3 install j2cli[yaml] jinja2-ansible-filters

COPY ./templates /templates
COPY ./customize.py /customize.py
COPY ./run.sh /usr/bin/run.sh

# Exec (JSON) form so the script runs as PID 1 and receives container
# signals directly; the previous shell form wrapped it in `/bin/sh -c`,
# which swallows SIGTERM and delays container shutdown.
ENTRYPOINT ["/usr/bin/run.sh"]


================================================
FILE: compose/galaxy-configurator/customize.py
================================================
import os


def j2_environment_params():
    """Extra parameters for the Jinja2 Environment.

    Registers the AnsibleCoreFiltersExtension so that filters known
    from Ansible (such as `to_nice_yaml`) are available in templates.
    """
    extensions = ('jinja2_ansible_filters.AnsibleCoreFiltersExtension',)
    return {"extensions": extensions}


def alter_context(context):
    """
    Build the final template context from environment variables plus an
    optional input-file context.

    Environment variables whose names start with a known prefix (like
    ``GALAXY_CONFIG_*``) are additionally grouped into one dict per prefix
    (stored at e.g. ``galaxy.*``) with the prefix stripped from the key.
    All-uppercase remainders are lower-cased; mixed-case remainders are
    kept as-is (to cope with different formattings: compare Slurm with
    Galaxy). String values "true"/"false" (any case) are converted to
    Python booleans before grouping.

    Variables that are stored in an input file overwrite the input
    from env.

    TODO: Unit test
    """
    new_context = dict(os.environ)

    # Env-var prefix -> name of the grouped dict in the context.
    translations = {
      "GALAXY_CONFIG_":         "galaxy",
      "GRAVITY_CONFIG_":        "gravity",
      "GALAXY_JOB_METRICS_":    "galaxy_job_metrics",
      "NGINX_CONFIG_":          "nginx",
      "SLURM_CONFIG_":          "slurm",
      "HTCONDOR_GALAXY_":       "htcondor_galaxy",
      "HTCONDOR_MASTER_":       "htcondor_master",
      "HTCONDOR_EXECUTOR_":     "htcondor_executor",
      "PULSAR_CONFIG_":         "pulsar"
    }

    # Add values from possible input file if existent;
    # file values take precedence over env values.
    if context:
        new_context.update(context)

    # Translate string-boolean to Python boolean
    for key, value in new_context.items():
        if not isinstance(value, str):
            continue
        if value.lower() == "true":
            new_context[key] = True
        elif value.lower() == "false":
            new_context[key] = False

    # Make sure every grouped dict exists, even when no variable uses
    # its prefix, so templates can iterate over them unconditionally.
    for to in translations.values():
        new_context.setdefault(to, {})

    for key, value in new_context.items():
        for frm, to in translations.items():
            if key.startswith(frm):
                stripped = key[len(frm):]
                # Format key depending on it being uppercase or not
                # (to cope with different formatings: compare Slurm
                # with Galaxy)
                if stripped.isupper():
                    stripped = stripped.lower()
                new_context[to][stripped] = value
                # Stop at the first matching prefix: without this the
                # already-stripped key would keep being compared against
                # (and could be stripped again by) later prefixes.
                break

    context = new_context

    # Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative;
    # a relative "./foo" is resolved against the host's working directory
    # (HOST_PWD), since container paths are meaningless to the Docker host.
    if "HOST_EXPORT_DIR" not in context and "EXPORT_DIR" in context \
            and "HOST_PWD" in context:
        if context["EXPORT_DIR"].startswith("./"):
            context["HOST_EXPORT_DIR"] = context["HOST_PWD"] \
                                         + context["EXPORT_DIR"][1:]
        else:
            context["HOST_EXPORT_DIR"] = context["EXPORT_DIR"]

    return context


================================================
FILE: compose/galaxy-configurator/run.sh
================================================
#!/bin/bash
# Entry point of the galaxy-configurator container. Renders all configuration
# files from the Jinja2 templates in /templates (driven by environment
# variables and /base_config.yml) into the shared config volumes.
# A "configurator.lock" file per config dir signals the consuming services
# that configuration is still in progress.

# Set default config dirs
export GALAXY_CONF_DIR=${GALAXY_CONF_DIR:-/galaxy/config} \
       NGINX_CONF_DIR=${NGINX_CONF_DIR:-/etc/nginx/} \
       SLURM_CONF_DIR=${SLURM_CONF_DIR:-/etc/slurm} \
       HTCONDOR_CONF_DIR=${HTCONDOR_CONF_DIR:-/htcondor} \
       PULSAR_CONF_DIR=${PULSAR_CONF_DIR:-/pulsar/config} \
       KIND_CONF_DIR=${KIND_CONF_DIR:-/kind}

echo "Locking all configurations"
locks=("$GALAXY_CONF_DIR" "$SLURM_CONF_DIR" "$HTCONDOR_CONF_DIR" "$PULSAR_CONF_DIR" "$KIND_CONF_DIR")
for lock in "${locks[@]}"; do
  echo "Locking $lock"
  # The dir only exists when the corresponding volume is mounted; create it
  # so 'touch' cannot fail for extensions that are not enabled in this setup.
  mkdir -p "${lock}"
  touch "${lock}/configurator.lock"
done

# Nginx configuration
if [ "$NGINX_OVERWRITE_CONFIG" != "true" ]; then
  echo "NGINX_OVERWRITE_CONFIG is not true. Skipping configuration of Nginx"
else
  nginx_configs=( "nginx.conf" )

  for conf in "${nginx_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/nginx/$conf.j2" /base_config.yml
    echo "The following changes will be applied to $conf:"
    diff "${NGINX_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${NGINX_CONF_DIR}/$conf"
  done
fi

# Slurm configuration
if [ "$SLURM_OVERWRITE_CONFIG" != "true" ]; then
  echo "SLURM_OVERWRITE_CONFIG is not true. Skipping configuration of Slurm"
else
  slurm_configs=( "slurm.conf" )

  for conf in "${slurm_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/slurm/$conf.j2" /base_config.yml
    echo "The following changes will be applied to $conf:"
    diff "${SLURM_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${SLURM_CONF_DIR}/$conf"
  done

  rm "${SLURM_CONF_DIR}/configurator.lock"
  echo "Lock for Slurm config released"
fi

# HTCondor configuration
if [ "$HTCONDOR_OVERWRITE_CONFIG" != "true" ]; then
  echo "HTCONDOR_OVERWRITE_CONFIG is not true. Skipping configuration of HTCondor"
else
  htcondor_configs=( "galaxy.conf" "master.conf" "executor.conf" )

  for conf in "${htcondor_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/htcondor/$conf.j2" /base_config.yml
    echo "The following changes will be applied to $conf:"
    diff "${HTCONDOR_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${HTCONDOR_CONF_DIR}/$conf"
  done

  rm "${HTCONDOR_CONF_DIR}/configurator.lock"
  echo "Lock for HTCondor config released"
fi

# Pulsar configuration
if [ "$PULSAR_OVERWRITE_CONFIG" != "true" ]; then
  echo "PULSAR_OVERWRITE_CONFIG is not true. Skipping configuration of Pulsar"
else
  pulsar_configs=( "server.ini" "app.yml" )

  for conf in "${pulsar_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/pulsar/$conf.j2" /base_config.yml
    echo "The following changes will be applied to $conf:"
    diff "${PULSAR_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${PULSAR_CONF_DIR}/$conf"
  done

  rm "${PULSAR_CONF_DIR}/configurator.lock"
  echo "Lock for Pulsar config released"
fi

# Kind configuration
if [ "$KIND_OVERWRITE_CONFIG" != "true" ]; then
  echo "KIND_OVERWRITE_CONFIG is not true. Skipping configuration of Kind"
else
  kind_configs=( "kind_config.yml" "k8s_config/persistent_volumes.yml" "k8s_config/pv_claims.yml" )
  mkdir /tmp/k8s_config
  mkdir "${KIND_CONF_DIR}/k8s_config"

  for conf in "${kind_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/kind/$conf.j2" /base_config.yml

    echo "The following changes will be applied to $conf:"
    diff "${KIND_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${KIND_CONF_DIR}/$conf"
  done

  rm "${KIND_CONF_DIR}/configurator.lock"
  echo "Lock for Kind config released"
  sleep 5
  echo "Waiting for Kind to create the cluster"
  # Busy-wait until the kind container has written the kubeconfig.
  until [ -f "${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}" ] && echo Found KUBECONFIG; do
    sleep 0.1;
  done;
  # Galaxy runs as a non-root user and must be able to read the kubeconfig.
  chmod a+r "${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}"
fi

echo "Releasing all locks (except Galaxy) if it didn't happen already"
locks=("$SLURM_CONF_DIR" "$HTCONDOR_CONF_DIR" "$PULSAR_CONF_DIR" "$KIND_CONF_DIR")
for lock in "${locks[@]}"; do
  echo "Unlocking $lock"
  # -f: the sections above already removed their own lock when enabled,
  # so a plain rm would print a spurious "No such file" error here.
  rm -f "${lock}/configurator.lock"
done

# Galaxy configuration
if [ "$GALAXY_OVERWRITE_CONFIG" != "true" ]; then
  echo "GALAXY_OVERWRITE_CONFIG is not true. Skipping configuration of Galaxy"
  echo "Lock for Galaxy config released"
  rm "${GALAXY_CONF_DIR}/configurator.lock"
  exit 0
fi

cd "${GALAXY_CONF_DIR}" || { echo "Error: Could not find Galaxy config dir"; exit 1; }

echo "Waiting for Galaxy config dir to be initially populated (in case of first startup)"
# On a fresh export volume the dir contains nothing but subdirectories until
# galaxy-server has copied its sample configs; wait for a plain file to appear.
until [ "$(ls -p | grep -v /)" != "" ] && echo Galaxy config populated; do
  sleep 0.5;
done;

if [ ! -f /base_config.yml ]; then
  echo "Warning: 'base_config.yml' does not exist. Configuration will solely happen through env!"
  touch /base_config.yml
fi

galaxy_configs=( "job_conf.xml" "galaxy.yml" "job_metrics.xml" "container_resolvers_conf.yml" "dependency_resolvers_conf.xml" "GALAXY_PROXY_PREFIX.txt" )

for conf in "${galaxy_configs[@]}"; do
  echo "Configuring $conf"
  j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/galaxy/$conf.j2" /base_config.yml
  echo "The following changes will be applied to $conf:"
  diff "${GALAXY_CONF_DIR}/$conf" "/tmp/$conf"
  mv -f "/tmp/$conf" "${GALAXY_CONF_DIR}/$conf"
done

echo "Finished configuring Galaxy"
echo "Lock for Galaxy config released"
rm "${GALAXY_CONF_DIR}/configurator.lock"

if [ "$DONT_EXIT" = "true" ]; then
  echo "Integration test detected. Galaxy Configurator will go to sleep (to not interrupt docker-compose)."
  sleep infinity
fi


================================================
FILE: compose/galaxy-configurator/templates/galaxy/GALAXY_PROXY_PREFIX.txt.j2
================================================
{{ GALAXY_PROXY_PREFIX }}


================================================
FILE: compose/galaxy-configurator/templates/galaxy/container_resolvers_conf.yml.j2
================================================
# Resolvers that are potentially used by default are uncommented (comments describe under 
# which premises they are in the defaults).

# Note that commented yaml does not have a space after the #
# while additional explanations do.

# Explicit container resolvers
# ============================

# get a container description (URI) for an explicit singularity container requirement
- type: explicit_singularity

# get a cached container description (path) for singularity
# pulls the container into a cache directory if not yet there
- type: cached_explicit_singularity
  # set the cache directory for storing images
  #cache_directory: database/container_cache/singularity/explicit

# Mulled container resolvers
# ==========================

# The following uncommented container resolvers are in the defaults
# if ``enable_mulled_containers`` is set in ``galaxy.yml`` (which is the default).

# get a container description for a cached mulled singularity container
# checks if the image file exists in `cache_directory`
- type: cached_mulled_singularity
  #
  #cache_directory: database/container_cache/singularity/mulled
  #
  # the method for caching directory listings (not the method for image caching)
  # can be uncached or dir_mtime (the latter only determines the directory listing
  # if the modification time of the directory changed)
  #cache_directory_cacher_type: uncached

# Resolves container images from quay.io/NAMESPACE/MULLED_HASH where the
# mulled hash describes which packages and versions should be in the container
#
# These resolvers are generally listed after the cached_* resolvers, so that images
# are not pulled if they are already cached.
#
# When pulling the image file will be stored in the configured cache dir.
# If auto_install is True the result will point to the cached image file
# and to quay.io/NAMESPACE/MULLED_HASH otherwise.
- type: mulled_singularity
  auto_install: False
  #namespace: biocontainers
  # In addition to the arguments of `mulled` there are cache_directory
  # and cache_directory_cacher_type. See the description at `cached_explicit_singularity`
  # and note the minor difference in the default for `cache_directory`
  #cache_directory: database/container_cache/singularity/mulled
  #cache_directory_cacher_type: uncached

# Building container resolvers
# ----------------------------
#
# The following uncommented container resolvers are included in the default
# if ``docker`` is available

- type: build_mulled_singularity
  auto_install: False
  #hash_func: v2
  #cache_directory: database/container_cache/singularity/mulled
  #cache_directory_cacher_type: uncached

# Other explicit container resolvers
# ----------------------------------

#-type: fallback_singularity
  #identifier: A_VALID_CONTAINER_IDENTIFIER
#-type: fallback_no_requirements_singularity
  #identifier: A_VALID_CONTAINER_IDENTIFIER
#-type: requires_galaxy_environment_singularity
  #identifier: A_VALID_CONTAINER_IDENTIFIER

# The mapping container resolver allows to specify a list of mappings from tools
# (tool_id) to containers (type and identifier).

#-type: mapping
  #mappings:
  #- container_type: singularity
     #tool_id: A_TOOL_ID
     #identifier: A_VALID_CONTAINER_IDENTIFIER


================================================
FILE: compose/galaxy-configurator/templates/galaxy/dependency_resolvers_conf.xml.j2
================================================
<dependency_resolvers>
  {% if GALAXY_DEPENDENCY_RESOLUTION != 'singularity' %}
  <!-- the default configuration, first look for dependencies installed from the toolshed -->
  <tool_shed_packages />
  <!-- then look for env.sh files in directories according to the "galaxy packages" schema.
       These resolvers can take a base_path attribute to specify where to look for
       package definitions, but by default look in the directory specified by tool_dependency_dir
       in Galaxy's config/galaxy.ini -->
  <galaxy_packages />
  <!-- check whether the correct version has been installed via conda -->
  <conda />
  <!-- look for any version of the dependency installed via conda -->
  <conda versionless="true" />
  <!-- look for a "default" symlink pointing to a directory containing an
       env.sh file for the package in the "galaxy packages" schema -->
  <galaxy_packages versionless="true" />
  {% endif %}
</dependency_resolvers>


================================================
FILE: compose/galaxy-configurator/templates/galaxy/galaxy.yml.j2
================================================
gravity:
{{ gravity | to_nice_yaml(indent=2) | indent(2, first=True) }}

galaxy:
{{ galaxy | to_nice_yaml(indent=2) | indent(2, first=True) }}

  {% if GALAXY_PROXY_PREFIX %}
  galaxy_url_prefix: /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}
  {% endif %}

  {% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' %}
  enable_mulled_containers: true
  containers_resolvers_config_file: container_resolvers_conf.yml
  {% endif %}


================================================
FILE: compose/galaxy-configurator/templates/galaxy/job_conf.xml.j2
================================================
<?xml version="1.0"?>
<!-- A sample job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->
<job_conf>
    <plugins>
        <plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
        <plugin id="condor" type="runner" load="galaxy.jobs.runners.condor:CondorJobRunner"/>
        <plugin id="slurm" type="runner" load="galaxy.jobs.runners.slurm:SlurmJobRunner">
            <param id="drmaa_library_path">/usr/lib/slurm-drmaa/lib/libdrmaa.so</param>
        </plugin>
        <plugin id="pulsar_rest" type="runner" load="galaxy.jobs.runners.pulsar:PulsarRESTJobRunner">
            <param id="transport">{{ GALAXY_PULSAR_TRANSPORT | default('curl') }}</param>
        </plugin>
        {% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}
        <plugin id="pulsar_mq" type="runner" load="galaxy.jobs.runners.pulsar:PulsarMQJobRunner">
            <param id="galaxy_url">{{ PULSAR_GALAXY_URL }}</param>
            <param id="amqp_url">{{ PULSAR_CONFIG_MESSAGE_QUEUE_URL }}</param>
            <param id="amqp_acknowledge">True</param>
            <param id="amqp_ack_republish_time">30</param>
            <param id="amqp_publish_retry">True</param>
        </plugin>
        {% endif -%}
        {% if GALAXY_JOB_RUNNER == 'k8s' -%}
        <plugin id="k8s" type="runner" load="galaxy.jobs.runners.kubernetes:KubernetesJobRunner">
          <param id="k8s_config_path">{{ GALAXY_KUBECONFIG }}</param>
          <param id="k8s_persistent_volume_claims">{{ GALAXY_K8S_PVC | default('galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps') }}</param>
        </plugin>
        {% endif -%}
    </plugins>
    <handlers assign_with="db-skip-locked" />
    <destinations default="{{ GALAXY_DEPENDENCY_RESOLUTION | default('conda') }}_{{ GALAXY_JOB_RUNNER | default('local') }}">
        <destination id="local" runner="local">
            <env file="/galaxy/.venv/bin/activate" />
        </destination>
        <destination id="{{ GALAXY_DEPENDENCY_RESOLUTION | default('conda') }}_{{ GALAXY_JOB_RUNNER | default('local') }}" runner="{{ GALAXY_JOB_RUNNER | default('local') }}">
            {% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' -%}
              <env file="/galaxy/.venv/bin/activate" />
              <env id="HOME">/home/galaxy</env>
              <env id="LC_ALL">C</env>
              <env id="APPTAINER_CACHEDIR">/tmp/singularity</env>
              <env id="APPTAINER_TMPDIR">/tmp</env>
              <param id="singularity_enabled">true</param>
              {% if GALAXY_JOB_RUNNER == 'local' -%}
                <param id="singularity_volumes">{{ EXPORT_DIR | regex_replace("^.", "") }}/$galaxy_root:$galaxy_root:ro,{{ EXPORT_DIR | regex_replace("^.", "") }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$tool_directory:$tool_directory:ro,{{ EXPORT_DIR | regex_replace("^.", "") }}/$job_directory:$job_directory:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$working_directory:$working_directory:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$default_file_path:$default_file_path:rw</param>
              {% endif -%}
            {% elif GALAXY_DEPENDENCY_RESOLUTION == 'docker' -%}
              <param id="docker_enabled">true</param>
              <param id="docker_sudo">false</param>
              <param id="docker_set_user"></param>
              {% if GALAXY_JOB_RUNNER == 'local' -%}
                <param id="docker_volumes">{{ HOST_EXPORT_DIR }}/$galaxy_root:$galaxy_root:ro,{{ HOST_EXPORT_DIR }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ HOST_EXPORT_DIR }}/$tool_directory:$tool_directory:ro,{{ HOST_EXPORT_DIR }}/$job_directory:$job_directory:rw,{{ HOST_EXPORT_DIR }}/$working_directory:$working_directory:rw,{{ HOST_EXPORT_DIR }}/$default_file_path:$default_file_path:rw</param>
              {% endif -%}
            {% elif not GALAXY_JOB_RUNNER.startswith('pulsar') and GALAXY_JOB_RUNNER != 'k8s' -%}
              <env file="/galaxy/.venv/bin/activate" />
            {% endif -%}
            {% if GALAXY_JOB_RUNNER == 'pulsar_rest' -%}
              <param id="url">{{ GALAXY_PULSAR_URL }}</param>
              <param id="private_token">{{ PULSAR_CONFIG_PRIVATE_TOKEN }}</param>
              <param id="dependency_resolution">remote</param>
            {% endif -%}
            {% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}
              <param id="jobs_directory">{{ PULSAR_JOBS_DIRECTORY | default('/pulsar/files/staging/') }}</param>
            {% endif -%}
            {% if GALAXY_JOB_RUNNER == 'k8s' -%}
              <param id="docker_repo_default">{{ GALAXY_K8S_DOCKER_REPO_DEFAULT | default('docker.io') }}</param>
              {% if GALAXY_K8S_DOCKER_OWNER_DEFAULT -%}<param id="docker_owner_default">{{ GALAXY_K8S_DOCKER_OWNER_DEFAULT }}</param>{% endif -%}
              <param id="docker_image_default">{{ GALAXY_K8S_DOCKER_IMAGE_DEFAULT | default('python') }}</param>
              <param id="docker_tag_default">{{ GALAXY_K8S_DOCKER_TAG_DEFAULT | default('3.10.15') }}</param>
              <param id="docker_enabled">true</param>
            {% endif -%}
        </destination>
    </destinations>
    <tools>
        <tool id="upload1" destination="local" />
        <tool id="__SET_METADATA__" destination="local" />
    </tools>
</job_conf>


================================================
FILE: compose/galaxy-configurator/templates/galaxy/job_metrics.xml.j2
================================================
<?xml version="1.0"?>
<job_metrics>
{% if galaxy_job_metrics.core %}
  <core />
{% endif %}
{% if galaxy_job_metrics.cpuinfo and galaxy_job_metrics.cpuinfo == "verbose" %}
  <cpuinfo verbose="true" />
{% elif galaxy_job_metrics.cpuinfo %}
  <cpuinfo />
{% endif %}
{% if galaxy_job_metrics.meminfo %}
  <meminfo />
{% endif %}
{% if galaxy_job_metrics.uname %}
  <uname />
{% endif %}
{% if galaxy_job_metrics.env %}
  <env />
{% endif %}
</job_metrics>


================================================
FILE: compose/galaxy-configurator/templates/htcondor/executor.conf.j2
================================================
{% for key, value in htcondor_executor.items() -%}
{{ key }}={{ value }}
{% endfor %}


================================================
FILE: compose/galaxy-configurator/templates/htcondor/galaxy.conf.j2
================================================
{% for key, value in htcondor_galaxy.items() -%}
{{ key }}={{ value }}
{% endfor %}


================================================
FILE: compose/galaxy-configurator/templates/htcondor/master.conf.j2
================================================
{# HTCondor configuration for the central-manager node: emit one
   KEY=VALUE line per entry of the htcondor_master mapping. -#}
{% for key, value in htcondor_master.items() -%}
{{ key }}={{ value }}
{% endfor %}


================================================
FILE: compose/galaxy-configurator/templates/kind/k8s_config/persistent_volumes.yml.j2
================================================
{# Three hostPath PersistentVolumes for the kind cluster: the Galaxy
   root directory, the Galaxy database directory and the shared tool
   dependency directory.  Each is sized KIND_PV_STORAGE_SIZE GiB
   (default 100) and backed by a path under HOST_EXPORT_DIR. -#}
kind: PersistentVolume
apiVersion: v1
metadata:
  name: galaxy-root
spec:
  storageClassName: standard
  capacity:
    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: {{ HOST_EXPORT_DIR }}/galaxy
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: galaxy-database
spec:
  storageClassName: standard
  capacity:
    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: {{ HOST_EXPORT_DIR }}/galaxy/database
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: galaxy-tool-deps
spec:
  storageClassName: standard
  capacity:
    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: {{ HOST_EXPORT_DIR }}/tool_deps


================================================
FILE: compose/galaxy-configurator/templates/kind/k8s_config/pv_claims.yml.j2
================================================
{# PersistentVolumeClaims bound (via volumeName) to the identically
   named PersistentVolumes; the requested size must use the same
   KIND_PV_STORAGE_SIZE value (default 100 GiB) as the volumes. -#}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: galaxy-root
spec:
  storageClassName: standard
  accessModes:
    - ReadWriteMany
  volumeName: galaxy-root
  resources:
    requests:
      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: galaxy-database
spec:
  storageClassName: standard
  accessModes:
    - ReadWriteMany
  volumeName: galaxy-database
  resources:
    requests:
      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: galaxy-tool-deps
spec:
  storageClassName: standard
  accessModes:
    - ReadWriteMany
  volumeName: galaxy-tool-deps
  resources:
    requests:
      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi


================================================
FILE: compose/galaxy-configurator/templates/kind/kind_config.yml.j2
================================================
{# kind cluster definition: one control-plane node plus
   KIND_NODE_COUNT (default 1) worker nodes.  Every node bind-mounts
   the host's Galaxy and tool-dependency export directories at the same
   absolute path, so hostPath PersistentVolumes can resolve them. -#}
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraMounts:
  - hostPath: {{ HOST_EXPORT_DIR }}/galaxy
    containerPath: {{ HOST_EXPORT_DIR }}/galaxy
  - hostPath: {{ HOST_EXPORT_DIR }}/tool_deps
    containerPath: {{ HOST_EXPORT_DIR }}/tool_deps
{% set kind_node_count = KIND_NODE_COUNT | default(1) | int -%}
{% for i in range(1, kind_node_count + 1) -%}
- role: worker
  extraMounts:
  - hostPath: {{ HOST_EXPORT_DIR }}/galaxy
    containerPath: {{ HOST_EXPORT_DIR }}/galaxy
  - hostPath: {{ HOST_EXPORT_DIR }}/tool_deps
    containerPath: {{ HOST_EXPORT_DIR }}/tool_deps
{% endfor %}


================================================
FILE: compose/galaxy-configurator/templates/nginx/nginx.conf.j2
================================================
events { }

http {
  include mime.types;
  # See https://docs.galaxyproject.org/en/latest/admin/nginx.html#serving-galaxy-at-the-web-server-root

  # compress responses whenever possible
  gzip on;
  gzip_http_version 1.1;
  gzip_vary on;
  gzip_comp_level 6;
  gzip_proxied any;
  gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
  gzip_buffers 16 8k;

  # allow up to 3 minutes for Galaxy to respond to slow requests before timing out
  proxy_read_timeout {{ NGINX_PROXY_READ_TIMEOUT | default(180, true) }};

  proxy_buffers 8 16k;
  proxy_buffer_size 16k;

  # maximum file upload size
  client_max_body_size 10g;

  server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;

    # use a variable for convenience
    set $galaxy_static /export/galaxy/static;
    set $galaxy_root /export/galaxy;

    # proxy all requests not matching other locations to gunicorn
    # (GALAXY_PROXY_PREFIX is normalised by stripping any leading and
    # trailing slash before it is inserted into the location path)
    location /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }} {
      proxy_pass http://galaxy-server:5555;
      proxy_set_header Host $http_host;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      # NOTE(review): Upgrade is forwarded but no matching Connection
      # header is set here; the Galaxy nginx docs pair the two for
      # websocket support - confirm whether this is intentional.
      proxy_set_header Upgrade $http_upgrade;
    }

    # serve framework static content
    location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/static {
      alias $galaxy_static;
      expires 24h;
    }
    location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/robots.txt {
      alias $galaxy_static/robots.txt;
      expires 24h;
    }
    location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/favicon.ico {
      alias $galaxy_static/favicon.ico;
      expires 24h;
    }

    # serve visualization plugin static content
    location ~ ^{{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/plugins/(?<plug_type>[^/]+?)/((?<vis_d>[^/_]*)_?)?(?<vis_name>[^/]*?)/static/(?<static_file>.*?)$ {
      alias $galaxy_root/config/plugins/$plug_type/;
      try_files $vis_d/${vis_d}_${vis_name}/static/$static_file
        $vis_d/static/$static_file =404;
    }

    # delegated uploads: TUS resumable uploads are proxied unbuffered
    # to the rustus service instead of Galaxy itself
    location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/api/upload/resumable_upload {
      # Disable request and response buffering
      proxy_request_buffering off;
      proxy_buffering off;
      proxy_http_version 1.1;

      # Add X-Forwarded-* headers
      proxy_set_header X-Forwarded-Host $http_host;
      proxy_set_header X-Forwarded-Proto $scheme;

      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
      client_max_body_size 0;
      proxy_pass http://rustus:1081;
    }

    # rewrite the bare prefix (no trailing slash) to the
    # trailing-slash form
    rewrite ^/{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}$ /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}/ last;
  }
}


================================================
FILE: compose/galaxy-configurator/templates/pulsar/app.yml.j2
================================================
{# Pulsar application configuration (app.yml).  When the local job
   runner is selected, a default manager is defined that runs up to
   PULSAR_NUM_CONCURRENT_JOBS (default 1) jobs via queued_python.
   Any further settings in the `pulsar` mapping are appended as YAML
   below; to_nice_yaml comes from jinja2-ansible-filters. -#}
managers:
  {% if PULSAR_JOB_RUNNER == 'local' -%}
  _default_:
    type: queued_python
    num_concurrent_jobs: {{ PULSAR_NUM_CONCURRENT_JOBS | default(1) }}
  {% endif %}

{{ pulsar | to_nice_yaml(indent=2) }}


================================================
FILE: compose/galaxy-configurator/templates/pulsar/server.ini.j2
================================================
## Pulsar web-server configuration, rendered by the galaxy-configurator.
## Paste HTTP server binding (used when Pulsar is run via Paste).
[server:main]
use = egg:Paste#http
port = {{ PULSAR_PORT | default(8913) }}
host = {{ PULSAR_HOSTNAME | default('pulsar') }}

[app:main]
paste.app_factory = pulsar.web.wsgi:app_factory
## Manager configuration is read from app.yml next to this file.
app_config = %(here)s/app.yml

## Configure uWSGI (if used).
[uwsgi]
master = True
paste-logger = true
http = {{ PULSAR_HOSTNAME | default('pulsar') }}:{{ PULSAR_PORT | default(8913) }}
processes = 1
enable-threads = True

## Circus watcher/socket definitions (if circus/chaussette is used).
[watcher:web]
cmd = chaussette --fd $(circus.sockets.web) paste:server.ini
use_sockets = True
# Pulsar must be single-process for now...
numprocesses = 1

[socket:web]
host = localhost
port = 8913

## Configure Python loggers.
## The level of every logger/handler is controlled by PULSAR_LOG_LEVEL
## (default INFO).
[loggers]
keys = root,pulsar

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
handlers = console

[logger_pulsar]
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
handlers = console
qualname = pulsar
propagate = 1

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
formatter = generic

[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s


================================================
FILE: compose/galaxy-configurator/templates/slurm/slurm.conf.j2
================================================
{# slurm.conf: global settings taken verbatim from the `slurm` mapping,
   followed by one NodeName line per worker (SLURM_NODE_COUNT nodes,
   numbered from 1) and a single `work` partition containing all of
   them. -#}
{% for key, value in slurm.items() -%}
{{ key }}={{ value }}
{% endfor %}

{% set slurm_node_count = SLURM_NODE_COUNT | int -%}
{% for i in range(1, slurm_node_count + 1) -%}
NodeName={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeAddr={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeHostname={{ SLURM_NODE_HOSTNAME }}_{{ i }} CPUs={{ SLURM_NODE_CPUS | default(1, true) }} RealMemory={{ SLURM_NODE_MEMORY | default(1024, true) }} State=UNKNOWN
{% endfor %}
PartitionName=work Nodes={% for i in range(1, slurm_node_count + 1) -%}{{ SLURM_NODE_HOSTNAME }}_{{ i }}{%- if not loop.last -%},{% endif %}{% endfor %} Default=YES MaxTime=INFINITE State=UP Shared=YES # TODO


================================================
FILE: compose/galaxy-htcondor/Dockerfile
================================================
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest

# Stage 1: fetch the Galaxy source tree and strip it down to the
# library packages needed at job runtime.
FROM buildpack-deps:22.04 as galaxy_dependencies

ARG GALAXY_RELEASE=release_24.1
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy

ENV GALAXY_ROOT_DIR=/galaxy
ENV GALAXY_LIBRARY=$GALAXY_ROOT_DIR/lib

# Download Galaxy source, but only keep necessary dependencies
RUN mkdir "${GALAXY_ROOT_DIR}" \
    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
    && cd $GALAXY_ROOT_DIR \
    && ls . | grep -v "lib" | xargs rm -rf \
    && cd $GALAXY_ROOT_DIR/lib \
    && ls . | grep -v "galaxy\|galaxy_ext" | xargs rm -rf \
    && cd $GALAXY_ROOT_DIR/lib/galaxy \
    && ls . | grep -v "__init__.py\|datatypes\|exceptions\|files\|metadata\|model\|util\|security" | xargs rm -rf


# Stage 2: the actual HTCondor node image.
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG as final

ENV DEBIAN_FRONTEND=noninteractive

ENV GALAXY_USER=galaxy \
    GALAXY_GROUP=galaxy \
    GALAXY_UID=1450 \
    GALAXY_GID=1450 \
    GALAXY_HOME=/home/galaxy \
    GALAXY_ROOT_DIR=/galaxy

# Create the galaxy system user so job files have consistent ownership.
RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
    && mkdir $GALAXY_HOME \
    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME

ENV EXPORT_DIR=/export \
    # Setting a standard encoding. This can get important for things like the unix sort tool.
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8

ENV CONDOR_CPUS=1 \
    CONDOR_MEMORY=1024

# Condor master
# NOTE(review): apt-key is deprecated; consider switching to a keyring
# file with "signed-by" once the HTCondor repo docs cover it.
RUN echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \
    && echo 'Acquire::http::Timeout "20";' > /etc/apt/apt.conf.d/98AcquireTimeout \
    && echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/99AcquireRetries \
    && apt-get update -qq && apt-get install -y --no-install-recommends locales gnupg2 curl \
    && locale-gen en_US.UTF-8 && dpkg-reconfigure locales \
    && curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \
    && echo "deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main" >> /etc/apt/sources.list \
    && apt-get update -qq && apt-get install -y --no-install-recommends \
        supervisor \
        htcondor \
        python3 \
        wget \
    # provide a plain `python` executable for scripts expecting one
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \
    && touch /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog \
    && mkdir -p /var/run/condor/ /var/lock/condor/ \
    && chown -R condor: /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog /var/run/condor/ /var/lock/condor/ \
    && rm -rf /var/lib/apt/lists/*

# COPY is preferred over ADD for plain local files.
COPY supervisord.conf /etc/supervisord.conf

# Copy Galaxy dependencies
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=galaxy_dependencies $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR

COPY start.sh /usr/bin/start.sh

# Exec form so start.sh receives container signals directly instead of
# being wrapped in "/bin/sh -c".
ENTRYPOINT ["/usr/bin/start.sh"]


================================================
FILE: compose/galaxy-htcondor/start.sh
================================================
#!/bin/bash

# Give the configurator a moment to create its lock file, then block
# until it has been removed again.
sleep 5
echo "Waiting for Galaxy configurator to finish and release lock"
while [ -f /config/configurator.lock ]; do
  sleep 0.1
done
echo Lock released

# Install the condor configuration rendered for this node type
# (master/executor/galaxy), register the shared pool password, and
# hand control over to supervisord.
cp -f "/config/$HTCONDOR_TYPE.conf" /etc/condor/condor_config.local
condor_store_cred -p "$HTCONDOR_POOL_PASSWORD" -f /var/lib/condor/pool_password

/usr/bin/supervisord


================================================
FILE: compose/galaxy-htcondor/supervisord.conf
================================================
[unix_http_server]
file=/var/run/supervisor.sock   ; (the path to the socket file)
chmod=0700                       ; socket file mode (default 0700)

[supervisord]
nodaemon = true

[program:htcondor]
command=/usr/sbin/condor_master -pidfile /var/run/condor/condor.pid -f -t
#stdout_logfile=/var/log/htcondor.log
#stderr_logfile=/var/log/htcondor.log
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=0
autostart=true
autorestart=false

[program:log-condor-collector]
command=tail -f -n1000 /var/log/condor/CollectorLog
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=5
autostart=true
autorestart=false
user=condor

[program:log-condor-negotiator]
command=tail -f -n1000 /var/log/condor/NegotiatorLog
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=5
autostart=true
autorestart=false
user=condor

# [program:telegraf]
# command=/usr/bin/telegraf --config /etc/telegraf/telegraf.conf
# stdout_logfile=/dev/stdout
# stdout_logfile_maxbytes=0
# stderr_logfile=/dev/stderr
# stderr_logfile_maxbytes=0
# stopwaitsecs=1
# startretries=5
# autostart=true
# autorestart=false
# user=root


[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL  for a unix socket


================================================
FILE: compose/galaxy-kind/Dockerfile
================================================
# Helper image that manages a kind (Kubernetes-in-Docker) cluster from
# inside a compose service.
FROM alpine:3.17

ARG KIND_RELEASE=v0.24.0
ARG KUBECTL_RELEASE=v1.31.1

# Docker CLI so `kind` can create node containers; presumably the host
# Docker socket is mounted into this container - see the compose file.
RUN apk add --no-cache docker

# Install bash (needed by docker-entrypoint.sh), kind and kubectl.
# wget is only needed at build time and is removed again with its
# virtual package.
RUN apk add --no-cache --virtual build-deps wget \
    && apk add --no-cache bash \
    && wget -O /usr/bin/kind https://kind.sigs.k8s.io/dl/${KIND_RELEASE}/kind-linux-amd64 \
    && chmod +x /usr/bin/kind \
    && wget -O /usr/bin/kubectl https://dl.k8s.io/release/${KUBECTL_RELEASE}/bin/linux/amd64/kubectl \
    && chmod +x /usr/bin/kubectl \
    && apk del build-deps

ENV KIND_CONFIG_DIR=/kind
ENV KUBECONFIG=${KIND_CONFIG_DIR}/.kube/config

COPY docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ]


================================================
FILE: compose/galaxy-kind/docker-entrypoint.sh
================================================
#!/bin/bash

# Tear the kind cluster down on SIGTERM so `docker stop` leaves no
# stray control-plane/worker containers behind on the host.
_term() {
  echo "Caught SIGTERM signal!"
  echo "Trying to stop Kind cluster"
  kind delete cluster --name "${K8S_CLUSTER_NAME:-galaxy}" || true
  exit 0
}
trap _term SIGTERM

# Unless explicitly skipped, wait for the galaxy-configurator container
# to finish rendering the kind/k8s config files and release its lock.
if [ -z "$KIND_SKIP_CONFIG_LOCK" ]; then
  sleep 2
  echo "Waiting for Galaxy configurator to finish and release lock"
  until [ ! -f "$KIND_CONFIG_DIR/configurator.lock" ] && echo Lock released; do
    sleep 0.1;
  done;
fi
# -f: do not fail when the derived kubeconfig does not exist yet
rm -f "${KUBECONFIG}_in_docker"

# Recreate the cluster from scratch on every container start.
kind delete cluster --name "${K8S_CLUSTER_NAME:-galaxy}" || true
kind create cluster --config "$KIND_CONFIG_DIR/kind_config.yml" --kubeconfig "$KUBECONFIG" --name "${K8S_CLUSTER_NAME:-galaxy}" || true

# Create custom kubeconfig, that allows to reach the control-plane from inside the containers
REAL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${K8S_CLUSTER_NAME:-galaxy}-control-plane")
cp "${KUBECONFIG}" "${KUBECONFIG}_in_docker"
# Dots are escaped so the pattern matches only the literal loopback
# address (an unescaped "." would match any character).
sed -i "s/127\.0\.0\.1:[0-9]*$/${REAL_IP}:6443/g" "${KUBECONFIG}_in_docker"

export KUBECONFIG="${KUBECONFIG}_in_docker"
kubectl cluster-info

# Not all resources can be easily updated, therefore it is easier
# to remove the resources first, while the whole setup is
# still starting up
ls "$KIND_CONFIG_DIR/k8s_config"
kubectl delete -f "$KIND_CONFIG_DIR/k8s_config" || true
kubectl apply -f "$KIND_CONFIG_DIR/k8s_config"

# Wait for SIGTERM and delete cluster
sleep inf & wait


================================================
FILE: compose/galaxy-nginx/Dockerfile
================================================
# Thin wrapper around the official nginx image that waits for the
# galaxy-configurator to provide the rendered config before starting.
FROM nginx:1.27-alpine

COPY start.sh /usr/bin/start.sh

# start.sh is invoked via /bin/sh: the alpine base image ships no bash.
CMD [ "/bin/sh", "/usr/bin/start.sh"]


================================================
FILE: compose/galaxy-nginx/start.sh
================================================
#!/bin/sh
# Wait for the configurator-rendered nginx config, install it, then run
# nginx in the foreground.  (Shebang is /bin/sh: the nginx alpine image
# has no bash, and the Dockerfile invokes this script with /bin/sh.)
sleep 5 # ToDo: Use locking or so to be sure we really have the newest version
echo "Waiting for Nginx config"
# Wait until /config contains at least one regular file (ls -p marks
# directories with a trailing "/", which grep -v filters out).  The
# previous test listed the *current* directory and filtered on the
# literal string "/config" - which ls never prints - so it was always
# true and never actually waited.
until [ -n "$(ls -p /config 2>/dev/null | grep -v /)" ] && echo Nginx config found; do
  sleep 0.5;
done;

cp -f /config/* /etc/nginx

echo "Running nginx startup command"
nginx -g "daemon off;"


================================================
FILE: compose/galaxy-server/Dockerfile
================================================
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest

FROM buildpack-deps:22.04 as build_base

ENV EXPORT_DIR=/export \
    GALAXY_ROOT_DIR=/galaxy \
    HTCONDOR_ROOT=/opt/htcondor

ENV GALAXY_STATIC_DIR=$GALAXY_ROOT_DIR/static \
    GALAXY_EXPORT=$EXPORT_DIR/galaxy \
    GALAXY_CONFIG_DIR=$GALAXY_ROOT_DIR/config \
    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=/tool_deps \
    GALAXY_CONFIG_TOOL_PATH=$GALAXY_ROOT_DIR/tools \
    GALAXY_CONFIG_TOOL_DATA_PATH=$GALAXY_ROOT_DIR/tool-data \
    GALAXY_VIRTUAL_ENV=$GALAXY_ROOT_DIR/.venv \
    GALAXY_DATABASE_PATH=$GALAXY_ROOT_DIR/database

ENV GALAXY_USER=galaxy \
    GALAXY_GROUP=galaxy \
    GALAXY_UID=1450 \
    GALAXY_GID=1450 \
    GALAXY_HOME=/home/galaxy

ENV GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda \
    MINIFORGE_VERSION=24.3.0-0

RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
    && mkdir $GALAXY_HOME \
    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME

FROM build_base as build_miniforge
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh

# Install Miniforge
RUN curl -s -L https://github.com/conda-forge/miniforge/releases/download/$MINIFORGE_VERSION/Miniforge3-$MINIFORGE_VERSION-Linux-x86_64.sh > ~/min
Download .txt
gitextract__x213t1e/

├── .dive-ci
├── .editorconfig
├── .github/
│   └── workflows/
│       ├── compose.yml
│       ├── cvmfs.yml
│       ├── lint.yml
│       ├── pull-request.yml
│       ├── release.yml
│       ├── single.sh
│       ├── single_container.yml
│       └── update-site.yml
├── .gitignore
├── .travis.yml
├── Changelog.md
├── LICENSE
├── README.md
├── compose/
│   ├── README.md
│   ├── base-images/
│   │   ├── galaxy-cluster-base/
│   │   │   ├── Dockerfile
│   │   │   └── files/
│   │   │       ├── common_cleanup.sh
│   │   │       └── cvmfs/
│   │   │           ├── default.local
│   │   │           ├── domain.d/
│   │   │           │   └── galaxyproject.org.conf
│   │   │           └── keys/
│   │   │               └── galaxyproject.org/
│   │   │                   ├── data.galaxyproject.org.pub
│   │   │                   └── singularity.galaxyproject.org.pub
│   │   └── galaxy-container-base/
│   │       ├── Dockerfile
│   │       └── files/
│   │           └── common_cleanup.sh
│   ├── base_config.yml
│   ├── docker-compose.htcondor.yml
│   ├── docker-compose.k8s.yml
│   ├── docker-compose.pulsar.mq.yml
│   ├── docker-compose.pulsar.yml
│   ├── docker-compose.singularity.yml
│   ├── docker-compose.slurm.yml
│   ├── docker-compose.yml
│   ├── galaxy-configurator/
│   │   ├── Dockerfile
│   │   ├── customize.py
│   │   ├── run.sh
│   │   └── templates/
│   │       ├── galaxy/
│   │       │   ├── GALAXY_PROXY_PREFIX.txt.j2
│   │       │   ├── container_resolvers_conf.yml.j2
│   │       │   ├── dependency_resolvers_conf.xml.j2
│   │       │   ├── galaxy.yml.j2
│   │       │   ├── job_conf.xml.j2
│   │       │   └── job_metrics.xml.j2
│   │       ├── htcondor/
│   │       │   ├── executor.conf.j2
│   │       │   ├── galaxy.conf.j2
│   │       │   └── master.conf.j2
│   │       ├── kind/
│   │       │   ├── k8s_config/
│   │       │   │   ├── persistent_volumes.yml.j2
│   │       │   │   └── pv_claims.yml.j2
│   │       │   └── kind_config.yml.j2
│   │       ├── nginx/
│   │       │   └── nginx.conf.j2
│   │       ├── pulsar/
│   │       │   ├── app.yml.j2
│   │       │   └── server.ini.j2
│   │       └── slurm/
│   │           └── slurm.conf.j2
│   ├── galaxy-htcondor/
│   │   ├── Dockerfile
│   │   ├── start.sh
│   │   └── supervisord.conf
│   ├── galaxy-kind/
│   │   ├── Dockerfile
│   │   └── docker-entrypoint.sh
│   ├── galaxy-nginx/
│   │   ├── Dockerfile
│   │   └── start.sh
│   ├── galaxy-server/
│   │   ├── Dockerfile
│   │   └── files/
│   │       ├── common_cleanup.sh
│   │       ├── create_galaxy_user.py
│   │       └── start.sh
│   ├── galaxy-slurm/
│   │   ├── Dockerfile
│   │   └── start.sh
│   ├── galaxy-slurm-node-discovery/
│   │   ├── Dockerfile
│   │   └── run.sh
│   ├── pulsar/
│   │   ├── Dockerfile
│   │   ├── docker-entrypoint.sh
│   │   └── files/
│   │       └── common_cleanup.sh
│   └── tests/
│       ├── docker-compose.test.bioblend.yml
│       ├── docker-compose.test.selenium.yml
│       ├── docker-compose.test.workflows.yml
│       ├── docker-compose.test.yml
│       ├── galaxy-bioblend-test/
│       │   ├── Dockerfile
│       │   └── run.sh
│       ├── galaxy-selenium-test/
│       │   ├── Dockerfile
│       │   └── run.sh
│       └── galaxy-workflow-test/
│           ├── Dockerfile
│           └── run.sh
├── cvmfs/
│   ├── Dockerfile
│   ├── README.md
│   ├── ansible/
│   │   ├── playbook.yml
│   │   └── requirements.yml
│   └── docker-entrypoint.sh
├── docs/
│   ├── README.md
│   ├── Running_jobs_outside_of_the_container.md
│   ├── css/
│   │   └── landing_page.css
│   ├── js/
│   │   └── landing_page.js
│   └── src/
│       ├── generate_docs.py
│       └── requirements.txt
├── galaxy/
│   ├── Dockerfile
│   ├── ansible/
│   │   ├── condor.yml
│   │   ├── cvmfs_client.yml
│   │   ├── docker.yml
│   │   ├── files/
│   │   │   ├── 413.html
│   │   │   ├── 500.html
│   │   │   ├── 502.html
│   │   │   ├── nginx_sample.crt
│   │   │   ├── nginx_sample.key
│   │   │   └── production_b2drop.yml
│   │   ├── flower.yml
│   │   ├── galaxy_file_source_templates.yml
│   │   ├── galaxy_job_conf.yml
│   │   ├── galaxy_job_metrics.yml
│   │   ├── galaxy_object_store_templates.yml
│   │   ├── galaxy_scripts.yml
│   │   ├── galaxy_vault_config.yml
│   │   ├── gravity.yml
│   │   ├── group_vars/
│   │   │   └── all.yml
│   │   ├── k8s.yml
│   │   ├── nginx.yml
│   │   ├── pbs.yml
│   │   ├── postgresql.yml
│   │   ├── proftpd.yml
│   │   ├── provision.yml
│   │   ├── rabbitmq.yml
│   │   ├── redis.yml
│   │   ├── requirements.yml
│   │   ├── slurm.yml
│   │   ├── supervisor.yml
│   │   ├── templates/
│   │   │   ├── add_tool_shed.py.j2
│   │   │   ├── cgroupfs_mount.sh.j2
│   │   │   ├── check_database.py.j2
│   │   │   ├── configure_rabbitmq_users.yml.j2
│   │   │   ├── configure_slurm.py.j2
│   │   │   ├── container_resolvers_conf.yml.j2
│   │   │   ├── create_galaxy_user.py.j2
│   │   │   ├── export_user_files.py.j2
│   │   │   ├── file_source_templates.yml.j2
│   │   │   ├── gravity.yml.j2
│   │   │   ├── job_conf.xml.j2
│   │   │   ├── job_metrics_conf.yml.j2
│   │   │   ├── macros.xml.j2
│   │   │   ├── nginx/
│   │   │   │   ├── delegated_uploads.conf.j2
│   │   │   │   ├── flower_auth.conf.j2
│   │   │   │   ├── galaxy_common.conf.j2
│   │   │   │   ├── galaxy_http.j2
│   │   │   │   ├── galaxy_https.j2
│   │   │   │   ├── galaxy_redirect_ssl.j2
│   │   │   │   ├── htpasswd.j2
│   │   │   │   ├── interactive_tools_common.conf.j2
│   │   │   │   ├── interactive_tools_http.j2
│   │   │   │   ├── interactive_tools_https.j2
│   │   │   │   └── interactive_tools_redirect_ssl.j2
│   │   │   ├── object_store_templates.yml.j2
│   │   │   ├── rabbitmq.sh.j2
│   │   │   ├── startup_lite.sh.j2
│   │   │   ├── supervisor.conf.j2
│   │   │   ├── update_yaml_value.py.j2
│   │   │   └── vault_conf.yml.j2
│   │   └── tusd.yml
│   ├── bashrc
│   ├── cgroupfs_mount.sh
│   ├── common_cleanup.sh
│   ├── docker-compose.yaml
│   ├── install_tools_wrapper.sh
│   ├── run.sh
│   ├── sample_tool_list.yaml
│   ├── setup_postgresql.py
│   ├── startup.sh
│   ├── startup2.sh
│   ├── tool_conf_interactive.xml.sample
│   ├── tool_sheds_conf.xml
│   └── welcome.html
├── skills/
│   └── galaxy-docker/
│       ├── SKILL.md
│       └── references/
│           └── upgrade-25.1.md
└── test/
    ├── bioblend/
    │   ├── Dockerfile
    │   └── test.sh
    ├── container_resolvers_conf.ci.yml
    ├── cvmfs/
    │   └── test.sh
    ├── gridengine/
    │   ├── Dockerfile
    │   ├── act_qmaster
    │   ├── job_conf.xml.sge
    │   ├── master_script.sh
    │   ├── outputhostname/
    │   │   └── outputhostname.xml
    │   ├── outputhostname.tool.xml
    │   ├── setup_gridengine.sh
    │   ├── setup_tool.sh
    │   ├── test.sh
    │   ├── test_outputhostname.py
    │   └── tool_conf.xml
    └── slurm/
        ├── Dockerfile
        ├── configure_slurm.py
        ├── job_conf.xml
        ├── munge.conf
        ├── startup.sh
        ├── supervisor_slurm.conf
        └── test.sh
Download .txt
SYMBOL INDEX (13 symbols across 6 files)

FILE: compose/galaxy-configurator/customize.py
  function j2_environment_params (line 4) | def j2_environment_params():
  function alter_context (line 14) | def alter_context(context):

FILE: compose/galaxy-server/files/create_galaxy_user.py
  function add_user (line 11) | def add_user(sa_session, security_agent, email, password, key=None, user...

FILE: docs/js/landing_page.js
  function gestureStart (line 11) | function gestureStart() {

FILE: docs/src/generate_docs.py
  function extract_html_structure (line 7) | def extract_html_structure(html_content):

FILE: galaxy/setup_postgresql.py
  function pg_ctl (line 7) | def pg_ctl(database_path, database_version, mod='start'):
  function set_pg_permission (line 19) | def set_pg_permission(database_path):
  function create_pg_db (line 27) | def create_pg_db(user, password, database, database_path, database_versi...

FILE: test/slurm/configure_slurm.py
  function _as_int (line 89) | def _as_int(value):
  function _slurmd_status (line 95) | def _slurmd_status():
  function _lscpu_status (line 107) | def _lscpu_status():
  function _real_memory_mb (line 133) | def _real_memory_mb():
  function main (line 145) | def main():
Condensed preview — 188 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (550K chars).
[
  {
    "path": ".dive-ci",
    "chars": 552,
    "preview": "rules:\n  # If the efficiency is measured below X%, mark as failed.\n  # Expressed as a ratio between 0-1.\n  lowestEfficie"
  },
  {
    "path": ".editorconfig",
    "chars": 154,
    "preview": "root = true\n\n[*]\nindent_style = space\nindent_size = 2\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newli"
  },
  {
    "path": ".github/workflows/compose.yml",
    "chars": 14430,
    "preview": "name: build-and-test\non: [push]\njobs:\n  build_container_base:\n    if: false  # Temporarily disable workflow \n    runs-on"
  },
  {
    "path": ".github/workflows/cvmfs.yml",
    "chars": 2213,
    "preview": "name: cvmfs-sidecar\non:\n  push:\n    branches:\n      - '**'\n    tags:\n      - '*'\n  pull_request:\n    paths:\n      - 'cvm"
  },
  {
    "path": ".github/workflows/lint.yml",
    "chars": 614,
    "preview": "name: Lint\non: [push]\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n    - name: Checkout\n      uses: actions/check"
  },
  {
    "path": ".github/workflows/pull-request.yml",
    "chars": 9652,
    "preview": "name: pr-test\non: pull_request\njobs:\n  test:\n    if: false  # Temporarily disable workflow \n    runs-on: ubuntu-22.04\n  "
  },
  {
    "path": ".github/workflows/release.yml",
    "chars": 956,
    "preview": "name: release-CI\n\non:\n  release:\n    types: [published]\n\n  # Allows you to run this workflow manually from the Actions t"
  },
  {
    "path": ".github/workflows/single.sh",
    "chars": 7616,
    "preview": "#!/bin/bash\nset -ex\n\ndocker --version\ndocker info\n\nexport GALAXY_HOME=/home/galaxy\nexport GALAXY_USER=admin@example.org\n"
  },
  {
    "path": ".github/workflows/single_container.yml",
    "chars": 912,
    "preview": "name: Single Container Test\non: [push, pull_request]\njobs:\n  build_and_test:\n    runs-on: ubuntu-latest\n    strategy:\n  "
  },
  {
    "path": ".github/workflows/update-site.yml",
    "chars": 779,
    "preview": "name: Deploy Documentation\n\non:\n  push:\n    branches:\n      - main\n    paths:\n      - 'README.md'\n\njobs:\n  deploy_docs:\n"
  },
  {
    "path": ".gitignore",
    "chars": 629,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\n"
  },
  {
    "path": ".travis.yml",
    "chars": 6009,
    "preview": "sudo: required\n\nlanguage: python\npython: 3.10\n\nservices:\n  - docker\n\nenv:\n  matrix:\n    - TOX_ENV=py310\n  global:\n    - "
  },
  {
    "path": "Changelog.md",
    "chars": 8705,
    "preview": "# Changelog\n\n## 0.1: Initial release!\n    - with Apache2, PostgreSQL and Tool Shed integration\n## 0.2: complete new Gala"
  },
  {
    "path": "LICENSE",
    "chars": 1080,
    "preview": "The MIT License (MIT)\n\nCopyright (c) 2014 Björn Grüning\n\nPermission is hereby granted, free of charge, to any person obt"
  },
  {
    "path": "README.md",
    "chars": 53070,
    "preview": "[![DOI](https://zenodo.org/badge/5466/bgruening/docker-galaxy-stable.svg)](https://zenodo.org/badge/latestdoi/5466/bgrue"
  },
  {
    "path": "compose/README.md",
    "chars": 26213,
    "preview": "\n⚠️ \n\nThe `compose` version of this project is currently not maintained. We update the files and versions as we have tim"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/Dockerfile",
    "chars": 1976,
    "preview": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM $DOCKER_REGISTRY/$DOCKER_R"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/common_cleanup.sh",
    "chars": 410,
    "preview": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your appli"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/default.local",
    "chars": 161,
    "preview": "CVMFS_REPOSITORIES=\"data.galaxyproject.org,singularity.galaxyproject.org\"\nCVMFS_HTTP_PROXY=\"DIRECT\"\nCVMFS_QUOTA_LIMIT=\"4"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/domain.d/galaxyproject.org.conf",
    "chars": 333,
    "preview": "CVMFS_SERVER_URL=\"http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@;htt"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub",
    "chars": 450,
    "preview": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt\n6CRi9+a9cKZG4UlX/lJukEJ+3dSx"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/singularity.galaxyproject.org.pub",
    "chars": 450,
    "preview": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt\n6CRi9+a9cKZG4UlX/lJukEJ+3dSx"
  },
  {
    "path": "compose/base-images/galaxy-container-base/Dockerfile",
    "chars": 1721,
    "preview": "FROM buildpack-deps:22.04 as build_apptainer\n\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\n\n# Install Go (o"
  },
  {
    "path": "compose/base-images/galaxy-container-base/files/common_cleanup.sh",
    "chars": 410,
    "preview": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your appli"
  },
  {
    "path": "compose/base_config.yml",
    "chars": 4428,
    "preview": "gravity:\n  process_manager: supervisor\n  galaxy_root: /galaxy\n  virtualenv: /galaxy/.venv\n  gunicorn:\n    enable: True\n "
  },
  {
    "path": "compose/docker-compose.htcondor.yml",
    "chars": 1627,
    "preview": "# Extend Galaxy to run jobs using HTCondor.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.htcondor."
  },
  {
    "path": "compose/docker-compose.k8s.yml",
    "chars": 966,
    "preview": "# Extend Galaxy to run jobs on Kubernetes.\n# This will set up Kubernetes using kind (https://kind.sigs.k8s.io).\n# Note t"
  },
  {
    "path": "compose/docker-compose.pulsar.mq.yml",
    "chars": 866,
    "preview": "# Extend Pulsar to use RabbitMQ (Message Queue) instead of the REST API\n# for communicating with Galaxy.\n# Requirements:"
  },
  {
    "path": "compose/docker-compose.pulsar.yml",
    "chars": 1363,
    "preview": "# Extend Galaxy to run jobs using Pulsar. With this setup, you\n# don't need to share the `/galaxy/database` path with Ga"
  },
  {
    "path": "compose/docker-compose.singularity.yml",
    "chars": 553,
    "preview": "# Extend Galaxy to use Singularity for dependency resolution.\n# This is working with the base Galaxy, but also in combin"
  },
  {
    "path": "compose/docker-compose.slurm.yml",
    "chars": 2012,
    "preview": "# Extend Galaxy to run jobs using Slurm.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up"
  },
  {
    "path": "compose/docker-compose.yml",
    "chars": 4501,
    "preview": "services:\n  galaxy-server:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-server:$"
  },
  {
    "path": "compose/galaxy-configurator/Dockerfile",
    "chars": 242,
    "preview": "FROM alpine:3.17\n\nRUN apk add --no-cache bash python3 py3-pip \\\n    && pip3 install j2cli[yaml] jinja2-ansible-filters\n\n"
  },
  {
    "path": "compose/galaxy-configurator/customize.py",
    "chars": 2558,
    "preview": "import os\n\n\ndef j2_environment_params():\n    \"\"\" Extra parameters for the Jinja2 Environment\n    Add AnsibleCoreFiltersE"
  },
  {
    "path": "compose/galaxy-configurator/run.sh",
    "chars": 5752,
    "preview": "#!/bin/bash\n\n# Set default config dirs\nexport GALAXY_CONF_DIR=${GALAXY_CONF_DIR:-/galaxy/config} \\\n       NGINX_CONF_DIR"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/GALAXY_PROXY_PREFIX.txt.j2",
    "chars": 26,
    "preview": "{{ GALAXY_PROXY_PREFIX }}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/container_resolvers_conf.yml.j2",
    "chars": 3230,
    "preview": "# Resolvers that are potentially used by default are uncommented (comments describe under \n# which premises they are in "
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/dependency_resolvers_conf.xml.j2",
    "chars": 946,
    "preview": "<dependency_resolvers>\n  {% if GALAXY_DEPENDENCY_RESOLUTION != 'singularity' %}\n  <!-- the default configuration, first "
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/galaxy.yml.j2",
    "chars": 459,
    "preview": "gravity:\n{{ gravity | to_nice_yaml(indent=2) | indent(2, first=True) }}\n\ngalaxy:\n{{ galaxy | to_nice_yaml(indent=2) | in"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/job_conf.xml.j2",
    "chars": 5400,
    "preview": "<?xml version=\"1.0\"?>\n<!-- A sample job config that explicitly configures job running the way it is configured by defaul"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/job_metrics.xml.j2",
    "chars": 454,
    "preview": "<?xml version=\"1.0\"?>\n<job_metrics>\n{% if galaxy_job_metrics.core %}\n  <core />\n{% endif %}\n{% if galaxy_job_metrics.cpu"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/executor.conf.j2",
    "chars": 86,
    "preview": "{% for key, value in htcondor_executor.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/galaxy.conf.j2",
    "chars": 84,
    "preview": "{% for key, value in htcondor_galaxy.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/master.conf.j2",
    "chars": 84,
    "preview": "{% for key, value in htcondor_master.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/k8s_config/persistent_volumes.yml.j2",
    "chars": 923,
    "preview": "kind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: galaxy-root\nspec:\n  storageClassName: standard\n  capacity:\n    s"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/k8s_config/pv_claims.yml.j2",
    "chars": 791,
    "preview": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: galaxy-root\nspec:\n  storageClassName: standard\n  accessMode"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/kind_config.yml.j2",
    "chars": 622,
    "preview": "kind: Cluster\napiVersion: kind.x-k8s.io/v1alpha4\nnodes:\n- role: control-plane\n  extraMounts:\n  - hostPath: {{ HOST_EXPOR"
  },
  {
    "path": "compose/galaxy-configurator/templates/nginx/nginx.conf.j2",
    "chars": 2924,
    "preview": "events { }\n\nhttp {\n  include mime.types;\n  # See https://docs.galaxyproject.org/en/latest/admin/nginx.html#serving-galax"
  },
  {
    "path": "compose/galaxy-configurator/templates/pulsar/app.yml.j2",
    "chars": 212,
    "preview": "managers:\n  {% if PULSAR_JOB_RUNNER == 'local' -%}\n  _default_:\n    type: queued_python\n    num_concurrent_jobs: {{ PULS"
  },
  {
    "path": "compose/galaxy-configurator/templates/pulsar/server.ini.j2",
    "chars": 1150,
    "preview": "[server:main]\nuse = egg:Paste#http\nport = {{ PULSAR_PORT | default(8913) }}\nhost = {{ PULSAR_HOSTNAME | default('pulsar'"
  },
  {
    "path": "compose/galaxy-configurator/templates/slurm/slurm.conf.j2",
    "chars": 647,
    "preview": "{% for key, value in slurm.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n\n{% set slurm_node_count = SLURM_NODE_COUNT | "
  },
  {
    "path": "compose/galaxy-htcondor/Dockerfile",
    "chars": 3019,
    "preview": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as ga"
  },
  {
    "path": "compose/galaxy-htcondor/start.sh",
    "chars": 344,
    "preview": "#!/bin/bash\n\nsleep 5\necho \"Waiting for Galaxy configurator to finish and release lock\"\nuntil [ ! -f /config/configurator"
  },
  {
    "path": "compose/galaxy-htcondor/supervisord.conf",
    "chars": 1543,
    "preview": "[unix_http_server]\nfile=/var/run/supervisor.sock   ; (the path to the socket file)\nchmod=0700                       ; so"
  },
  {
    "path": "compose/galaxy-kind/Dockerfile",
    "chars": 654,
    "preview": "FROM alpine:3.17\n\nARG KIND_RELEASE=v0.24.0\nARG KUBECTL_RELEASE=v1.31.1\n\nRUN apk add --no-cache docker\n\nRUN apk add --no-"
  },
  {
    "path": "compose/galaxy-kind/docker-entrypoint.sh",
    "chars": 1400,
    "preview": "#!/bin/bash\n\n_term() {\n  echo \"Caught SIGTERM signal!\"\n  echo \"Trying to stop Kind cluster\"\n  kind delete cluster --name"
  },
  {
    "path": "compose/galaxy-nginx/Dockerfile",
    "chars": 95,
    "preview": "FROM nginx:1.27-alpine\n\nCOPY start.sh /usr/bin/start.sh\n\nCMD [ \"/bin/sh\", \"/usr/bin/start.sh\"]\n"
  },
  {
    "path": "compose/galaxy-nginx/start.sh",
    "chars": 307,
    "preview": "#!/bin/bash\nsleep 5 # ToDo: Use locking or so to be sure we really have the newest version\necho \"Waiting for Nginx confi"
  },
  {
    "path": "compose/galaxy-server/Dockerfile",
    "chars": 5865,
    "preview": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as bu"
  },
  {
    "path": "compose/galaxy-server/files/common_cleanup.sh",
    "chars": 410,
    "preview": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your appli"
  },
  {
    "path": "compose/galaxy-server/files/create_galaxy_user.py",
    "chars": 2167,
    "preview": "#!/usr/bin/env python\nimport sys\nsys.path.insert(1,'/galaxy')\nsys.path.insert(1,'/galaxy/lib')\n\nfrom galaxy.model import"
  },
  {
    "path": "compose/galaxy-server/files/start.sh",
    "chars": 5125,
    "preview": "#!/bin/bash\n\ncreate_user() {\n  GALAXY_PROXY_PREFIX=$(cat $GALAXY_CONFIG_DIR/GALAXY_PROXY_PREFIX.txt)\n  echo \"Waiting for"
  },
  {
    "path": "compose/galaxy-slurm/Dockerfile",
    "chars": 2032,
    "preview": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as ga"
  },
  {
    "path": "compose/galaxy-slurm/start.sh",
    "chars": 836,
    "preview": "#!/bin/bash\n\n# Inspired by: https://github.com/giovtorres/slurm-docker-cluster\n\nsleep 10 # ToDo: Use locking or so to be"
  },
  {
    "path": "compose/galaxy-slurm-node-discovery/Dockerfile",
    "chars": 95,
    "preview": "FROM alpine:3.17\n\nRUN apk add curl jq\n\nCOPY run.sh /usr/bin/run.sh\n\nENTRYPOINT /usr/bin/run.sh\n"
  },
  {
    "path": "compose/galaxy-slurm-node-discovery/run.sh",
    "chars": 870,
    "preview": "#!/bin/sh\n\n# This script is used to replace the container name of a slurm node\n# with its correct hostname. This is need"
  },
  {
    "path": "compose/pulsar/Dockerfile",
    "chars": 1466,
    "preview": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as bu"
  },
  {
    "path": "compose/pulsar/docker-entrypoint.sh",
    "chars": 862,
    "preview": "#!/bin/bash\n\nif [ -z \"$PULSAR_SKIP_CONFIG_LOCK\" ]; then\n  sleep 10\n  echo \"Waiting for Galaxy configurator to finish and"
  },
  {
    "path": "compose/pulsar/files/common_cleanup.sh",
    "chars": 410,
    "preview": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your appli"
  },
  {
    "path": "compose/tests/docker-compose.test.bioblend.yml",
    "chars": 447,
    "preview": "services:\n  galaxy-bioblend-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-b"
  },
  {
    "path": "compose/tests/docker-compose.test.selenium.yml",
    "chars": 500,
    "preview": "services:\n  galaxy-selenium-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-s"
  },
  {
    "path": "compose/tests/docker-compose.test.workflows.yml",
    "chars": 515,
    "preview": "services:\n  galaxy-workflow-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-w"
  },
  {
    "path": "compose/tests/docker-compose.test.yml",
    "chars": 531,
    "preview": "services:\n  galaxy-configurator:\n    environment:\n      - GALAXY_CONFIG_CLEANUP_JOB=never\n      - NGINX_PROXY_READ_TIMEO"
  },
  {
    "path": "compose/tests/galaxy-bioblend-test/Dockerfile",
    "chars": 566,
    "preview": "FROM alpine:3.17 as build\n\nENV BIOBLEND_VERSION=1.3.0\n\nADD \"https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND"
  },
  {
    "path": "compose/tests/galaxy-bioblend-test/run.sh",
    "chars": 1303,
    "preview": "#!/bin/sh\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users"
  },
  {
    "path": "compose/tests/galaxy-selenium-test/Dockerfile",
    "chars": 726,
    "preview": "FROM selenium/standalone-chrome:4.25.0\n\nARG GALAXY_RELEASE=release_24.1\nARG GALAXY_REPO=https://github.com/galaxyproject"
  },
  {
    "path": "compose/tests/galaxy-selenium-test/run.sh",
    "chars": 821,
    "preview": "#!/bin/bash\nset -e # Stop script, if a test fails\n\nsupervisord &\n\nsleep 5\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl "
  },
  {
    "path": "compose/tests/galaxy-workflow-test/Dockerfile",
    "chars": 563,
    "preview": "FROM alpine:3.17\n\nENV TEST_REPO=${TEST_REPO:-https://github.com/jyotipm29/workflow-testing} \\\n    TEST_RELEASE=${TEST_RE"
  },
  {
    "path": "compose/tests/galaxy-workflow-test/run.sh",
    "chars": 725,
    "preview": "#!/bin/bash\nset -e # Stop script, if a test fails\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl -s -o /dev/null -w '%{ht"
  },
  {
    "path": "cvmfs/Dockerfile",
    "chars": 726,
    "preview": "FROM ubuntu:24.04\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update \\\n    && apt-get install -y --no-install-recom"
  },
  {
    "path": "cvmfs/README.md",
    "chars": 1566,
    "preview": "# CVMFS sidecar for Galaxy\n\nThis container provides a full CVMFS client (no cvmfsexec) and is intended to be used as an "
  },
  {
    "path": "cvmfs/ansible/playbook.yml",
    "chars": 344,
    "preview": "---\n- hosts: localhost\n  connection: local\n  gather_facts: true\n  vars:\n    cvmfs_role: client\n    galaxy_cvmfs_repos_en"
  },
  {
    "path": "cvmfs/ansible/requirements.yml",
    "chars": 115,
    "preview": "---\nroles:\n  - name: galaxyproject.cvmfs\n    src: https://github.com/galaxyproject/ansible-cvmfs\n    version: main\n"
  },
  {
    "path": "cvmfs/docker-entrypoint.sh",
    "chars": 653,
    "preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nrepos=\"${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org"
  },
  {
    "path": "docs/README.md",
    "chars": 803,
    "preview": "Documentation\n=============\n\nThe documentation is automatically generated when the main [`README.md`](https://github.com"
  },
  {
    "path": "docs/Running_jobs_outside_of_the_container.md",
    "chars": 7429,
    "preview": "Using an external Slurm cluster\n-------------------------------\n\nIt is often convenient to configure Galaxy to use a hig"
  },
  {
    "path": "docs/css/landing_page.css",
    "chars": 9163,
    "preview": "@font-face {\n  font-family: 'Noto Sans';\n  font-weight: 400;\n  font-style: normal;\n  src: url('../fonts/Noto-Sans-regula"
  },
  {
    "path": "docs/js/landing_page.js",
    "chars": 536,
    "preview": "var metas = document.getElementsByTagName('meta');\nvar i;\nif (navigator.userAgent.match(/iPhone/i)) {\n  for (i=0; i<meta"
  },
  {
    "path": "docs/src/generate_docs.py",
    "chars": 6384,
    "preview": "import os\n\nimport pycmarkgfm\nfrom bs4 import BeautifulSoup\n\n\ndef extract_html_structure(html_content):\n    html_structur"
  },
  {
    "path": "docs/src/requirements.txt",
    "chars": 25,
    "preview": "pycmarkgfm\nbeautifulsoup4"
  },
  {
    "path": "galaxy/Dockerfile",
    "chars": 20670,
    "preview": "# Galaxy - Stable\n#\n# VERSION       Galaxy in Docker\n\n# TODO\n#\n# * README: only Docker next to Docker is supported\n# * N"
  },
  {
    "path": "galaxy/ansible/condor.yml",
    "chars": 3671,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    htcondor_version: 25.x\n    htcondor_keyring_path:"
  },
  {
    "path": "galaxy/ansible/cvmfs_client.yml",
    "chars": 2781,
    "preview": "# Setup of the CernVM-File system (CVMFS) and configure so that the reference\n# data hosted by Galaxy on usegalaxy.org i"
  },
  {
    "path": "galaxy/ansible/docker.yml",
    "chars": 397,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    docker_install_compose: false\n    docker_install_"
  },
  {
    "path": "galaxy/ansible/files/413.html",
    "chars": 4113,
    "preview": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages"
  },
  {
    "path": "galaxy/ansible/files/500.html",
    "chars": 4101,
    "preview": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages"
  },
  {
    "path": "galaxy/ansible/files/502.html",
    "chars": 4216,
    "preview": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages"
  },
  {
    "path": "galaxy/ansible/files/nginx_sample.crt",
    "chars": 1769,
    "preview": "-----BEGIN CERTIFICATE-----\nMIIE7TCCAtWgAwIBAgIUHBIplAOVmxyIRH51KvXuSWydCj8wDQYJKoZIhvcNAQEL\nBQAwFDESMBAGA1UEAwwJbG9jYWx"
  },
  {
    "path": "galaxy/ansible/files/nginx_sample.key",
    "chars": 3243,
    "preview": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKQIBAAKCAgEA1kSpfexOnDQvNDwSg/4Cjv13+41VF2RgJdpk0n1iBz92GKEl\n7SEh+nhUFinn+CKv2EaNQ7N"
  },
  {
    "path": "galaxy/ansible/files/production_b2drop.yml",
    "chars": 850,
    "preview": "- id: b2drop\n  version: 0\n  name: B2DROP\n  description: |\n    B2DROP is a Nextcloud to sync and share your research data"
  },
  {
    "path": "galaxy/ansible/flower.yml",
    "chars": 427,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    flower_python_package_version: 1.2.0\n    flower_c"
  },
  {
    "path": "galaxy/ansible/galaxy_file_source_templates.yml",
    "chars": 995,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install fs.webdavfs for Galaxy's file so"
  },
  {
    "path": "galaxy/ansible/galaxy_job_conf.yml",
    "chars": 933,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Ensure dynamic handler assignment metho"
  },
  {
    "path": "galaxy/ansible/galaxy_job_metrics.yml",
    "chars": 242,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Setup job metrics\"\n      template: src="
  },
  {
    "path": "galaxy/ansible/galaxy_object_store_templates.yml",
    "chars": 308,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Setup user configurable object store te"
  },
  {
    "path": "galaxy/ansible/galaxy_scripts.yml",
    "chars": 1032,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install galaxy user creation script.\"\n "
  },
  {
    "path": "galaxy/ansible/galaxy_vault_config.yml",
    "chars": 490,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n# You should change this key in production. You can generate "
  },
  {
    "path": "galaxy/ansible/gravity.yml",
    "chars": 1049,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install gravity for galaxy\"\n      pip: "
  },
  {
    "path": "galaxy/ansible/group_vars/all.yml",
    "chars": 13777,
    "preview": "use_pbkdf2: true\npostgresql_version: 15\ngalaxy_apt_package_state: present\n\n# The storage backend to use for docker-in-do"
  },
  {
    "path": "galaxy/ansible/k8s.yml",
    "chars": 1062,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install secure urllib3 for galaxy - bet"
  },
  {
    "path": "galaxy/ansible/nginx.yml",
    "chars": 3644,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    # Default container config: avoid DH param genera"
  },
  {
    "path": "galaxy/ansible/pbs.yml",
    "chars": 911,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install PBS/torque system packages\n     "
  },
  {
    "path": "galaxy/ansible/postgresql.yml",
    "chars": 502,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    postgresql_backup_local_dir: /export/postgresql_b"
  },
  {
    "path": "galaxy/ansible/proftpd.yml",
    "chars": 3199,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    proftpd_galaxy_auth: yes\n    galaxy_user:\n      n"
  },
  {
    "path": "galaxy/ansible/provision.yml",
    "chars": 1935,
    "preview": "---\n- import_playbook: gravity.yml\n  when: galaxy_gravity | bool\n  tags: galaxy_gravity\n\n- import_playbook: postgresql.y"
  },
  {
    "path": "galaxy/ansible/rabbitmq.yml",
    "chars": 2695,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    rabbitmq_keyring_path: /usr/share/keyrings/com.ra"
  },
  {
    "path": "galaxy/ansible/redis.yml",
    "chars": 711,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  roles:\n    - role: geerlingguy.redis\n  tasks:\n    - name: S"
  },
  {
    "path": "galaxy/ansible/requirements.yml",
    "chars": 919,
    "preview": "---\nroles:\n  - name: galaxyproject.postgresql\n    version: 1.1.8\n  - name: geerlingguy.docker\n    version: 7.9.0\n  - nam"
  },
  {
    "path": "galaxy/ansible/slurm.yml",
    "chars": 1786,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    slurm_roles: ['controller', 'exec']\n    slurm_con"
  },
  {
    "path": "galaxy/ansible/supervisor.yml",
    "chars": 1974,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install supervisor package\n      apt: \n "
  },
  {
    "path": "galaxy/ansible/templates/add_tool_shed.py.j2",
    "chars": 845,
    "preview": "#!/usr/bin/env python\n\nimport os\nimport argparse\nimport xml.etree.ElementTree as ET\n\nTOOL_SHEDS_XML = os.path.join(os.en"
  },
  {
    "path": "galaxy/ansible/templates/cgroupfs_mount.sh.j2",
    "chars": 2995,
    "preview": "#!/bin/sh\nset -e\n\n# Get the latest version of this script from https://github.com/moby/moby/blob/65cfcc28ab37cb75e1560e4"
  },
  {
    "path": "galaxy/ansible/templates/check_database.py.j2",
    "chars": 707,
    "preview": "#!/usr/bin/env python\n\n# This script checks if the database is connected by querying an user\n\nimport sys\nsys.path.insert"
  },
  {
    "path": "galaxy/ansible/templates/configure_rabbitmq_users.yml.j2",
    "chars": 1166,
    "preview": "---\n- hosts: localhost\n  connection: local\n  become: yes\n  tasks:\n    - name: Delete 'guest' user\n      rabbitmq_user:\n "
  },
  {
    "path": "galaxy/ansible/templates/configure_slurm.py.j2",
    "chars": 5768,
    "preview": "from socket import gethostname\nfrom os import environ\nimport subprocess\nimport json\n\nCONFIG_FILE_PATH = \"/etc/slurm/slur"
  },
  {
    "path": "galaxy/ansible/templates/container_resolvers_conf.yml.j2",
    "chars": 558,
    "preview": "{% if container_resolution_explicit %}\n- type: explicit\n{% endif %}\n{% if container_resolution_cached_mulled %}\n- type: "
  },
  {
    "path": "galaxy/ansible/templates/create_galaxy_user.py.j2",
    "chars": 2437,
    "preview": "#!/usr/bin/env python\nimport sys\nsys.path.insert(1,'{{ galaxy_server_dir }}')\nsys.path.insert(1,'{{ galaxy_server_dir }}"
  },
  {
    "path": "galaxy/ansible/templates/export_user_files.py.j2",
    "chars": 10328,
    "preview": "#!/usr/bin/env python\nimport fnmatch\nimport glob\nimport sys\nimport os\nimport re\nimport hashlib\nimport shutil\nimport subp"
  },
  {
    "path": "galaxy/ansible/templates/file_source_templates.yml.j2",
    "chars": 937,
    "preview": "- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_azure.yml\"\n- include: \"{{ galaxy_serv"
  },
  {
    "path": "galaxy/ansible/templates/gravity.yml.j2",
    "chars": 11619,
    "preview": "# Configuration for Gravity process manager.\ngravity:\n\n  # Process manager to use.\n  # ``supervisor`` is the default pro"
  },
  {
    "path": "galaxy/ansible/templates/job_conf.xml.j2",
    "chars": 7794,
    "preview": "<?xml version=\"1.0\"?>\n{% import \"macros.xml.j2\" as macros with context %}\n<job_conf>\n    <plugins workers=\"2\">\n{% if gal"
  },
  {
    "path": "galaxy/ansible/templates/job_metrics_conf.yml.j2",
    "chars": 545,
    "preview": "{% if galaxy_job_metrics_core %}\n- type: core\n{% endif %}\n{% if galaxy_job_metrics_cpuinfo and galaxy_job_metrics_cpuinf"
  },
  {
    "path": "galaxy/ansible/templates/macros.xml.j2",
    "chars": 3575,
    "preview": "{% macro destination(id, runner, container_type=None, force_container=False) -%}\n    <destination id=\"{{ id }}\" runner=\""
  },
  {
    "path": "galaxy/ansible/templates/nginx/delegated_uploads.conf.j2",
    "chars": 494,
    "preview": "# delegated uploads\nlocation {{ nginx_tusd_location }} {\n    # Disable request and response buffering\n    proxy_request_"
  },
  {
    "path": "galaxy/ansible/templates/nginx/flower_auth.conf.j2",
    "chars": 157,
    "preview": "# Authenticating with htpasswd file\n\nset $auth \"Flower is restricted. Please contact your administrator.\";\n\nauth_basic $"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_common.conf.j2",
    "chars": 5318,
    "preview": "{% if nginx_use_passwords %}\n        auth_basic      \"devbox\";\n        auth_basic_user_file  /etc/nginx/htpasswd;\n{% end"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_http.j2",
    "chars": 142,
    "preview": "server {\n        listen 80 default_server;\n        listen [::]:80 default_server;\n\n        include {{ nginx_conf_dir }}/"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_https.j2",
    "chars": 152,
    "preview": "server {\n        listen 443 ssl default_server;\n        listen [::]:443 ssl default_server;\n\n        include {{ nginx_co"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_redirect_ssl.j2",
    "chars": 228,
    "preview": "server {\n        listen 80 default_server;\n        listen [::]:80 default_server;\n\n        location /.well-known/ {\n\t   "
  },
  {
    "path": "galaxy/ansible/templates/nginx/htpasswd.j2",
    "chars": 51,
    "preview": "{% for p in nginx_htpasswds %}\n{{ p }}\n{% endfor %}"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_common.conf.j2",
    "chars": 587,
    "preview": "# Match all requests for the interactive tools subdomain\nserver_name  *.interactivetool.{{ galaxy_domain }};\n\n# Log file"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_http.j2",
    "chars": 111,
    "preview": "server {\n    listen 80;\n    listen [::]:80;\n\n    include {{ nginx_conf_dir }}/interactive_tools_common.conf;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_https.j2",
    "chars": 121,
    "preview": "server {\n    listen 443 ssl;\n    listen [::]:443 ssl;\n\n    include {{ nginx_conf_dir }}/interactive_tools_common.conf;\n}"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_redirect_ssl.j2",
    "chars": 97,
    "preview": "server {\n    listen 80;\n    listen [::]:80;\n    rewrite ^ https://$host$request_uri permanent;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/object_store_templates.yml.j2",
    "chars": 764,
    "preview": "# This is a catalog file for all the user object store templates\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectst"
  },
  {
    "path": "galaxy/ansible/templates/rabbitmq.sh.j2",
    "chars": 777,
    "preview": "#!/bin/sh\n# call \"rabbitmqctl stop\" when exiting\n# taken from https://gist.github.com/caioariede/342a583f75467509ad42\nmk"
  },
  {
    "path": "galaxy/ansible/templates/startup_lite.sh.j2",
    "chars": 1245,
    "preview": "#!/bin/bash\n\ncd $GALAXY_ROOT_DIR\n\nexport GALAXY_CONFIG_STATIC_ENABLED=True\nexport GALAXY_CONFIG_ALLOW_PATH_PASTE=True\nun"
  },
  {
    "path": "galaxy/ansible/templates/supervisor.conf.j2",
    "chars": 5916,
    "preview": "[supervisord]\nnodaemon=false\n\n{% if supervisor_webserver %}\n[inet_http_server]\nport={{ supervisor_webserver_port }}\n{% i"
  },
  {
    "path": "galaxy/ansible/templates/update_yaml_value.py.j2",
    "chars": 1712,
    "preview": "import sys\nimport yaml\nimport argparse\n\ndef modify_yaml(file_path, key_path, new_value):\n    # Load the YAML file\n    wi"
  },
  {
    "path": "galaxy/ansible/templates/vault_conf.yml.j2",
    "chars": 327,
    "preview": "type: database\npath_prefix: /galaxy\n# Encryption keys must be valid fernet keys\n# To generate a valid key:\n#\n# Use the a"
  },
  {
    "path": "galaxy/ansible/tusd.yml",
    "chars": 156,
    "preview": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    tusd_version: v2.5.0\n    tusd_systemd: false\n  ro"
  },
  {
    "path": "galaxy/bashrc",
    "chars": 3401,
    "preview": "# ~/.bashrc: executed by bash(1) for non-login shells.\n# see /usr/share/doc/bash/examples/startup-files (in the package "
  },
  {
    "path": "galaxy/cgroupfs_mount.sh",
    "chars": 1000,
    "preview": "#!/bin/bash\nset -e\n\n# DinD: a wrapper script which allows docker to be run inside a docker container.\n# Original version"
  },
  {
    "path": "galaxy/common_cleanup.sh",
    "chars": 644,
    "preview": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your appli"
  },
  {
    "path": "galaxy/docker-compose.yaml",
    "chars": 1947,
    "preview": "# docker-compose wrapper for the single Galaxy container. This is useful for systems like EGI IM.\n# Start via `IMAGE_TAG"
  },
  {
    "path": "galaxy/install_tools_wrapper.sh",
    "chars": 7269,
    "preview": "#!/bin/bash\nset -euo pipefail\n\n# Basic defaults so set -u does not choke when running outside the normal entrypoint.\nGAL"
  },
  {
    "path": "galaxy/run.sh",
    "chars": 1535,
    "preview": "#!/bin/sh\n\n\n# Usage: ./run.sh <start|stop|restart>\n#\n#\n# Description: This script can be used to start or stop the galax"
  },
  {
    "path": "galaxy/sample_tool_list.yaml",
    "chars": 518,
    "preview": "# This is just a sample file. For a fully documented version of this file, see\n# https://github.com/galaxyproject/ansibl"
  },
  {
    "path": "galaxy/setup_postgresql.py",
    "chars": 3481,
    "preview": "import os\nimport shutil\nimport argparse\nimport subprocess\n\n\ndef pg_ctl(database_path, database_version, mod='start'):\n  "
  },
  {
    "path": "galaxy/startup.sh",
    "chars": 27396,
    "preview": "#!/usr/bin/env bash\n\n# This is needed for Docker compose to have a unified alias for the main container.\n# Modifying /et"
  },
  {
    "path": "galaxy/startup2.sh",
    "chars": 35332,
    "preview": "#!/usr/bin/env bash\n\nSTARTUP_LOG_DIR=\"${STARTUP_LOG_DIR:-${GALAXY_LOGS_DIR:-/home/galaxy/logs}}\"\nSTARTUP_LOG=\"${STARTUP_"
  },
  {
    "path": "galaxy/tool_conf_interactive.xml.sample",
    "chars": 1050,
    "preview": "<?xml version='1.0' encoding='utf-8'?>\n<toolbox monitor=\"true\">\n  <section id=\"interactivetools\" name=\"Interactive Tools"
  },
  {
    "path": "galaxy/tool_sheds_conf.xml",
    "chars": 313,
    "preview": "<?xml version=\"1.0\"?>\n<!-- This file is only used if the container is started with -e ENABLE_TTS_INSTALL=True -->\n<tool_"
  },
  {
    "path": "galaxy/welcome.html",
    "chars": 2520,
    "preview": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"utf-8\">\n    <link rel=\"stylesheet\" href=\"/static/dist/base.cs"
  },
  {
    "path": "skills/galaxy-docker/SKILL.md",
    "chars": 3317,
    "preview": "---\nname: galaxy-docker\ndescription: Maintain and upgrade the bgruening/docker-galaxy project: bump Galaxy/Ubuntu versio"
  },
  {
    "path": "skills/galaxy-docker/references/upgrade-25.1.md",
    "chars": 4811,
    "preview": "# 25.1 upgrade reference (docker-galaxy)\n\nThis reference captures the key decisions, pins, and fixes applied during the "
  },
  {
    "path": "test/bioblend/Dockerfile",
    "chars": 1037,
    "preview": "FROM alpine:3.17 as build\n\nENV BIOBLEND_VERSION=1.7.0 \\\n    TOX_ENV=py310 \\\n    BIOBLEND_GALAXY_API_KEY=fakekey \\\n    BI"
  },
  {
    "path": "test/bioblend/test.sh",
    "chars": 281,
    "preview": "#!/bin/bash\nif ! docker build -t bioblend_test .; then\n    echo \"Bioblend docker image build failed.\"\n    exit 1\nfi\n\nif "
  },
  {
    "path": "test/container_resolvers_conf.ci.yml",
    "chars": 182,
    "preview": "# Minimal container resolvers for CI to keep resolve_toolbox fast.\n- type: explicit\n- type: cached_mulled_singularity\n  "
  },
  {
    "path": "test/cvmfs/test.sh",
    "chars": 1589,
    "preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nif ! docker build -t galaxy:test ./galaxy; then\n    echo \"Galaxy docker image bui"
  },
  {
    "path": "test/gridengine/Dockerfile",
    "chars": 1472,
    "preview": "FROM ubuntu:22.04 AS sge_master\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update -qq \\\n    && apt-get install -y "
  },
  {
    "path": "test/gridengine/act_qmaster",
    "chars": 10,
    "preview": "sgemaster\n"
  },
  {
    "path": "test/gridengine/job_conf.xml.sge",
    "chars": 888,
    "preview": "<?xml version=\"1.0\"?>\n<job_conf>\n    <plugins workers=\"8\">\n        <plugin id=\"sge\" type=\"runner\" load=\"galaxy.jobs.runn"
  },
  {
    "path": "test/gridengine/master_script.sh",
    "chars": 120,
    "preview": "#!/bin/bash\nuseradd -u 1450 -m galaxy\n/usr/local/bin/setup_gridengine.sh\ntail -f /var/spool/gridengine/qmaster/messages\n"
  },
  {
    "path": "test/gridengine/outputhostname/outputhostname.xml",
    "chars": 321,
    "preview": "<tool id=\"outputhostname\" name=\"Output Hostname\" version=\"0.0.1\">\n    <description>data in ascending or descending order"
  },
  {
    "path": "test/gridengine/outputhostname.tool.xml",
    "chars": 199,
    "preview": "<?xml version=\"1.0\"?>\n<toolbox tool_path=\"${tool_conf_dir}\" is_shed_conf=\"false\">\n  <section id=\"outputhostname\" name=\"O"
  },
  {
    "path": "test/gridengine/setup_gridengine.sh",
    "chars": 2198,
    "preview": "#!/bin/bash\n\n# hostname > /var/lib/gridengine/default/common/act_qmaster\n/etc/init.d/gridengine-master start\n/etc/init.d"
  },
  {
    "path": "test/gridengine/setup_tool.sh",
    "chars": 141,
    "preview": "#!/bin/bash\n# cp tool_conf.xml config\nexport GALAXY_CONFIG_TOOL_CONFIG_FILE=/galaxy/tool_conf.xml\n/usr/bin/startup\ntailf"
  },
  {
    "path": "test/gridengine/test.sh",
    "chars": 3153,
    "preview": "#!/usr/bin/env bash\n\necho \"Test that jobs run successfully on an external gridengine cluster\"\n\ndocker build --target sge"
  },
  {
    "path": "test/gridengine/test_outputhostname.py",
    "chars": 834,
    "preview": "#!/usr/bin/python\nimport time\n\nfrom bioblend.galaxy import GalaxyInstance\ngi = GalaxyInstance('http://galaxytest', key='"
  },
  {
    "path": "test/gridengine/tool_conf.xml",
    "chars": 191,
    "preview": "<?xml version='1.0' encoding='utf-8'?>\n<toolbox monitor=\"true\">\n  <section id=\"testtool\" name=\"Output Hostname\">\n    <to"
  },
  {
    "path": "test/slurm/Dockerfile",
    "chars": 1767,
    "preview": "FROM ubuntu:24.04\n\nENV DEBIAN_FRONTEND=noninteractive\nENV UV_INSTALL_DIR=/usr/local/bin\n\nRUN apt-get update -qq && apt-g"
  },
  {
    "path": "test/slurm/configure_slurm.py",
    "chars": 5510,
    "preview": "from socket import gethostname\nfrom string import Template\nfrom os import environ\nimport subprocess\nimport json\n\n\nSLURM_"
  },
  {
    "path": "test/slurm/job_conf.xml",
    "chars": 2269,
    "preview": "<?xml version=\"1.0\"?>\n<job_conf>\n    <plugins workers=\"2\">\n        <plugin id=\"slurm\" type=\"runner\" load=\"galaxy.jobs.ru"
  },
  {
    "path": "test/slurm/munge.conf",
    "chars": 404,
    "preview": "###############################################################################\n# $Id: munge.sysconfig 507 2006-05-11 20"
  },
  {
    "path": "test/slurm/startup.sh",
    "chars": 1476,
    "preview": "#!/usr/bin/env bash\n\n# Setup the galaxy user UID/GID and pass control on to supervisor\nif id \"$SLURM_USER_NAME\" >/dev/nu"
  },
  {
    "path": "test/slurm/supervisor_slurm.conf",
    "chars": 509,
    "preview": "[program:munge]\nuser=root\ncommand=/usr/sbin/munged --key-file=%(ENV_MUNGE_KEY_PATH)s -F --force\n\n[program:slurmctld]\nuse"
  },
  {
    "path": "test/slurm/test.sh",
    "chars": 2983,
    "preview": "#!/usr/bin/env bash\n\nset -euo pipefail\nset -x\n# Test that jobs run successfully on an external slurm cluster\n\n# We use a"
  }
]

About this extraction

This page contains the full source code of the bgruening/docker-galaxy-stable GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 188 files (506.3 KB), approximately 147.1k tokens, and a symbol index with 13 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!