Repository: bgruening/docker-galaxy-stable
Branch: main
Commit: 5488282c3e3e
Files: 188
Total size: 506.3 KB
Directory structure:
gitextract__x213t1e/
├── .dive-ci
├── .editorconfig
├── .github/
│ └── workflows/
│ ├── compose.yml
│ ├── cvmfs.yml
│ ├── lint.yml
│ ├── pull-request.yml
│ ├── release.yml
│ ├── single.sh
│ ├── single_container.yml
│ └── update-site.yml
├── .gitignore
├── .travis.yml
├── Changelog.md
├── LICENSE
├── README.md
├── compose/
│ ├── README.md
│ ├── base-images/
│ │ ├── galaxy-cluster-base/
│ │ │ ├── Dockerfile
│ │ │ └── files/
│ │ │ ├── common_cleanup.sh
│ │ │ └── cvmfs/
│ │ │ ├── default.local
│ │ │ ├── domain.d/
│ │ │ │ └── galaxyproject.org.conf
│ │ │ └── keys/
│ │ │ └── galaxyproject.org/
│ │ │ ├── data.galaxyproject.org.pub
│ │ │ └── singularity.galaxyproject.org.pub
│ │ └── galaxy-container-base/
│ │ ├── Dockerfile
│ │ └── files/
│ │ └── common_cleanup.sh
│ ├── base_config.yml
│ ├── docker-compose.htcondor.yml
│ ├── docker-compose.k8s.yml
│ ├── docker-compose.pulsar.mq.yml
│ ├── docker-compose.pulsar.yml
│ ├── docker-compose.singularity.yml
│ ├── docker-compose.slurm.yml
│ ├── docker-compose.yml
│ ├── galaxy-configurator/
│ │ ├── Dockerfile
│ │ ├── customize.py
│ │ ├── run.sh
│ │ └── templates/
│ │ ├── galaxy/
│ │ │ ├── GALAXY_PROXY_PREFIX.txt.j2
│ │ │ ├── container_resolvers_conf.yml.j2
│ │ │ ├── dependency_resolvers_conf.xml.j2
│ │ │ ├── galaxy.yml.j2
│ │ │ ├── job_conf.xml.j2
│ │ │ └── job_metrics.xml.j2
│ │ ├── htcondor/
│ │ │ ├── executor.conf.j2
│ │ │ ├── galaxy.conf.j2
│ │ │ └── master.conf.j2
│ │ ├── kind/
│ │ │ ├── k8s_config/
│ │ │ │ ├── persistent_volumes.yml.j2
│ │ │ │ └── pv_claims.yml.j2
│ │ │ └── kind_config.yml.j2
│ │ ├── nginx/
│ │ │ └── nginx.conf.j2
│ │ ├── pulsar/
│ │ │ ├── app.yml.j2
│ │ │ └── server.ini.j2
│ │ └── slurm/
│ │ └── slurm.conf.j2
│ ├── galaxy-htcondor/
│ │ ├── Dockerfile
│ │ ├── start.sh
│ │ └── supervisord.conf
│ ├── galaxy-kind/
│ │ ├── Dockerfile
│ │ └── docker-entrypoint.sh
│ ├── galaxy-nginx/
│ │ ├── Dockerfile
│ │ └── start.sh
│ ├── galaxy-server/
│ │ ├── Dockerfile
│ │ └── files/
│ │ ├── common_cleanup.sh
│ │ ├── create_galaxy_user.py
│ │ └── start.sh
│ ├── galaxy-slurm/
│ │ ├── Dockerfile
│ │ └── start.sh
│ ├── galaxy-slurm-node-discovery/
│ │ ├── Dockerfile
│ │ └── run.sh
│ ├── pulsar/
│ │ ├── Dockerfile
│ │ ├── docker-entrypoint.sh
│ │ └── files/
│ │ └── common_cleanup.sh
│ └── tests/
│ ├── docker-compose.test.bioblend.yml
│ ├── docker-compose.test.selenium.yml
│ ├── docker-compose.test.workflows.yml
│ ├── docker-compose.test.yml
│ ├── galaxy-bioblend-test/
│ │ ├── Dockerfile
│ │ └── run.sh
│ ├── galaxy-selenium-test/
│ │ ├── Dockerfile
│ │ └── run.sh
│ └── galaxy-workflow-test/
│ ├── Dockerfile
│ └── run.sh
├── cvmfs/
│ ├── Dockerfile
│ ├── README.md
│ ├── ansible/
│ │ ├── playbook.yml
│ │ └── requirements.yml
│ └── docker-entrypoint.sh
├── docs/
│ ├── README.md
│ ├── Running_jobs_outside_of_the_container.md
│ ├── css/
│ │ └── landing_page.css
│ ├── js/
│ │ └── landing_page.js
│ └── src/
│ ├── generate_docs.py
│ └── requirements.txt
├── galaxy/
│ ├── Dockerfile
│ ├── ansible/
│ │ ├── condor.yml
│ │ ├── cvmfs_client.yml
│ │ ├── docker.yml
│ │ ├── files/
│ │ │ ├── 413.html
│ │ │ ├── 500.html
│ │ │ ├── 502.html
│ │ │ ├── nginx_sample.crt
│ │ │ ├── nginx_sample.key
│ │ │ └── production_b2drop.yml
│ │ ├── flower.yml
│ │ ├── galaxy_file_source_templates.yml
│ │ ├── galaxy_job_conf.yml
│ │ ├── galaxy_job_metrics.yml
│ │ ├── galaxy_object_store_templates.yml
│ │ ├── galaxy_scripts.yml
│ │ ├── galaxy_vault_config.yml
│ │ ├── gravity.yml
│ │ ├── group_vars/
│ │ │ └── all.yml
│ │ ├── k8s.yml
│ │ ├── nginx.yml
│ │ ├── pbs.yml
│ │ ├── postgresql.yml
│ │ ├── proftpd.yml
│ │ ├── provision.yml
│ │ ├── rabbitmq.yml
│ │ ├── redis.yml
│ │ ├── requirements.yml
│ │ ├── slurm.yml
│ │ ├── supervisor.yml
│ │ ├── templates/
│ │ │ ├── add_tool_shed.py.j2
│ │ │ ├── cgroupfs_mount.sh.j2
│ │ │ ├── check_database.py.j2
│ │ │ ├── configure_rabbitmq_users.yml.j2
│ │ │ ├── configure_slurm.py.j2
│ │ │ ├── container_resolvers_conf.yml.j2
│ │ │ ├── create_galaxy_user.py.j2
│ │ │ ├── export_user_files.py.j2
│ │ │ ├── file_source_templates.yml.j2
│ │ │ ├── gravity.yml.j2
│ │ │ ├── job_conf.xml.j2
│ │ │ ├── job_metrics_conf.yml.j2
│ │ │ ├── macros.xml.j2
│ │ │ ├── nginx/
│ │ │ │ ├── delegated_uploads.conf.j2
│ │ │ │ ├── flower_auth.conf.j2
│ │ │ │ ├── galaxy_common.conf.j2
│ │ │ │ ├── galaxy_http.j2
│ │ │ │ ├── galaxy_https.j2
│ │ │ │ ├── galaxy_redirect_ssl.j2
│ │ │ │ ├── htpasswd.j2
│ │ │ │ ├── interactive_tools_common.conf.j2
│ │ │ │ ├── interactive_tools_http.j2
│ │ │ │ ├── interactive_tools_https.j2
│ │ │ │ └── interactive_tools_redirect_ssl.j2
│ │ │ ├── object_store_templates.yml.j2
│ │ │ ├── rabbitmq.sh.j2
│ │ │ ├── startup_lite.sh.j2
│ │ │ ├── supervisor.conf.j2
│ │ │ ├── update_yaml_value.py.j2
│ │ │ └── vault_conf.yml.j2
│ │ └── tusd.yml
│ ├── bashrc
│ ├── cgroupfs_mount.sh
│ ├── common_cleanup.sh
│ ├── docker-compose.yaml
│ ├── install_tools_wrapper.sh
│ ├── run.sh
│ ├── sample_tool_list.yaml
│ ├── setup_postgresql.py
│ ├── startup.sh
│ ├── startup2.sh
│ ├── tool_conf_interactive.xml.sample
│ ├── tool_sheds_conf.xml
│ └── welcome.html
├── skills/
│ └── galaxy-docker/
│ ├── SKILL.md
│ └── references/
│ └── upgrade-25.1.md
└── test/
├── bioblend/
│ ├── Dockerfile
│ └── test.sh
├── container_resolvers_conf.ci.yml
├── cvmfs/
│ └── test.sh
├── gridengine/
│ ├── Dockerfile
│ ├── act_qmaster
│ ├── job_conf.xml.sge
│ ├── master_script.sh
│ ├── outputhostname/
│ │ └── outputhostname.xml
│ ├── outputhostname.tool.xml
│ ├── setup_gridengine.sh
│ ├── setup_tool.sh
│ ├── test.sh
│ ├── test_outputhostname.py
│ └── tool_conf.xml
└── slurm/
├── Dockerfile
├── configure_slurm.py
├── job_conf.xml
├── munge.conf
├── startup.sh
├── supervisor_slurm.conf
└── test.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .dive-ci
================================================
rules:
# If the efficiency is measured below X%, mark as failed.
# Expressed as a ratio between 0-1.
lowestEfficiency: 0.95
# If the amount of wasted space is at least X or larger than X, mark as failed.
# Expressed in B, KB, MB, and GB.
# highestWastedBytes: 20MB
# If the amount of wasted space makes up for X% or more of the image, mark as failed.
# Note: the base image layer is NOT included in the total image size.
# Expressed as a ratio between 0-1; fails if the threshold is met or crossed.
highestUserWastedPercent: 0.10
================================================
FILE: .editorconfig
================================================
root = true
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.py]
indent_size = 4
================================================
FILE: .github/workflows/compose.yml
================================================
name: build-and-test
on: [push]
jobs:
build_container_base:
if: false # Temporarily disable workflow
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set image tag
id: image_tag
run: |
if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
echo "image_tag=latest" >> $GITHUB_OUTPUT;
else
echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
fi
- name: Docker Login
run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
with:
version: v0.17.1
- name: Run Buildx
env:
image_name: galaxy-container-base
run: |
for i in {1..4}; do
set +e
docker buildx build \
--output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true" \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
--build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
--build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
$image_name && break || echo "Fail.. Retrying"
done;
shell: bash
working-directory: ./compose/base-images
build_cluster_base:
needs: build_container_base
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set image tag
id: image_tag
run: |
if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
echo "image_tag=latest" >> $GITHUB_OUTPUT;
else
echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
fi
- name: Docker Login
run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
with:
version: v0.17.1
- name: Run Buildx
env:
image_name: galaxy-cluster-base
run: |
for i in {1..4}; do
set +e
docker buildx build \
--output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true" \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
--build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
--build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
$image_name && break || echo "Fail.. Retrying"
done;
shell: bash
working-directory: ./compose/base-images
build:
needs: build_cluster_base
runs-on: ubuntu-latest
strategy:
matrix:
image:
- name: galaxy-server
- name: galaxy-nginx
- name: galaxy-htcondor
- name: galaxy-slurm
- name: galaxy-slurm-node-discovery
- name: galaxy-kind
- name: pulsar
- name: galaxy-configurator
- name: galaxy-bioblend-test
subdir: tests/
- name: galaxy-workflow-test
subdir: tests/
- name: galaxy-selenium-test
subdir: tests/
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set image tag
id: image_tag
run: |
if [ "${GITHUB_REF#refs/heads/}" = "master" ]; then
echo "image_tag=latest" >> $GITHUB_OUTPUT;
else
echo "image_tag=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT;
fi
- name: Docker Login
run: echo "${{ secrets.docker_registry_password }}" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
with:
version: v0.17.1
- name: Run Buildx
run: |
for i in {1..4}; do
set +e
docker buildx build \
--output "type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/${{ matrix.image.name }}:${{ steps.image_tag.outputs.image_tag }},push=true" \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \
--build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \
--build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \
--build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy \
${{ matrix.image.subdir }}${{ matrix.image.name }} && break || echo "Fail.. Retrying"
done;
shell: bash
working-directory: ./compose
test:
needs: [build]
runs-on: ubuntu-latest
strategy:
matrix:
infrastructure:
- name: galaxy-base
files: -f docker-compose.yml
- name: galaxy-proxy-prefix
files: -f docker-compose.yml
env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND="not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path"
exclude_test:
- selenium
- name: galaxy-htcondor
files: -f docker-compose.yml -f docker-compose.htcondor.yml
- name: galaxy-slurm
files: -f docker-compose.yml -f docker-compose.slurm.yml
env: SLURM_NODE_COUNT=3
options: --scale slurm_node=3
- name: galaxy-pulsar
files: -f docker-compose.yml -f docker-compose.pulsar.yml
exclude_test:
- workflow_quality_control
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
- name: galaxy-pulsar-mq
files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml
exclude_test:
- workflow_quality_control
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
- name: galaxy-k8s
files: -f docker-compose.yml -f docker-compose.k8s.yml
- name: galaxy-singularity
files: -f docker-compose.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
- name: galaxy-pulsar-mq-singularity
files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver"
exclude_test:
- workflow_quality_control
- name: galaxy-slurm-singularity
files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
- name: galaxy-htcondor-singularity
files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
test:
- name: bioblend
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml
exit-from: galaxy-bioblend-test
timeout: 60
second_run: "true"
- name: workflow_ard
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: sklearn/ard/ard.ga
timeout: 60
second_run: "true"
- name: workflow_quality_control
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: training/sequence-analysis/quality-control/quality_control.ga
timeout: 60
- name: workflow_example1
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: example1/wf3-shed-tools.ga
timeout: 60
- name: selenium
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml
exit-from: galaxy-selenium-test
timeout: 60
fail-fast: false
steps:
# Self-made `exclude` as Github Actions currently does not support
# exclude/including of dicts in matrices
- name: Check if test should be run
id: run_check
if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true
run: echo "run=true" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v6
- name: Set image tag in env
run: echo "IMAGE_TAG=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV
- name: Master branch - Set image tag to 'latest'
if: github.ref == 'refs/heads/master'
run: echo "IMAGE_TAG=latest" >> $GITHUB_ENV
- name: Set WORKFLOWS env for workflows-test
if: matrix.test.workflow
run: echo "WORKFLOWS=${{ matrix.test.workflow }}" >> $GITHUB_ENV
- name: Install Docker Compose
run: |
sudo apt-get update -qq && sudo apt-get install docker-compose -y
- name: Run tests for the first time
if: steps.run_check.outputs.run
run: |
export DOCKER_REGISTRY=${{ secrets.docker_registry }}
export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}
export ${{ matrix.infrastructure.env }}
export TIMEOUT=${{ matrix.test.timeout }}
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} config
env
for i in {1..4}; do
echo "Running test - try \#$i"
echo "Removing export directory if existent";
sudo rm -rf export
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} pull
set +e
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
test_exit_code=$?
error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
echo "Test failed..";
continue;
else
exit $test_exit_code;
fi
done;
exit 1
shell: bash
working-directory: ./compose
continue-on-error: false
- name: Fix file names before saving artifacts
if: failure()
run: |
sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
- name: Allow upload-artifact read access
if: failure()
run: sudo chmod -R +r ./compose/export/galaxy/database
- name: Save artifacts for debugging a failed test
uses: actions/upload-artifact@v6
if: failure()
with:
name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run
path: ./compose/export/galaxy/database
- name: Clean up after first run
if: matrix.test.second_run == 'true'
run: |
sudo rm -rf export/postgres
sudo rm -rf export/galaxy/database
working-directory: ./compose
- name: Run tests a second time
if: matrix.test.second_run == 'true' && steps.run_check.outputs.run
run: |
export DOCKER_REGISTRY=${{ secrets.docker_registry }}
export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}
export ${{ matrix.infrastructure.env }}
export TIMEOUT=${{ matrix.test.timeout }}
for i in {1..4}; do
echo "Running test - try \#$i"
echo "Removing export directory if existent";
sudo rm -rf export
set +e
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
test_exit_code=$?
error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
echo "Test failed..";
continue;
else
exit $test_exit_code;
fi
done;
exit 1
shell: bash
working-directory: ./compose
continue-on-error: false
- name: Fix file names before saving artifacts
if: failure() && matrix.test.second_run == 'true'
run: |
sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
- name: Allow upload-artifact read access
if: failure() && matrix.test.second_run == 'true'
run: sudo chmod -R +r ./compose/export/galaxy/database
- name: Save artifacts for debugging a failed test
uses: actions/upload-artifact@v6
if: failure() && matrix.test.second_run == 'true'
with:
name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run
path: ./compose/export/galaxy/database
================================================
FILE: .github/workflows/cvmfs.yml
================================================
name: cvmfs-sidecar
on:
push:
branches:
- '**'
tags:
- '*'
pull_request:
paths:
- 'cvmfs/**'
- 'test/cvmfs/**'
- '.github/workflows/cvmfs.yml'
jobs:
build_test_publish:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Detect CVMFS changes
id: changes
uses: dorny/paths-filter@v3
with:
filters: |
cvmfs:
- 'cvmfs/**'
- 'test/cvmfs/**'
- '.github/workflows/cvmfs.yml'
- name: Run CVMFS sidecar tests
if: github.event_name == 'pull_request' || steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/')
run: bash test/cvmfs/test.sh
- name: Set image version
id: version
if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
run: |
set -euo pipefail
if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
version="${GITHUB_REF_NAME}"
else
ref="${GITHUB_REF_NAME//\//-}"
version="${ref}-${GITHUB_SHA::7}"
fi
echo "version=$version" >> "$GITHUB_OUTPUT"
- name: Set up Docker Buildx
if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
uses: docker/setup-buildx-action@v3
- name: Login to Quay IO
if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
uses: docker/login-action@v3
with:
registry: quay.io
username: '$oauthtoken'
password: ${{ secrets.QUAY_OAUTH_TOKEN }}
- name: Build and push CVMFS image
if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))
uses: docker/build-push-action@v6
with:
context: "{{defaultContext}}:cvmfs"
push: true
tags: quay.io/bgruening/cvmfs:${{ steps.version.outputs.version }}
cache-from: type=gha
cache-to: type=gha,mode=max
================================================
FILE: .github/workflows/lint.yml
================================================
name: Lint
on: [push]
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
# - name: Cleanup to only use compose
# run: rm -R docs galaxy test
- name: Run shellcheck with reviewdog
uses: reviewdog/action-shellcheck@v1.27.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-check
level: warning
pattern: "*.sh"
- name: Run hadolint with reviewdog
uses: reviewdog/action-hadolint@v1.46.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-check
================================================
FILE: .github/workflows/pull-request.yml
================================================
name: pr-test
on: pull_request
jobs:
test:
if: false # Temporarily disable workflow
runs-on: ubuntu-22.04
strategy:
matrix:
infrastructure:
- name: galaxy-base
files: -f docker-compose.yml
- name: galaxy-proxy-prefix
files: -f docker-compose.yml
env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND="not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path"
exclude_test:
- selenium
- name: galaxy-htcondor
files: -f docker-compose.yml -f docker-compose.htcondor.yml
- name: galaxy-slurm
files: -f docker-compose.yml -f docker-compose.slurm.yml
env: SLURM_NODE_COUNT=3
options: --scale slurm_node=3
- name: galaxy-pulsar
files: -f docker-compose.yml -f docker-compose.pulsar.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
exclude_test:
- workflow_quality_control
- name: galaxy-pulsar-mq
files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job"
exclude_test:
- workflow_quality_control
- name: galaxy-k8s
files: -f docker-compose.yml -f docker-compose.k8s.yml
- name: galaxy-singularity
files: -f docker-compose.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
- name: galaxy-pulsar-mq-singularity
files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver"
exclude_test:
- workflow_quality_control
- name: galaxy-slurm-singularity
files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
- name: galaxy-htcondor-singularity
files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml
env: EXTRA_SKIP_TESTS_BIOBLEND="not test_get_container_resolvers and not test_show_container_resolver"
test:
- name: bioblend
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml
exit-from: galaxy-bioblend-test
timeout: 60
second_run: "true"
- name: workflow_ard
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: sklearn/ard/ard.ga
timeout: 60
second_run: "true"
- name: workflow_quality_control
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: training/sequence-analysis/quality-control/quality_control.ga
timeout: 60
- name: workflow_example1
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml
exit-from: galaxy-workflow-test
workflow: example1/wf3-shed-tools.ga
timeout: 60
- name: selenium
files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml
exit-from: galaxy-selenium-test
timeout: 60
fail-fast: false
steps:
# Self-made `exclude` as Github Actions currently does not support
# exclude/including of dicts in matrices
- name: Check if test should be run
id: run_check
if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true
run: echo "run=true" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v6
- name: Set WORKFLOWS env for workflows-test
if: matrix.test.workflow
run: echo "WORKFLOWS=${{ matrix.test.workflow }}" >> $GITHUB_ENV
- name: Build galaxy-container-base
env:
image_name: galaxy-container-base
run: |
docker buildx build \
--output "type=image,name=quay.io/bgruening/$image_name:ci-testing" \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--build-arg IMAGE_TAG=ci-testing \
$image_name
working-directory: ./compose/base-images
- name: Build galaxy-cluster-base
env:
image_name: galaxy-cluster-base
run: |
docker buildx build \
--output "type=image,name=quay.io/bgruening/$image_name:ci-testing" \
--cache-from type=gha \
--cache-to type=gha,mode=max \
--build-arg IMAGE_TAG=ci-testing \
$image_name
working-directory: ./compose/base-images
- name: Install Docker Compose
run: |
sudo apt-get update -qq && sudo apt-get install docker-compose -y
- name: Run tests for the first time
if: steps.run_check.outputs.run
run: |
export IMAGE_TAG=ci-testing
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
export ${{ matrix.infrastructure.env }}
export TIMEOUT=${{ matrix.test.timeout }}
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} config
env
for i in {1..4}; do
echo "Running test - try \#$i"
echo "Removing export directory if existent";
sudo rm -rf export
set +e
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} build --build-arg IMAGE_TAG=ci-testing --build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
test_exit_code=$?
error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down
if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
echo "Test failed..";
continue;
else
exit $test_exit_code;
fi
done;
exit 1
shell: bash
working-directory: ./compose
continue-on-error: false
- name: Fix file names before saving artifacts
if: failure()
run: |
sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
- name: Allow upload-artifact read access
if: failure()
run: sudo chmod -R +r ./compose/export/galaxy/database
- name: Save artifacts for debugging a failed test
uses: actions/upload-artifact@v6
if: failure()
with:
name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run
path: ./compose/export/galaxy/database
- name: Clean up after first run
if: matrix.test.second_run == 'true'
run: |
sudo rm -rf export/postgres
sudo rm -rf export/galaxy/database
working-directory: ./compose
- name: Run tests a second time
if: matrix.test.second_run == 'true' && steps.run_check.outputs.run
run: |
export IMAGE_TAG=ci-testing
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
export ${{ matrix.infrastructure.env }}
export TIMEOUT=${{ matrix.test.timeout }}
for i in {1..4}; do
echo "Running test - try \#$i"
echo "Removing export directory if existent";
sudo rm -rf export
set +e
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}
test_exit_code=$?
error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)
if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then
echo "Test failed..";
continue;
else
exit $test_exit_code;
fi
done;
exit 1
shell: bash
working-directory: ./compose
continue-on-error: false
- name: Fix file names before saving artifacts
if: failure() && matrix.test.second_run == 'true'
run: |
sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv "$1" "${1//:/-}"' bash {} \;
- name: Allow upload-artifact read access
if: failure() && matrix.test.second_run == 'true'
run: sudo chmod -R +r ./compose/export/galaxy/database
- name: Save artifacts for debugging a failed test
uses: actions/upload-artifact@v6
if: failure() && matrix.test.second_run == 'true'
with:
name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run
path: ./compose/export/galaxy/database
================================================
FILE: .github/workflows/release.yml
================================================
name: release-CI
on:
release:
types: [published]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build_and_publish:
runs-on: ubuntu-latest
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Quay IO
uses: docker/login-action@v3
with:
registry: quay.io
username: '$oauthtoken'
password: ${{ secrets.QUAY_OAUTH_TOKEN }}
- name: Build docker image and push to quay.io
uses: docker/build-push-action@v6
with:
context: "{{defaultContext}}:galaxy"
push: true
tags: quay.io/bgruening/galaxy:${{ github.event.release.tag_name }}
cache-from: type=gha
cache-to: type=gha,mode=max
================================================
FILE: .github/workflows/single.sh
================================================
#!/bin/bash
# CI smoke test for the single (monolithic) Galaxy container image:
# builds the image, starts it, and drives cluster/API tests against it.
set -ex

docker --version
docker info

# Credentials and endpoints used by the test tooling inside and outside the container.
export GALAXY_HOME=/home/galaxy
export GALAXY_USER=admin@example.org
export GALAXY_USER_EMAIL=admin@example.org
export GALAXY_USER_PASSWD=password
export BIOBLEND_GALAXY_API_KEY=fakekey
export BIOBLEND_GALAXY_URL=http://localhost:8080
# Pinned ephemeris image used by galaxy_wait below; overridable from the environment.
export EPHEMERIS_IMAGE=${EPHEMERIS_IMAGE:-quay.io/biocontainers/ephemeris:0.10.11--pyhdfd78af_0}
export GALAXY_WAIT_TIMEOUT=${GALAXY_WAIT_TIMEOUT:-600}

SKIP_SFTP=false
SKIP_DIVE=false

# On CI, install the optional tools; locally, skip the tests that need them.
if [[ "${CI:-}" == "true" ]]; then
  sudo apt-get update -qq
  #sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::="--force-confmiss" -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew"
  sudo apt-get install sshpass --no-install-recommends -y
else
  if ! command -v sshpass >/dev/null 2>&1; then
    echo "sshpass not found; skipping SFTP test."
    SKIP_SFTP=true
  fi
fi

if [[ "${CI:-}" == "true" ]]; then
  # Install the latest dive release for image-efficiency analysis (thresholds in .dive-ci).
  DIVE_VERSION=$(curl -sL "https://api.github.com/repos/wagoodman/dive/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
  curl -OL https://github.com/wagoodman/dive/releases/download/v${DIVE_VERSION}/dive_${DIVE_VERSION}_linux_amd64.deb
  sudo apt install ./dive_${DIVE_VERSION}_linux_amd64.deb
  rm ./dive_${DIVE_VERSION}_linux_amd64.deb
else
  if ! command -v dive >/dev/null 2>&1; then
    echo "dive not found; skipping image analysis."
    SKIP_DIVE=true
  fi
fi
# Block until the "galaxy" container answers HTTP, via the ephemeris
# galaxy-wait helper. $1 (optional): timeout in seconds, defaulting to
# $GALAXY_WAIT_TIMEOUT.
galaxy_wait() {
  local wait_timeout="${1:-$GALAXY_WAIT_TIMEOUT}"
  docker run --rm --link galaxy:galaxy "${EPHEMERIS_IMAGE}" \
    galaxy-wait -g http://galaxy --timeout "$wait_timeout"
}
# start building this repo
if [[ "${CI:-}" == "true" ]]; then
  # /tmp is bind-mounted into the container below; uid 1450 — presumably the
  # in-container galaxy user (TODO confirm) — must be able to write to it.
  sudo chown 1450 /tmp && sudo chmod a=rwx /tmp
fi
## define a container size check function, first parameter is the container name, second the max allowed size in MB
container_size_check () {
    # check that the image size is not growing too much between releases
    # the 19.05 monolithic image was around 1.500 MB
    # BUG FIX: use command substitution $(...) — the original `"${docker image
    # inspect ...}"` was an invalid parameter expansion ("bad substitution")
    # and never queried the image size at all.
    size=$(docker image inspect "$1" --format='{{.Size}}')
    size_in_mb=$((size / (1024 * 1024)))
    if [[ $size_in_mb -ge $2 ]]
    then
        echo "The new compiled image ($1) is larger than allowed. $size_in_mb vs. $2"
        # Deliberately non-fatal for now: warn and pause instead of failing the build.
        sleep 2
        #exit
    fi
}
export WORKING_DIR=${GITHUB_WORKSPACE:-$PWD}
export DOCKER_RUN_CONTAINER="quay.io/bgruening/galaxy"
SAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml

# Optionally bind-mount a CI-specific container resolvers config (read-only).
GALAXY_EXTRA_MOUNTS=()
if [ -f "$WORKING_DIR/test/container_resolvers_conf.ci.yml" ]; then
  GALAXY_EXTRA_MOUNTS+=(-v "$WORKING_DIR/test/container_resolvers_conf.ci.yml:/etc/galaxy/container_resolvers_conf.yml:ro")
fi

cd "$WORKING_DIR"
# Build the monolithic image with GitHub Actions layer caching and load it
# into the local docker daemon (--load) so it can be run below.
docker buildx build \
  --load \
  --cache-from type=gha \
  --cache-to type=gha,mode=max \
  -t quay.io/bgruening/galaxy \
  galaxy/
#container_size_check quay.io/bgruening/galaxy 1500

# Remove leftovers from earlier runs, then start Galaxy with HTTP (8080->80),
# FTP (8021->21) and SSH/SFTP (8022->22) published on the host.
docker rm -f galaxy httpstest || true
mkdir -p local_folder
docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \
  --name galaxy \
  --privileged=true \
  -v "$(pwd)/local_folder:/export/" \
  "${GALAXY_EXTRA_MOUNTS[@]}" \
  -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
  -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
  -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
  -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
  -v /tmp/:/tmp/ \
  quay.io/bgruening/galaxy
# Give the container a head start before dumping its startup logs.
sleep 30
docker logs galaxy
# Define start functions
# docker_exec: run a command inside the running "galaxy" container.
# NOTE: the cd is a deliberate side effect — it resets the caller's working
# directory to $WORKING_DIR after per-test directory changes.
docker_exec() {
cd "$WORKING_DIR"
docker exec galaxy "$@"
}
# docker_exec_run: run a command in a fresh, throwaway container of the image.
docker_exec_run() {
cd "$WORKING_DIR"
docker run quay.io/bgruening/galaxy "$@"
}
# docker_run: plain "docker run" wrapper that first resets the working dir.
docker_run() {
cd "$WORKING_DIR"
docker run "$@"
}
docker ps
# Test submitting jobs to an external slurm cluster
cd "${WORKING_DIR}/test/slurm/" && bash test.sh && cd "$WORKING_DIR"
# Test submitting jobs to an external gridengine cluster
# NOTE(review): only this line carries an explicit "|| exit 1"; the slurm line
# above presumably relies on "set -e" from earlier in the script — confirm.
cd $WORKING_DIR/test/gridengine/ && bash test.sh || exit 1 && cd $WORKING_DIR
echo "SLURM and SGE tests have finished."
docker ps
echo 'Waiting for Galaxy to come up.'
# Poll the Galaxy API in short (30 s) probes until it responds or the overall
# timeout elapses, tailing the container logs between probes for diagnostics.
galaxy_wait_timeout=$GALAXY_WAIT_TIMEOUT
galaxy_wait_interval=30
galaxy_wait_end=$((SECONDS + galaxy_wait_timeout))
# FIX: track success explicitly. The original re-checked SECONDS after the
# loop, so a successful probe on the final iteration could still be reported
# as a timeout (SECONDS had already passed galaxy_wait_end) and exit 1.
galaxy_ready=false
while [ $SECONDS -lt $galaxy_wait_end ]; do
if galaxy_wait 30; then
galaxy_ready=true
break
fi
echo "Galaxy still starting, tailing logs..."
docker logs --tail 200 galaxy || true
sleep $galaxy_wait_interval
done
if [ "$galaxy_ready" != "true" ]; then
echo "Galaxy did not become ready within ${galaxy_wait_timeout}s."
docker logs --tail 400 galaxy || true
exit 1
fi
# Smoke-test the API of the freshly started instance.
curl -v --fail $BIOBLEND_GALAXY_URL/api/version
# Test self-signed HTTPS
docker_run -d --name httpstest -p 443:443 -e "USE_HTTPS=True" $DOCKER_RUN_CONTAINER
sleep 30
docker logs httpstest
# Give the container time to generate its certificate, then check the API and
# that the self-signed certificate was issued for "localhost".
sleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version
echo | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost
docker rm -f httpstest || true
# Test FTP Server upload
date > time.txt
# FIXME passive mode does not work, it would require the container to run with --net=host
#curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true
# Test FTP Server get
#curl -v --fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD
# Test SFTP Server
if [[ "$SKIP_SFTP" != "true" ]]; then
sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o "StrictHostKeyChecking no" localhost <<< $'put time.txt'
fi
# Test FTP Server from within the container (avoids host NAT/passive issues)
# FIX: the original piped the heredoc through docker_exec, whose "docker exec"
# lacks -i, so stdin was never attached and python ran an empty script —
# the FTP test silently tested nothing. Attach stdin explicitly with -i.
cd "$WORKING_DIR"
docker exec -i galaxy python - <<'PY'
import ftplib
ftp = ftplib.FTP()
ftp.connect("localhost", 21, timeout=30)
ftp.login("admin@example.org", "password")
ftp.retrlines("LIST")
ftp.quit()
PY
# Test CVMFS
docker_exec bash -c "service autofs start"
docker_exec bash -c "cvmfs_config chksetup"
docker_exec bash -c "ls /cvmfs/data.galaxyproject.org/byhand"
# Run a ton of BioBlend test against our servers.
cd "$WORKING_DIR/test/bioblend/" && . ./test.sh && cd "$WORKING_DIR/"
# Test without install-repository wrapper
# Install a tool directly via the tool_shed_repositories API endpoint.
# FIX: the original ran "curl -v --fail POST ..." — without -X, curl treated
# "POST" as an additional URL instead of setting the request method.
curl -v --fail -X POST -H "Content-Type: application/json" -H "x-api-key: fakekey" -d \
'{
"tool_shed_url": "https://toolshed.g2.bx.psu.edu",
"name": "cut_columns",
"owner": "devteam",
"changeset_revision": "cec635fab700",
"new_tool_panel_section_label": "BEDTools"
}' \
"http://localhost:8080/api/tool_shed_repositories"
# Test the 'new' tool installation script
docker_exec install-tools "$SAMPLE_TOOLS"
# Test the Conda installation
docker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'
# Test if data persistence works
docker stop galaxy
docker rm -f galaxy
cd "$WORKING_DIR"
# Restart a fresh container on the same /export volume; the tool installed
# above must still be available afterwards.
docker run -d -p 8080:80 \
--name galaxy \
--privileged=true \
-v "$(pwd)/local_folder:/export/" \
"${GALAXY_EXTRA_MOUNTS[@]}" \
-e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
-e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
-e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
-e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
-v /tmp/:/tmp/ \
quay.io/bgruening/galaxy
echo 'Waiting for Galaxy to come up.'
galaxy_wait "$GALAXY_WAIT_TIMEOUT"
# Test if the tool installed previously is available
curl -v --fail 'http://localhost:8080/api/tools/toolshed.g2.bx.psu.edu/repos/devteam/cut_columns/Cut1/1.0.2'
# analyze image using dive tool
if [[ "$SKIP_DIVE" == "true" ]]; then
echo "Skipping dive image analysis (dive not installed)."
else
# CI=true makes dive run non-interactively and honor the .dive-ci rules.
CI=true dive quay.io/bgruening/galaxy
fi
# Final cleanup.
docker stop galaxy
docker rm -f galaxy
docker rmi -f $DOCKER_RUN_CONTAINER || true
================================================
FILE: .github/workflows/single_container.yml
================================================
# CI: build the monolithic Galaxy image and run the end-to-end test script.
name: Single Container Test
on: [push, pull_request]
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Python version installed by actions/setup-python below.
        python-version: ['3.10']
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      # Move the Docker data-root to /mnt, which has more free space on
      # GitHub-hosted runners than the default root partition.
      - name: Configure Docker data-root
        run: |
          sudo mkdir -p /mnt/docker
          if [ ! -f /etc/docker/daemon.json ]; then
            echo '{}' | sudo tee /etc/docker/daemon.json
          fi
          sudo jq '."data-root"="/mnt/docker"' /etc/docker/daemon.json > /tmp/docker_daemon.json
          sudo mv /tmp/docker_daemon.json /etc/docker/daemon.json
          sudo systemctl daemon-reload
          sudo systemctl restart docker
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}
      - name: Build and Test
        run: bash .github/workflows/single.sh
================================================
FILE: .github/workflows/update-site.yml
================================================
# Rebuild and publish the documentation site whenever README.md changes on main.
name: Deploy Documentation
on:
  push:
    branches:
      - main
    paths:
      - 'README.md'
jobs:
  deploy_docs:
    runs-on: ubuntu-latest
    steps:
      - name: Check out the repository
        uses: actions/checkout@v6
        with:
          # The gh-pages action below authenticates via its own token input.
          persist-credentials: false
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.12"
          cache: "pip"
      - name: Install python dependencies
        run: pip install -r docs/src/requirements.txt
      - name: Generate documentation
        run: python docs/src/generate_docs.py
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./docs
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Sphinx documentation
docs/_build/
# Export folder for docker-compose setup
compose/export
compose-v2/export
.DS_Store
================================================
FILE: .travis.yml
================================================
sudo: required
language: python
python: 3.10
services:
- docker
env:
matrix:
- TOX_ENV=py310
global:
- secure: "SEjcKJQ0NGXdpFxFhLVlyJmiBvgiLtR5Uufg90Vm3owKlMy0NSfIrOR+2dwNniqOp7QI3eVepnqjid/Ka0QStzVqMCe55OLkJ/TbTHnMLpbtY63mpGfogVRvxMMAVpzLpcQqtJFORZmO/MIWSLlBiXMMzOg3+tbXvQXmL17Rbmw="
matrix:
allow_failures:
- env: KUBE=True
git:
submodules: false
before_install:
- set -e
- export GALAXY_HOME=/home/galaxy
- export GALAXY_USER=admin@example.org
- export GALAXY_USER_EMAIL=admin@example.org
- export GALAXY_USER_PASSWD=password
- export BIOBLEND_GALAXY_API_KEY=fakekey
- export BIOBLEND_GALAXY_URL=http://localhost:8080
- sudo apt-get update -qq
- sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::="--force-confmiss" -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew"
- sudo apt-get install sshpass --no-install-recommends -y
- pip install ephemeris
- docker --version
- docker info
# start building this repo
- sudo chown 1450 /tmp && sudo chmod a=rwx /tmp
- export WORKING_DIR="$TRAVIS_BUILD_DIR"
- export DOCKER_RUN_CONTAINER="quay.io/bgruening/galaxy"
- export INSTALL_REPO_ARG=""
- export SAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml
- travis_wait 30 cd "$WORKING_DIR" && docker build -t quay.io/bgruening/galaxy galaxy/
- |
## define a container size check function, first parameter is the container name, second the max allowed size in MB
container_size_check () {
# check that the image size is not growing too much between releases
# the 19.05 monolithic image was around 1.500 MB
size=`docker image inspect $1 --format='{{.Size}}'`
size_in_mb=$(($size/(1024*1024)))
if [[ $size_in_mb -ge $2 ]]
then
echo "The new compiled image ($1) is larger than allowed. $size_in_mb vs. $2"
sleep 2
#exit
fi
}
container_size_check quay.io/bgruening/galaxy 1500
mkdir local_folder
docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \
--name galaxy \
--privileged=true \
-v `pwd`/local_folder:/export/ \
-e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \
-e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \
-e GALAXY_CONFIG_ALLOW_USER_DELETION=True \
-e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \
-v /tmp/:/tmp/ \
quay.io/bgruening/galaxy
sleep 30
docker logs galaxy
# Define start functions
docker_exec() {
cd $WORKING_DIR
docker exec -t -i galaxy "$@"
}
docker_exec_run() {
cd $WORKING_DIR
docker run quay.io/bgruening/galaxy "$@"
}
docker_run() {
cd $WORKING_DIR
docker run "$@"
}
- docker ps
script:
- set -e
# Test submitting jobs to an external slurm cluster
- cd $TRAVIS_BUILD_DIR/test/slurm/ && bash test.sh && cd $WORKING_DIR
# Test submitting jobs to an external gridengine cluster
# TODO 19.05, need to enable this again!
# - cd $TRAVIS_BUILD_DIR/test/gridengine/ && bash test.sh && cd $WORKING_DIR
- echo 'Waiting for Galaxy to come up.'
- galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 600
- curl -v --fail $BIOBLEND_GALAXY_URL/api/version
# Test self-signed HTTPS
- docker_run -d --name httpstest -p 443:443 -e "USE_HTTPS=True" $DOCKER_RUN_CONTAINER
- sleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version
- echo | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost
- docker logs httpstest && docker stop httpstest && docker rm httpstest
# Test FTP Server upload
- date > time.txt && curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true
# Test FTP Server get
- curl -v --fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD
# Test CVMFS
- docker_exec bash -c "service autofs start"
- docker_exec bash -c "cvmfs_config chksetup"
- docker_exec bash -c "ls /cvmfs/data.galaxyproject.org/byhand"
# Test SFTP Server
- sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o "StrictHostKeyChecking no" localhost <<< $'put time.txt'
# Run a ton of BioBlend test against our servers.
- cd $TRAVIS_BUILD_DIR/test/bioblend/ && . ./test.sh && cd $WORKING_DIR/
# not working anymore in 18.01
# executing: /galaxy_venv/bin/uwsgi --yaml /etc/galaxy/galaxy.yml --master --daemonize2 galaxy.log --pidfile2 galaxy.pid --log-file=galaxy_install.log --pid-file=galaxy_install.pid
# [uWSGI] getting YAML configuration from /etc/galaxy/galaxy.yml
# /galaxy_venv/bin/python: unrecognized option '--log-file=galaxy_install.log'
# getopt_long() error
# cat: galaxy_install.pid: No such file or directory
# tail: cannot open ‘galaxy_install.log’ for reading: No such file or directory
#- |
# if [ "${COMPOSE_SLURM}" ] || [ "${KUBE}" ] || [ "${COMPOSE_CONDOR_DOCKER}" ] || [ "${COMPOSE_SLURM_SINGULARITY}" ]
# then
# # Test without install-repository wrapper
# sleep 10
# docker_exec_run bash -c 'cd $GALAXY_ROOT_DIR && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools'
# fi
# Test the 'new' tool installation script
- docker_exec install-tools "$SAMPLE_TOOLS"
# Test the Conda installation
- docker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'
after_success:
- |
if [ "$TRAVIS_PULL_REQUEST" == "false" -a "$TRAVIS_BRANCH" == "master" ]
then
cd ${TRAVIS_BUILD_DIR}
echo "Generate and deploy html documentation"
./docs/bin/deploy_docs
fi
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/559f5480ac7a4ef238af
on_success: change
on_failure: always
on_start: never
================================================
FILE: Changelog.md
================================================
# Changelog
## 0.1: Initial release!
- with Apache2, PostgreSQL and Tool Shed integration
## 0.2: complete new Galaxy stack.
- with nginx, uwsgi, proftpd, docker, supervisord and SLURM
## 0.3: Add Interactive Environments
- IPython in docker in Galaxy in docker
- advanced logging
## 0.4:
- base the image on toolshed/requirements with all required Galaxy dependencies
- use Ansible roles to build large parts of the image
- export the supervisord web interface on port 9002
- enable Galaxy reports webapp
## 15.07:
- `install-biojs` can install BioJS visualisations into Galaxy
- `add-tool-shed` can be used to activate third party Tool Sheds in child Dockerfiles
- many documentation improvements
- RStudio is now part of Galaxy and this Image
- configurable postgres UID/GID by @chambm
- smarter starting of postgres during Tool installations by @shiltemann
## 15.10:
- new Galaxy 15.10 release
- fix https://github.com/bgruening/docker-galaxy-stable/issues/94
## 16.01:
- enable Travis testing for all builds and PR
- offer new [yaml based tool installations](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample)
- enable dynamic UWSGI processes and threads with `-e UWSGI_PROCESSES=2` and `-e UWSGI_THREADS=4`
- enable dynamic Galaxy handlers `-e GALAXY_HANDLER_NUMPROCS=2`
- Addition of a new `lite` mode contributed by @kellrott
- first release with Jupyter integration
## 16.04:
- include a Galaxy-bare mode, enable with `-e BARE=True`
- first release with [HTCondor](https://research.cs.wisc.edu/htcondor/) installed and pre-configured
## 16.07:
- documentation and tests updates for SLURM integration by @mvdbeek
- first version with initial Docker compose support (proftpd ✔️)
- SFTP support by @zfrenchee
## 16.10:
- [HTTPS support](https://github.com/bgruening/docker-galaxy-stable/pull/240 ) by @zfrenchee and @mvdbeek
## 17.01:
- enable Conda dependency resolution by default
- [new Galaxy version](https://docs.galaxyproject.org/en/master/releases/17.01_announce.html)
- more compose work (slurm, postgresql)
## 17.05:
- add PROXY_PREFIX variable to enable automatic configuration of Galaxy running under some prefix (@abretaud)
- enable quota by default (just the functionality, not any specific value)
- HT-Condor is now supported in compose with semi-autoscaling and BioContainers
- Galaxy Docker Compose is completely under Travis testing and available with SLURM and HT-Condor
- using Docker `build-arg`s for GALAXY_RELEASE and GALAXY_REPO
## 17.09:
- much improved documentation about using Galaxy Docker and an external cluster (@rhpvorderman)
- CVMFS support - mounting in 4TB of pre-build reference data (@chambm)
- Singularity support and tests (compose only)
- more work on K8s support and testing (@jmchilton)
- using .env files to configure the compose setup for SLURM, Condor, K8s, SLURM-Singularity, Condor-Docker
## 18.01:
- tracking the Galaxy release_18.01 branch
- uwsgi work to adopt to changes for 18.01
- remove nodejs-legacy & npm from Dockerfile and install latest version from ansible-extras
- initial galaxy.ini → galaxy.yml integration
- grafana and influxdb container (compose)
- Galaxy telegraf integration to push to influxdb (compose)
- added some documentation (compose)
## 18.05:
- Nothing very special, but an awesome Galaxy release as usual
## 18.09:
- new and more powerful orchestration build script (build-orchestration-images.sh) by @pcm32
- a lot of bug-fixes to the compose setup by @abretaud
## 19.01:
- This is featuring the latest and greatest from the Galaxy community
- Please note that this release will be the last release which is based on `ubuntu:14.04` and PostgreSQL 9.3.
We will migrate to `ubuntu:18.04` and a newer PostgreSQL version in `19.05`. Furthermore, we will not
support old Galaxy tool dependencies.
## 19.05:
- The image is now based on `ubuntu:18.04` (instead of ubuntu:14.04 previously) and PostgreSQL 11.5 (9.3 previously).
See [migration documentation](#Postgresql-migration) to migrate the postgresql database from 9.3 to 11.5.
- We no longer support old Galaxy tool dependencies.
## 20.05:
- Featuring Galaxy 20.05
- Completely reworked compose setup
- The default admin password and apikey (`GALAXY_DEFAULT_ADMIN_PASSWORD` and `GALAXY_DEFAULT_ADMIN_KEY`) have changed: the password is now `password` (instead of `admin`) and the apikey `fakekey` (instead of `admin`).
## 20.09:
- Featuring Galaxy 20.09
## 24.1:
- Deprecating the `compose` setup.
- Complete new setup, adjusting to the latest Galaxy stack.
- Base Ubuntu Image: Upgraded from version 18.04 to 22.04
- Galaxy: Upgraded from version 20.09 to 24.1
- PostgreSQL: Upgraded from version 11 to 15
- Python3: Upgraded from version 3.7 to 3.10 (Python 3.10 is set as the default interpreter)
- The dockerfile now uses a multi-stage build to reduce the final image size and include only necessary files.
- New Service Support:
- Gunicorn: Replaces uWSGI as the web server for Galaxy. Installed by default inside Galaxy's virtual environment. Configured Nginx to proxy Gunicorn enabled on port 4001.
- Celery: Installed by default inside Galaxy's virtual environment. Enabled Celery for distributed task queues and Celery Beat for periodic task running. RabbitMQ serves as the broker for Celery (if RabbitMQ is disabled, it defaults to PostgreSQL database connection).
- Redis is used as the backend for Celery (if Redis is disabled, it defaults to a SQLite database). Flower service is added for monitoring and debugging Celery.
- RabbitMQ Management: Enabled the RabbitMQ management plugin on port 15672 for managing and monitoring the RabbitMQ server. The dashboard is exposed via Nginx and is accessible at the /rabbitmq/ path. The default access credentials are admin:admin.
- Flower: Added Flower service on port 5555 for monitoring and debugging Celery. The dashboard is exposed via Nginx and is available at the /flower/ path. The default access credentials are admin:admin.
- TUSd: Added TUSd server on port 1080 to support fault-tolerant uploads; Nginx is configured to proxy TUSd.
- gx-it-proxy: Added gx-it-proxy service on port 4002 to support Interactive Tools.
- Ansible Playbooks:
- Migrated from the galaxyextras git submodule to using maintained ansible roles.
- Added configure_rabbitmq_users.yml Ansible playbook, which removes the default guest user and adds admin, galaxy, and flower users for RabbitMQ during container startup.
- Environment Variables:
- Added `GUNICORN_WORKERS` and `CELERY_WORKERS` magic environment variables to set the number of Gunicorn and Celery workers, respectively, during container startup.
- Configuration Changes:
- Replaced the Galaxy Reports sample configuration file.
- Removed galaxy_web, handlers, reports, and ie_proxy services from Supervisor.
- Added Gravity for managing Galaxy services such as Gunicorn, Celery, gx-it-proxy, TUSd, reports, and handlers. It uses Supervisor as the process manager, with the configuration file located at /etc/galaxy/gravity.yml.
- Added support for dynamic handlers (set as the default handler type).
- Redis and Flower services are now managed by Supervisor.
- Since Galaxy Interactive Environments are deprecated, they have been replaced by Interactive Tools (ITs). The sample configuration file tools_conf_interactive.xml.sample is placed inside GALAXY_CONFIG_DIR. Nginx is also configured to support both domain and path-based ITs.
- Switched to using the cvmfs-config.galaxyproject.org repository for automatic configuration and updates of Galaxy project CVMFS repositories. Updated tool data table config path to include CVMFS locations from data.galaxyproject.org in --privileged mode.
- Enabled IPv6 support in Nginx for ports 80 and 443.
- Added Subject Alternative Name (SAN) extension (DNS:localhost and IP:127.0.0.1) while generating a self-signed SSL certificate.
- Ensured the Nginx SSL certificate is trusted system-wide by adding it to the CA store.
- Updated Galaxy extra dependencies.
- Added docker_net, docker_auto_rm, and docker_set_user parameters for Docker-enabled job destinations.
- Added update_yaml_value.py script to update nested key values in a YAML file.
- Replaced ie_proxy with gx-it-proxy.
- Replaced nginx_upload_module with TUSd for delegated uploads.
- CI Tests
- Added dive tool for analyzing the docker image
- Added test for check data persistence
================================================
FILE: LICENSE
================================================
The MIT License (MIT)
Copyright (c) 2014 Björn Grüning
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
[](https://zenodo.org/badge/latestdoi/5466/bgruening/docker-galaxy-stable)
[](https://travis-ci.org/bgruening/docker-galaxy-stable)
[](https://quay.io/repository/bgruening/galaxy)
[](https://gitter.im/bgruening/docker-galaxy-stable?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 
[](https://microbadger.com/images/bgruening/galaxy-stable "Get your own image badge on microbadger.com")
Galaxy Docker Image
===================
The [Galaxy](http://www.galaxyproject.org) [Docker](http://www.docker.io) Image is an easy distributable full-fledged Galaxy installation, that can be used for testing, teaching and presenting new tools and features.
One of the main goals is to make access to entire tool suites as easy as possible. Usually,
this includes the setup of a public available web-service that needs to be maintained, or that the Tool-user needs to either setup a Galaxy Server by its own or to have Admin access to a local Galaxy server.
With docker, tool developers can create their own Image with all dependencies and the user only needs to run it within docker.
The Image is based on [Ubuntu 24.04 LTS](http://releases.ubuntu.com/24.04/) and all recommended Galaxy requirements are installed. The following chart should illustrate the [Docker](http://www.docker.io) image hierarchy we have built to make it as easy as possible to build on different layers of our stack and create many exciting Galaxy flavors.

Breaking changes
================
:information_source: After a long pause, due to interesting times at the beginning of the "golden 2020s", we are finally back with release `24.1`. Many things have changed in Galaxy.
It is deployed completely differently and gained many new features with many new dependencies. We recommend starting with a fresh `/export` folder and contacting us if you encounter any problems.
# Table of Contents
- [Usage](#Usage)
- [Upgrading images](#Upgrading-images)
- [PostgreSQL migration](#Postgresql-migration)
- [Enabling Interactive Tools in Galaxy](#Enabling-Interactive-Tools-in-Galaxy)
- [Using passive mode FTP or SFTP](#Using-passive-mode-FTP-or-SFTP)
- [Using Parent docker](#Using-Parent-docker)
- [RabbitMQ Management](#RabbitMQ-Management)
- [Flower Webapp](#Flower-Webapp)
- [Galaxy's config settings](#Galaxys-config-settings)
- [Configuring Galaxy's behind a proxy](#Galaxy-behind-proxy)
- [On-demand reference data with CVMFS](#cvmfs)
- [Personalize your Galaxy](#Personalize-your-Galaxy)
- [Deactivating services](#Deactivating-services)
- [Restarting Galaxy](#Restarting-Galaxy)
- [Advanced Logging](#Advanced-Logging)
- [Running on an external cluster (DRM)](#Running-on-an-external-cluster-(DRM))
- [Basic setup for the filesystem](#Basic-setup-for-the-filesystem)
- [Using an external Slurm cluster](#Using-an-external-Slurm-cluster)
- [Using an external Grid Engine cluster](#Using-an-external-Grid-Engine-cluster)
- [Tips for Running Jobs Outside the Container](#Tips-for-Running-Jobs-Outside-the-Container)
- [Enable Galaxy to use BioContainers (Docker)](#auto-exec-tools-in-docker)
- [Magic Environment variables](#Magic-Environment-variables)
- [HTTPS Support](#HTTPS-Support)
- [Lite Mode](#Lite-Mode)
- [Extending the Docker Image](#Extending-the-Docker-Image)
- [List of Galaxy flavours](#List-of-Galaxy-flavours)
- [Integrating non-Tool Shed tools into the container](#Integrating-non-Tool-Shed-tools-into-the-container)
- [Users & Passwords](#Users-Passwords)
- [Development](#Development)
- [Requirements](#Requirements)
- [Changelog](./Changelog.md)
- [Support & Bug Reports](#Support-Bug-Reports)
# Usage [[toc]](#toc)
This chapter explains how to launch the container manually.
At first you need to install docker. Please follow the [very good instructions](https://docs.docker.com/installation/) from the Docker project.
After the successful installation, all you need to do is:
```sh
docker run -d -p 8080:80 -p 8021:21 -p 8022:22 quay.io/bgruening/galaxy
```
I will shortly explain the meaning of all the parameters. For a more detailed description please consult the [docker manual](http://docs.docker.io/), it's really worth reading.
Let's start:
- `docker run` will run the Image/Container for you.
In case you do not have the Container stored locally, docker will download it for you.
- `-p 8080:80` will make the port 80 (inside of the container) available on port 8080 on your host. Same holds for port 8021 and 8022, that can be used to transfer data via the FTP or SFTP protocol, respectively.
Inside the container a nginx Webserver is running on port 80 and that port can be bound to a local port on your host computer. With this parameter you can access your Galaxy
instance via `http://localhost:8080` immediately after executing the command above. If you work with the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) on Mac or Windows,
you need to connect to the machine generated by 'Docker Quickstart'. You get its IP address from `docker-machine ls` or from the first line in the terminal, e.g.: `docker is configured to use the default machine with IP 192.168.99.100`.
- `quay.io/bgruening/galaxy` is the Image/Container name, that directs docker to the correct path in the [docker index](https://quay.io/repository/bgruening/galaxy?tab=tags).
- `-d` will start the docker container in daemon mode.
For an interactive session, you can execute:
```sh
docker run -i -t -p 8080:80 \
quay.io/bgruening/galaxy \
/bin/bash
```
and run the `startup` script by yourself, to start PostgreSQL, nginx and Galaxy.
Docker images are "read-only", all your changes inside one session will be lost after restart. This mode is useful to present Galaxy to your colleagues or to run workshops with it. To install Tool Shed repositories or to save your data you need to export the calculated data to the host computer.
Fortunately, this is as easy as:
```sh
docker run -d -p 8080:80 \
-v /home/user/galaxy_storage/:/export/ \
quay.io/bgruening/galaxy
```
With the additional `-v /home/user/galaxy_storage/:/export/` parameter, Docker will mount the local folder `/home/user/galaxy_storage` into the Container under `/export/`. A `startup.sh` script, that is usually starting nginx, PostgreSQL and Galaxy, will recognize the export directory with one of the following outcomes:
- In case of an empty `/export/` directory, it will move the [PostgreSQL](http://www.postgresql.org/) database, the Galaxy database directory, Shed Tools and Tool Dependencies and various config scripts to /export/ and symlink back to the original location.
- In case of a non-empty `/export/`, for example if you continue a previous session within the same folder, nothing will be moved, but the symlinks will be created.
This enables you to have different export folders for different sessions - means real separation of your different projects.
To detect when the Galaxy distribution in the image changes, the container writes a marker at
`/export/.galaxy_export_marker`. You can override the marker value with `GALAXY_EXPORT_MARKER` if you
need deterministic export refresh behavior.
You can also collect and store `/export/` data of Galaxy instances in a dedicated docker [Data volume Container](https://docs.docker.com/engine/userguide/dockervolumes/) created by:
```sh
docker create -v /export \
--name galaxy-store \
quay.io/bgruening/galaxy \
/bin/true
```
To mount this data volume in a Galaxy container, use the `--volumes-from` parameter:
```sh
docker run -d -p 8080:80 \
--volumes-from galaxy-store \
quay.io/bgruening/galaxy
```
This also allows for data separation, but keeps everything encapsulated within the docker engine (e.g. on OS X within your `$HOME/.docker` folder) - easy to backup, archive and restore. This approach, albeit at the expense of disk space, avoids the problems with permissions [reported](https://github.com/bgruening/docker-galaxy-stable/issues/68) for data export on non-Linux hosts.
## Upgrading images [[toc]](#toc)
We will release a new version of this image concurrent with every new Galaxy release. For upgrading an image to a new version we have assembled a few hints for you. Please, take in account that upgrading may vary depending on your Galaxy installation, and the changes in new versions. Use this example carefully!
* Create a test instance with only the database and configuration files. This will allow testing to ensure that things run but won't require copying all of the data.
* New unmodified configuration files are always stored in a hidden directory called `.distribution_config`. Use this folder to diff your configurations with the new configuration files shipped with Galaxy. This prevents needing to go through the change log files to find out which new files were added or which new features you can activate.
Here are 2 suggested upgrade methods, a quick one, and a safer one.
### The quick upgrade method
This method involves less data copying, which makes the process quicker, but makes it impossible to downgrade in case of problems.
If you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).
1. Stop the old Galaxy container
```sh
docker stop
docker pull quay.io/bgruening/galaxy
```
2. Run the container with the updated image
```sh
docker run -p 8080:80 -v /data/galaxy-data:/export --name galaxy-instance quay.io/bgruening/galaxy
```
3. Use diff to find changes in the config files (only if you changed any config file).
```sh
cd /data/galaxy-data/.distribution_config
for f in *; do echo $f; diff $f ../galaxy/config/$f; read; done
```
4. Upgrade the database schema
```sh
docker exec -it bash
galaxyctl stop
sh manage_db.sh upgrade
exit
```
5. Restart Galaxy
```sh
docker exec -it galaxyctl start
```
(Alternatively, restart the whole container)
### The safe upgrade method
With this method, you keep a backup in case you decide to downgrade, but requires some potentially long data copying.
* Note that copying database and datasets can be expensive if you have many GB of data.
* If you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).
1. Download newer version of the Galaxy image
```
$ sudo docker pull quay.io/bgruening/galaxy
```
2. Stop and rename the current galaxy container
```
$ sudo docker stop galaxy-instance
$ sudo docker rename galaxy-instance galaxy-instance-old
```
3. Rename the data directory (the one that is mounted to /export in the docker)
```
$ sudo mv /data/galaxy-data /data/galaxy-data-old
```
4. Run a new Galaxy container using newer image and wait while Galaxy generates the default content for /export
```
$ sudo docker run -p 8080:80 -v /data/galaxy-data:/export --name galaxy-instance quay.io/bgruening/galaxy
```
5. Stop the Galaxy container
```
$ sudo docker stop galaxy-instance
```
6. Replace the content of the postgres database by the old db data
```
$ sudo rm -r /data/galaxy-data/postgresql/
$ sudo rsync -var /data/galaxy-data-old/postgresql/ /data/galaxy-data/postgresql/
```
7. Use diff to find changes in the config files (only if you changed any config file).
```
$ cd /data/galaxy-data/.distribution_config
$ for f in *; do echo $f; diff $f ../../galaxy-data-old/galaxy/config/$f; read; done
```
8. Copy all the users' datasets to the new instance
```
$ sudo rsync -var /data/galaxy-data-old/galaxy/database/files/* /data/galaxy-data/galaxy/database/files/
```
9. Copy all the installed tools
```
$ sudo rsync -var /data/galaxy-data-old/tool_deps/* /data/galaxy-data/tool_deps/
$ sudo rsync -var /data/galaxy-data-old/galaxy/database/shed_tools/* /data/galaxy-data/galaxy/database/shed_tools/
$ sudo rsync -var /data/galaxy-data-old/galaxy/database/config/* /data/galaxy-data/galaxy/database/config/
```
10. Copy the welcome page and all its files.
```
$ sudo rsync -var /data/galaxy-data-old/welcome* /data/galaxy-data/
```
11. Create an auxiliary docker in interactive mode and upgrade the database.
```
$ sudo docker run -it --rm -v /data/galaxy-data:/export quay.io/bgruening/galaxy /bin/bash
# Startup all processes
> startup &
#Upgrade the database to the most recent version
> sh manage_db.sh upgrade
#Logout
> exit
```
12. Start the docker and test
```
$ sudo docker start galaxy-instance
```
13. Clean the old container and image
### Postgresql migration [[toc]](#toc)
In the 19.05 version, Postgresql was updated from version 9.3 to version 11.5. If you are upgrading from a version <19.05, you will need to migrate the database.
You can do it the following way (based on the "The quick upgrade method" above):
1. Stop Galaxy in the old container
```sh
docker exec -it <container name> galaxyctl stop
```
2. Dump the old database
```sh
docker exec -it <container name> bash
su postgres
pg_dumpall --clean > /export/postgresql/9.3dump.sql
exit
exit
```
3. Update the container (= step 1 of the "The quick upgrade method" above)
```sh
docker stop <container name>
docker pull quay.io/bgruening/galaxy
```
4. Run the container with the updated image (= step 2 of the "The quick upgrade method" above)
```sh
docker run -p 8080:80 -v /data/galaxy-data:/export --name <container name> quay.io/bgruening/galaxy
```
5. Restore the dump to the new postgres version
Wait for the startup process to finish (Galaxy should be accessible)
```sh
docker exec -it <container name> bash
galaxyctl stop
su postgres
psql -f /export/postgresql/9.3dump.sql postgres
exit
exit
```
6. Use diff to find changes in the config files (only if you changed any config file). (= step 3 of the "The quick upgrade method" above)
```sh
cd /data/galaxy-data/.distribution_config
for f in *; do echo $f; diff $f ../galaxy/config/$f; read; done
```
7. Upgrade the database schema (= step 4 of the "The quick upgrade method" above)
```sh
docker exec -it <container name> bash
galaxyctl stop
sh manage_db.sh upgrade
exit
```
8. Restart Galaxy (= step 5 of the "The quick upgrade method" above)
```sh
docker exec -it <container name> galaxyctl start
```
(Alternatively, restart the whole container)
9. Clean old files
If you are *very* sure that everything went well, you can delete `/export/postgresql/9.3dump.sql` and `/export/postgresql/9.3/` to save some space.
## Enabling Interactive Tools in Galaxy [[toc]](#toc)
Interactive Tools (IT) are sophisticated ways to extend Galaxy with powerful services, like [Jupyter](http://jupyter.org/), in a secure and reproducible way.
For this we need to be able to launch Docker containers inside our Galaxy Docker container.
```sh
docker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \
--privileged=true \
-v /home/user/galaxy_storage/:/export/ \
quay.io/bgruening/galaxy
```
The port 4002 is the proxy port that is used to handle Interactive Tools. `--privileged` is needed to start docker containers inside docker.
Additionally, you can set the `GALAXY_DOMAIN` environment variable to specify the domain name for your Galaxy instance to ensure that domain-based ITs work correctly. By default, it is set to `localhost`. If you have your own domain, you can specify it instead.
If you're using the default job configuration, set the `GALAXY_DESTINATIONS_DEFAULT` environment variable to a Docker-enabled destination. By default, this is set to `slurm_cluster`, so you'll need to update it accordingly. Alternatively, you can also provide your own job configuration file.
```sh
docker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \
--privileged=true \
-v /home/user/galaxy_storage/:/export/ \
-e "GALAXY_DOMAIN=your.domain.com" \
-e "GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker" \
quay.io/bgruening/galaxy
```
## Using passive mode FTP or SFTP [[toc]](#toc)
By default, FTP servers running inside of docker containers are not accessible via passive mode FTP, due to not being able to expose extra ports. To circumvent this, you can use the `--net=host` option to allow Docker to directly open ports on the host server:
```sh
docker run -d \
--net=host \
-v /home/user/galaxy_storage/:/export/ \
quay.io/bgruening/galaxy
```
Note that there is no need to specifically bind individual ports (e.g., `-p 80:80`) if you use `--net`.
An alternative to FTP and its shortcomings is to use the SFTP protocol via port 22. Start your Galaxy container with a port binding to 22.
```sh
docker run -i -t -p 8080:80 -p 8022:22 \
-v /home/user/galaxy_storage/:/export/ \
quay.io/bgruening/galaxy
```
And use for example [Filezilla](https://filezilla-project.org/) or the `sftp` program to transfer data:
```sh
sftp -v -P 8022 -o User=admin@example.org localhost <<< $'put <file name>'
```
## Using Parent docker [[toc]](#toc)
On some linux distributions, Docker-In-Docker can run into issues (such as running out of loopback interfaces). If this is an issue, you can use a 'legacy' mode that uses a docker socket from the parent docker installation mounted inside the container. To engage, set the environment variable `DOCKER_PARENT`
```sh
docker run -p 8080:80 -p 8021:21 \
--privileged=true -e DOCKER_PARENT=True \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /home/user/galaxy_storage/:/export/ \
quay.io/bgruening/galaxy
```
## RabbitMQ Management [[toc]](#toc)
RabbitMQ is used as the broker for services like Celery. RabbitMQ provides a dedicated web interface for managing message queues, accessible at `http://localhost:8080/rabbitmq/`. This interface allows you to monitor queues, exchanges, bindings, and more. By default, it is password protected with `admin:admin`, but the credentials can be changed after logging in.
To completely disable RabbitMQ, you can set the `NONUSE` environment variable during container startup.
```sh
docker run -p 8080:80 \
-e "NONUSE=rabbitmq" \
quay.io/bgruening/galaxy
```
## Flower Webapp [[toc]](#toc)
Flower is a web-based tool for monitoring and administering Celery. It is accessible at `http://localhost:8080/flower`. By default, this site is password protected with `admin:admin`. You can change this by providing a `common_htpasswd` file in `/home/user/galaxy_storage/`.
The Flower Webapp will only be available if both Celery and RabbitMQ are enabled, meaning the environment variable `NONUSE` does not include `celery` and `rabbitmq`. To completely disable the Flower Webapp, you can set the `NONUSE` environment variable during container startup.
```sh
docker run -p 8080:80 \
-e "NONUSE=flower" \
quay.io/bgruening/galaxy
```
## Galaxy's config settings [[toc]](#toc)
Every Galaxy configuration parameter in `config/galaxy.yml` can be overwritten by passing an environment variable to the `docker run` command during startup. The name of the environment variable has to be:
`GALAXY_CONFIG_` + *the_original_parameter_name_in_capital_letters*
For example, you can set the Galaxy session timeout to 5 minutes and set your own Galaxy brand by invoking the `docker run` like this:
```sh
docker run -p 8080:80 \
-e "GALAXY_CONFIG_BRAND='My own Galaxy flavour'" \
-e "GALAXY_CONFIG_SESSION_DURATION=5" \
quay.io/bgruening/galaxy
```
Note, that if you would like to run any of the [cleanup scripts](https://galaxyproject.org/admin/config/performance/purge-histories-and-datasets/), you will need to add the following to `/export/galaxy/config/galaxy.yml`:
```
database_connection = postgresql://galaxy:galaxy@localhost:5432/galaxy
file_path = /export/galaxy/database/files
```
## Security Configuration
*By default* the `admin_users` and `bootstrap_admin_api_key` variables are set to:
```
admin_users: admin@example.org
bootstrap_admin_api_key: HSNiugRFvgT574F43jZ7N9F3
```
Additionally, Galaxy encodes various internal values that can be part of output using a secret string configurable as `id_secret` in the config file (use 5-65 bytes long string).
This prevents 'guessing' of Galaxy's internal database sequences. Example:
```
id_secret: d5c910cc6e32cad08599987ab64dcfae
```
You should manually change all three configuration variables above in `/export/galaxy/config/galaxy.yml`.
Alternatively, you can pass the security configuration when running the image but please note that it is a security problem.
E.g. if a tool exposes all `env`'s your secret API key will also be exposed.
In addition with 24.2 we enabled Galaxy Vault configuration. This enables users to store secrets in a user-owned password safe, called vault.
It is highly recommended to change the pre-configured key under `$GALAXY_CONFIG_DIR/vault_conf.yml` following the instructions inside the file.
## Configuring Galaxy's behind a proxy [[toc]](#toc)
If your Galaxy docker instance is running behind an HTTP proxy server, and if you're accessing it with a specific path prefix (e.g. http://www.example.org/some/prefix/), you need to make Galaxy aware of it. There is an environment variable available to do so:
```
PROXY_PREFIX=/some/prefix
```
You can and should overwrite these during launching your container:
```sh
docker run -p 8080:80 \
-e "PROXY_PREFIX=/some/prefix" \
quay.io/bgruening/galaxy
```
## On-demand reference data with CVMFS [[toc]](#toc)
By default, Galaxy instances launched with this image will have on-demand access to approximately 4TB of
reference genomes and indexes. These are the same reference data available on the main Galaxy server.
This is achieved by connecting to Galaxy's CernVM filesystem (CVMFS) at `cvmfs-config.galaxyproject.org` repository, which provides automatic configuration for all galaxyproject.org CVMFS repositories, including `data.galaxyproject.org`, and ensures they remain up to date.
The CVMFS capability doesn't add to the size of the Docker image, but when running, CVMFS maintains
a cache to keep the most recently used data on the local disk.
*Note*: for CVMFS directories to be mounted-on-demand with `autofs`, you must launch Docker as `--privileged`.
If privileged mode is not an option, use the optional CVMFS sidecar in `galaxy/docker-compose.yaml`:
```sh
cd galaxy
CVMFS_MOUNT_DIR=/cvmfs EXPORT_DIR=./export docker compose --profile cvmfs up
```
This starts a dedicated CVMFS container that mounts the repositories and shares `/cvmfs` with the Galaxy
container. The CVMFS cache is persisted in `${EXPORT_DIR}/cvmfs-cache`.
## Personalize your Galaxy [[toc]](#toc)
The Galaxy welcome screen can be changed by providing a `welcome.html` page in `/home/user/galaxy_storage/`. All files starting with `welcome` will be copied during startup and served as introduction page. If you want to include images or other media, name them `welcome_*` and link them relative to your `welcome.html` ([example](https://github.com/bgruening/docker-galaxy-stable/blob/master/galaxy/welcome.html)).
## Deactivating services [[toc]](#toc)
Non-essential services can be deactivated during startup. Set the environment variable `NONUSE` to a comma separated list of services. Currently, `postgres`, `cron`, `proftp`, `nodejs`, `condor`, `slurmd`, `slurmctld`, `celery`, `rabbitmq`, `redis`, `flower` and `tusd` are supported.
```sh
docker run -d -p 8080:80 -p 9002:9002 \
-e "NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd" \
quay.io/bgruening/galaxy
```
A graphical user interface for starting/stopping services is available on port `9002` if you map it (e.g. `-p 9002:9002`).
This is the Supervisor web UI and it is unauthenticated by default, so only expose it on trusted networks or adjust the
Supervisor credentials in the image build.
## Restarting Galaxy [[toc]](#toc)
If you want to restart Galaxy without restarting the entire Galaxy container you can use `docker exec` (docker > 1.3).
```sh
docker exec <container name> galaxyctl restart
```
To restart only web workers or handlers:
```sh
docker exec <container name> galaxyctl restart gunicorn
docker exec <container name> galaxyctl restart handler
```
Use `galaxyctl --help` for service names available in your configuration.
In addition, you can start/stop every supervisord process using a web interface on port `9002`. Start your container with:
```sh
docker run -p 9002:9002 quay.io/bgruening/galaxy
```
## Advanced Logging [[toc]](#toc)
You can set the environment variable $GALAXY_LOGGING to FULL to access all logs from supervisor. For example start your container with:
```sh
docker run -d -p 8080:80 -p 8021:21 \
-e "GALAXY_LOGGING=full" \
quay.io/bgruening/galaxy
```
Then, you can access the supervisord web interface on port `9002` and get access to log files. To do so, start your container with:
```sh
docker run -d -p 8080:80 -p 8021:21 -p 9002:9002 \
-e "GALAXY_LOGGING=full" \
quay.io/bgruening/galaxy
```
Alternatively, you can access the container directly using the following command:
```sh
docker exec -it <container name> bash
```
Once connected to the container, log files are available in `/home/galaxy/logs`.
A volume can also be used to map this directory to one external to the container - for instance if logs need to be persisted for auditing reasons (security, debugging, performance testing, etc...).:
```sh
mkdir gx_logs
docker run -d -p 8080:80 -p 8021:21 -e "GALAXY_LOGGING=full" -v `pwd`/gx_logs:/home/galaxy/logs quay.io/bgruening/galaxy
```
## Running on an external cluster (DRM) [[toc]](#toc)
### Basic setup for the filesystem [[toc]](#toc)
#### The easy way
The easiest way is to create a `/export` mount point on the cluster and mount the container with `/export:/export`.
#### Not using the /export mount point on the cluster.
The docker container sets up all its files on the /export directory, but this directory may not exist on the cluster filesystem. This can be solved with symbolic links on the cluster filesystem but it can also be solved within the container itself.
In this example configuration the cluster file system has a directory `/cluster_storage/galaxy_data` which is accessible for the galaxy user in the container (UID 1450) and the user starting the container.
The container should be started with the following settings configured:
```bash
docker run -d -p 8080:80 -p 8021:21
-v /cluster_storage/galaxy_data/galaxy_export:/export # This makes sure all galaxy files are on the cluster filesystem
-v /cluster_storage/galaxy_data:/cluster_storage/galaxy_data # This ensures the links within the docker container and on the cluster fs are the same
# The following settings make sure that each job is configured with the paths on the cluster fs instead of /export
-e GALAXY_CONFIG_TOOL_DEPENDENCY_DIR="/cluster_storage/galaxy_data/galaxy_export/tool_deps"
-e GALAXY_CONFIG_TOOL_DEPENDENCY_CACHE_DIR="/cluster_storage/galaxy_data/galaxy_export/tool_deps/_cache"
-e GALAXY_CONFIG_FILE_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/database/files"
-e GALAXY_CONFIG_TOOL_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tools"
-e GALAXY_CONFIG_TOOL_DATA_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data"
-e GALAXY_CONFIG_SHED_TOOL_DATA_PATH="/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data"
# The following settings are for directories that can be anywhere on the cluster fs.
-e GALAXY_CONFIG_JOB_WORKING_DIRECTORY="/cluster_storage/galaxy_data/galaxy_export/galaxy/database/job_working_directory" #IMPORTANT: needs to be created manually. Can also be placed elsewhere, but is originally located here
-e GALAXY_CONFIG_NEW_FILE_PATH="/cluster_storage/galaxy_data/tmp" # IMPORTANT: needs to be created manually. This needs to be writable by UID=1450 and have its sticky bit set (chmod 1777 for world-writable with sticky bit)
-e GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=False # Writes Job scripts, stdout and stderr to job_working_directory.
-e GALAXY_CONFIG_RETRY_JOB_OUTPUT_COLLECTION=5 #IF your cluster fs uses nfs this may introduce latency. You can set galaxy to retry if a job output is not yet created.
# Conda settings. IMPORTANT!
-e GALAXY_CONFIG_CONDA_PREFIX="/cluster_storage/galaxy_data/_conda" # Can be anywhere EXCEPT cluster_storage/galaxy/galaxy_export!
# Conda uses $PWD to determine where the virtual environment is. If placed inside the export directory conda will determine $PWD to be a subdirectory of the /export folder which does not exist on the cluster!
-e GALAXY_CONFIG_CONDA_AUTO_INIT=True # When the necessary environment can not be found a new one will automatically be created
```
### Setting up a Python virtual environment on the cluster [[toc]](#toc)
The Python environment in the container is not accessible from the cluster. So it needs to be created beforehand.
In this example configuration the Python virtual environment is created on `/cluster_storage/galaxy_data/galaxy_venv` and the export folder on `/cluster_storage/galaxy_data/galaxy_export`. To create the virtual environment:
1. Create the virtual environment `virtualenv /cluster_storage/galaxy_data/galaxy_venv`
2. Activate the virtual environment `source /cluster_storage/galaxy_data/galaxy_venv/bin/activate`
3. Install the galaxy requirements `pip install --index-url https://wheels.galaxyproject.org/simple --only-binary all -r /cluster_storage/galaxy_data/galaxy/lib/galaxy/dependencies/pinned-requirements.txt`
* Make sure to upgrade the environment with the new requirements when a new version of galaxy is released.
To make the Python environment usable on the cluster, create your custom `job_conf.xml` file and put it in `/cluster_storage/galaxy_data/galaxy_export/galaxy/config`.
In the destination section the following code should be added:
```xml
<env id="GALAXY_ROOT_DIR">/cluster_storage/galaxy_data/galaxy_export/galaxy</env>
<env id="GALAXY_LIB">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>
<env id="PYTHONPATH">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>
<param id="embed_metadata_in_job">True</param>
```
In this way, Python tools on the cluster are able to use the Galaxy libraries.
More information can be found [here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#managing-dependencies-manually)
and
[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#galaxy-job-handlers).
### Using an external Slurm cluster [[toc]](#toc)
It is often convenient to configure Galaxy to use a high-performance cluster for running jobs. To do so, two files are required:
1. munge.key
2. slurm.conf
These files from the cluster must be copied to the `/export` mount point (i.e., `/cluster_storage/galaxy_data/galaxy_export/` on the host if using below command) accessible to Galaxy before starting the container. This must be done regardless of which Slurm daemons are running within Docker. At start, symbolic links will be created to these files to `/etc` within the container, allowing the various Slurm functions to communicate properly with your cluster. In such cases, there's no reason to run `slurmctld`, the Slurm controller daemon, from within Docker, so specify `-e "NONUSE=slurmctld"`. Unless you would like to also use Slurm (rather than the local job runner) to run jobs within the Docker container, then alternatively specify `-e "NONUSE=slurmctld,slurmd"`.
Importantly, Slurm relies on a shared filesystem between the Docker container and the execution nodes. To allow things to function correctly, checkout the basic filesystem setup above.
A brief note is in order regarding the version of Slurm installed. This Docker image uses Ubuntu 14.04 as its base image. The version of Slurm in the Ubuntu 14.04 repository is 2.6.5 and that is what is installed in this image. If your cluster is using an incompatible version of Slurm then you will likely need to modify this Docker image.
The following is an example for how to specify a destination in `job_conf.xml` that uses a custom partition ("work", rather than "debug") and 4 cores rather than 1:
```
<destination id="slurm_custom" runner="slurm">
    <param id="embed_metadata_in_job">False</param>
    <param id="nativeSpecification">-p work -n 4</param>
</destination>
```
The usage of `-n` can be confusing. Note that it will specify the number of cores, not the number of tasks (i.e., it's not equivalent to `srun -n 4`).
### Using an external Grid Engine cluster [[toc]](#toc)
Set up the filesystem on the cluster as mentioned above.
To use Grid Engine (Sun Grid Engine, Open Grid Scheduler), one configuration file and an environment variable are required:
1. create an `act_qmaster` file in the /export folder.
   * The ***act_qmaster*** file should contain something like this.
```
YOUR_GRIDENGINE_MASTER_HOST
```
* this file will automatically be installed in the container's `/var/lib/gridengine` folder.
2. set the environment variable `SGE_ROOT`
* By default
```
-e SGE_ROOT=/var/lib/gridengine
```
3. Make sure that YOUR_GRIDENGINE_MASTER_HOST can be pinged from the docker container. If this is not the case you can put the qmaster's hostname and ip in the containers `/etc/hosts`
Your Grid Engine needs to accept job submissions from inside the container. If your container is already on a host that can submit jobs, set the hostname of the container to be exactly the same as the host. (The hostname can be changed by using the --hostname flag when starting the container).
Alternatively, you can add the container's hostname (default=galaxy-docker) to the /etc/hosts file on the gridengine head node. And setting the container's hostname as a submit host.
### Tips for Running Jobs Outside the Container [[toc]](#toc)
In its default state Galaxy assumes both the Galaxy source code and
various temporary files are available on shared file systems across the
cluster. When using Condor or SLURM (as described above) to run jobs outside
of the Docker container one can take steps to mitigate these assumptions.
The `embed_metadata_in_job` option on job destinations in `job_conf.xml`
forces Galaxy to collect metadata inside the container instead of on the
cluster:
```
<param id="embed_metadata_in_job">False</param>
```
This has performance implications and may not scale as well as performing
these calculations on the remote cluster - but this should not be a problem
for most Galaxy instances.
# Enable Galaxy to use BioContainers (Docker) [[toc]](#toc)
This is a very cool feature where Galaxy automatically detects that your tool has an associated docker image, pulls it and runs it for you. These images (when available) have been generated using [mulled](https://docs.galaxyproject.org/en/latest/admin/special_topics/mulled_containers.html).
To test, install the [IUC bedtools](https://toolshed.g2.bx.psu.edu/repository?repository_id=8d84903cc667dbe7&changeset_revision=7b3aaff0d78c) from the toolshed. When you try to execute *ClusterBed* for example, you may get a missing dependency error for *bedtools*. But bedtools has an associated docker image on [quay.io](https://quay.io/). Now configure Galaxy as follows:
- Add this environment variable to `docker run`: `-e GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS=True`
- Persist mulled Singularity caches by mounting `/export` and reusing `/export/container_cache/singularity/mulled` across runs.
- In `job_conf.xml` configure a Docker enabled destination as follows:
```xml
<destination id="docker_local" runner="local">
    <param id="docker_enabled">true</param>
    <param id="docker_volumes">$galaxy_root:ro,$galaxy_root/database/tmp:rw,$tool_directory:ro,$job_directory:ro,$working_directory:rw,$default_file_path:rw</param>
    <param id="docker_sudo">false</param>
</destination>
```
When you execute the tool again, Galaxy will pull the image from Biocontainers ([quay.io/biocontainers](https://quay.io/organization/biocontainers)), run the tool inside of this container to produce the desired output.
# Magic Environment variables [[toc]](#toc)
| Name | Description |
|---|---|
| `ENABLE_TTS_INSTALL` | Enables the Test Tool Shed during container startup. This change is not persistent. (`ENABLE_TTS_INSTALL=True`) |
| `GALAXY_LOGGING` | Enables for verbose logging at Docker stdout. (`GALAXY_LOGGING=full`) |
| `BARE` | Disables all default Galaxy tools. (`BARE=True`) |
| `NONUSE` | Disable services during container startup. (`NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd`) |
| `GUNICORN_WORKERS` | Set the number of gunicorn workers (`GUNICORN_WORKERS=2`) |
| `CELERY_WORKERS` | Set the number of celery workers (`CELERY_WORKERS=2`) |
| `GALAXY_DOCKER_ENABLED` | Enable Galaxy to use Docker containers if annotated in tools (`GALAXY_DOCKER_ENABLED=False`) |
| `GALAXY_DOCKER_VOLUMES` | Specify volumes that should be mounted into tool containers (`GALAXY_DOCKER_VOLUMES=""`) |
| `GALAXY_HANDLER_NUMPROCS` | Set the number of Galaxy handler (`GALAXY_HANDLER_NUMPROCS=2`) |
| `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` | Installing optional dependencies into the Galaxy virtual environment |
| `LOAD_PYTHON_DEV_DEPENDENCIES` | Installation of Galaxy's dev dependencies. Needs `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` as well |
| `GALAXY_AUTO_UPDATE_DB` | Run the Galaxy database migration script during startup |
| `GALAXY_EXPORT_MARKER` | Override the export marker used to refresh `/export/galaxy`. |
# HTTPS Support [[toc]](#toc)
It's possible to automatically configure your container with HTTPS, either with
certificates of your own or by automatically requesting an HTTPS certificate from
Letsencrypt with the following environment variables:
| Name | Description |
|---|---|
| `USE_HTTPS` | Set `USE_HTTPS=True` to set up HTTPS via self-signed certificates (CN is set to the value of `GALAXY_DOMAIN` variable, defaulting to `localhost` if no value is provided). If you have your own certificates, copy them to `/export/{server.key,server.crt}`. |
| `USE_HTTPS_LETSENCRYPT` | Set `USE_HTTPS_LETSENCRYPT=True` to automatically set up HTTPS using Letsencrypt as a certificate authority. (Requires you to also set `GALAXY_DOMAIN`) Note: only set one of `USE_HTTPS` and `USE_HTTPS_LETSENCRYPT` to true. |
| `GALAXY_DOMAIN` | Set `GALAXY_DOMAIN=<your domain>` so that Letsencrypt can verify that you own the domain you claim to own in order to issue you your HTTPS cert. |
# Lite Mode [[toc]](#toc)
The lite mode will only start postgresql and a single Galaxy process, without nginx, gunicorn or any other special feature from the normal mode. In particular there is no support for the export folder or any Magic Environment variables.
```sh
docker run -i -t -p 8080:8080 quay.io/bgruening/galaxy startup_lite
```
This will also use the standard `job_conf.xml.sample_basic` shipped by Galaxy. If you want to use the regular one from the normal mode you can pass `-j` to the `startup_lite` script.
# Extending the Docker Image [[toc]](#toc)
If the desired tools are already included in the Tool Shed, building your own personalised Galaxy docker Image (Galaxy flavour) can be done using the following steps:
1. Create a file named `Dockerfile`
2. Include `FROM quay.io/bgruening/galaxy` at the top of the file. This means that you use the Galaxy Docker Image as base Image and build your own extensions on top of it.
3. Supply the list of desired tools in a file (`my_tool_list.yml` below). See [this page](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample) for the file format requirements.
4. Execute `docker build -t my-docker-test .`
4a. (if behind proxy). Add the ENV http_proxy and https_proxy variables as IPs (to avoid nameserver resolution problems) as in the example below.
5. Run your container with `docker run -p 8080:80 my-docker-test`
6. Open your web browser on `http://localhost:8080`
For a working example, have a look at these Dockerfiles.
- [deepTools](http://deeptools.github.io/) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools/Dockerfile)
- [ChemicalToolBox](https://github.com/bgruening/galaxytools/tree/master/chemicaltoolbox) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox/Dockerfile)
```
# Galaxy - deepTools
#
# VERSION 0.2
FROM quay.io/bgruening/galaxy
MAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com
ENV GALAXY_CONFIG_BRAND deepTools
# The following two lines are optional and can be given during runtime
# with the -e http_proxy='http://yourproxyIP:8080' parameter
ENV http_proxy 'http://yourproxyIP:8080'
ENV https_proxy 'http://yourproxyIP:8080'
WORKDIR /galaxy
RUN add-tool-shed --url 'http://testtoolshed.g2.bx.psu.edu/' --name 'Test Tool Shed'
# Install Visualisation
RUN install-biojs msa
# Adding the tool definitions to the container
ADD my_tool_list.yml $GALAXY_ROOT_DIR/my_tool_list.yml
# Install deepTools
RUN install-tools $GALAXY_ROOT_DIR/my_tool_list.yml
# Mark folders as imported from the host.
VOLUME ["/export/", "/data/", "/var/lib/docker"]
# Expose port 80 (webserver), 21 (FTP server)
EXPOSE 80
EXPOSE 21
# Autostart script that is invoked during container start
CMD ["/usr/bin/startup"]
```
or the [RNA-workbench](https://github.com/bgruening/galaxy-rna-workbench/blob/master/Dockerfile).
The RNA-workbench has advanced examples about:
- populating Galaxy data libraries
```bash
setup-data-libraries -i $GALAXY_ROOT_DIR/library_data.yaml -g http://localhost:8080 \
    -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD
```
The actual data is referenced in a YAML file similar to this [one](https://github.com/bgruening/galaxy-rna-workbench/blob/master/library_data.yaml).
- installing workflows
```bash
workflow-install --workflow_path $GALAXY_HOME/workflows/ -g http://localhost:8080 \
    -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD
```
Where all Galaxy workflows need to be in one directory, here the `$GALAXY_HOME/workflows/`.
- running Galaxy data-managers to create indices or download data
```bash
run-data-managers -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD -g http://localhost:8080 \
    --config data_manager_rna_seq.yaml
```
The data-managers can be configured and specified in a YAML file similar to this [one](https://github.com/galaxyproject/training-material/blob/master/RNA-Seq/docker/data_manager_rna_seq.yaml).
If you host your flavor on GitHub consider to test our build with Travis-CI. This project will help you:
https://github.com/bgruening/galaxy-flavor-testing
## Test matrix [[toc]](#toc)
The project includes local test scripts and CI workflows. Use the matrix below to decide what to run.
| Area | Script / Workflow | Requires | Notes |
| --- | --- | --- | --- |
| Image build | `docker build -t galaxy:test galaxy/` | Docker | Baseline image build. |
| Startup sanity | `docker run --rm --privileged galaxy:test /usr/bin/startup2` | Privileged | Confirms services start and CVMFS messaging is sane. |
| Bioblend | `test/bioblend/test.sh` | Running Galaxy container | Uses a Bioblend test image against Galaxy. |
| Slurm | `test/slurm/test.sh` | Docker, Slurm test image | Uses external Slurm container; set `GALAXY_IMAGE=galaxy:test` if needed. |
| SGE (Grid Engine) | `test/gridengine/test.sh` | Docker, SGE test image | Uses ephemeris container to wait for Galaxy. |
| CVMFS sidecar | `test/cvmfs/test.sh` | Privileged | Builds and validates mount propagation from sidecar. |
| FTP/SFTP | `.github/workflows/single.sh` | Docker, sshpass (CI) | FTP and SFTP checks run in CI; local run skips SFTP if `sshpass` is missing. |
| /export persistence | `startup.sh` / `startup2.sh` | `/export` volume | Export and cache relocation happens during startup; exercised by CI runs. |
| HTTPS/TLS | `.github/workflows/single.sh` | Docker | Uses `curl` and `openssl s_client` against port 443. |
| Tool install smoke | `.github/workflows/single.sh` | Docker | Installs sample tools and verifies tool availability. |
| Container resolvers | `test/container_resolvers_conf.ci.yml` | Galaxy container | CI uses a minimal resolver config for toolbox resolution tests. |
| Image analysis (optional) | `.github/workflows/single.sh` | `dive` | Runs only when `dive` is installed. |
| Single-container CI | `.github/workflows/single_container.yml` | CI | Full container test (privileged). |
| Multi-test CI | `.github/workflows/single.sh` | CI | Builds image + runs SLURM, SGE, Bioblend; uses buildx cache. |
Notes:
- If `/tmp` is small in CI, set `TMPDIR=/var/tmp` for test scripts.
- CVMFS sidecar CI builds/pushes on tags; branch pushes run tests only when CVMFS paths change.
## List of Galaxy flavours [[toc]](#toc)
- [Aurora Galaxy](https://github.com/statonlab/aurora-galaxy-tools)
- [SNP analysis Workflows on Docker (sniplay)](https://github.com/ValentinMarcon/docker-galaxy-sniplay)
- [NCBI-Blast](https://github.com/bgruening/docker-galaxy-blast)
- [ChemicalToolBox](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox)
- [ballaxy](https://github.com/anhi/docker-scripts/tree/master/ballaxy)
- [NGS-deepTools](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools)
- [Galaxy ChIP-exo](https://github.com/gregvonkuster/docker-galaxy-ChIP-exo)
- [Galaxy Proteomics](https://github.com/bgruening/docker-galaxyp)
- [Imaging](https://github.com/bgruening/docker-galaxy-imaging)
- [Constructive Solid Geometry](https://github.com/gregvonkuster/docker-galaxy-csg)
- [Galaxy for metagenomics](https://github.com/bgruening/galaxy-metagenomics)
- [Galaxy with the Language Application Grid tools](https://github.com/lappsgrid-incubator/docker-galaxy-lappsgrid)
- [RNAcommender](https://github.com/gianlucacorrado/galaxy-RNAcommender)
- [OpenMoleculeGenerator](https://github.com/bgruening/galaxy-open-molecule-generator)
- [Workflow4Metabolomics](https://github.com/workflow4metabolomics/w4m-docker)
- [HiC-Explorer](https://github.com/maxplanck-ie/docker-galaxy-hicexplorer)
- [SNVPhyl](https://github.com/phac-nml/snvphyl-galaxy)
- [GraphClust](https://github.com/BackofenLab/docker-galaxy-graphclust)
- [RNA workbench](https://github.com/bgruening/galaxy-rna-workbench)
- [Cancer Genomics Toolkit](https://github.com/morinlab/tools-morinlab/tree/master/docker)
- [Clustered Heatmaps for Interactive Exploration of Molecular Profiling Data](http://cancerres.aacrjournals.org/content/77/21/e23)
# Integrating non-Tool Shed tools into the container [[toc]](#toc)
We recommend using the [Main Galaxy Tool Shed](https://toolshed.g2.bx.psu.edu/) for all your tools and workflows that you would like to share.
In rare situations where you cannot share your tools but still want to include them into your Galaxy Docker instance, please follow the next steps.
- Get your tools into the container.
Mount your tool directory into the container with a separate `-v /home/user/my_galaxy_tools/:/local_tools`.
- Create a `tool_conf.xml` file for your tools.
This should look similar to the main [`tool_conf.xml`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/tool_conf.xml.sample) file, but references your tools from the new directory. In other words a tool entry should look like this: `<tool file="/local_tools/my_tool/my_tool.xml" />`.
Your `tool_conf.xml` should be available from inside of the container. We assume you have it stored under `/local_tools/my_tools.xml`.
- Add the new tool config file to the Galaxy configuration.
To make Galaxy aware of your new tool configuration file you need to add the path to `tool_config_file`, which is set to `/etc/galaxy/tool_conf.xml`. You can do this during container start by setting the environment variable `-e GALAXY_CONFIG_TOOL_CONFIG_FILE=/etc/galaxy/tool_conf.xml,/local_tools/my_tools.xml`.
# Users & Passwords [[toc]](#toc)
The Galaxy Admin User has the username `admin@example.org` and the password `password`.
The PostgreSQL username is `galaxy`, the password is `galaxy` and the database name is `galaxy` (I know I was really creative ;)).
If you want to create new users, please make sure to use the `/export/` volume. Otherwise your user will be removed after your docker session is finished.
The proftpd server is configured to use the main galaxy PostgreSQL user to access the database and select the username and password. If you want to run the
docker container in production, please do not forget to change the user credentials in `/etc/proftpd/proftpd.conf` too.
The Flower Webapp is `htpasswd` protected with username and password set to `admin`.
RabbitMQ is configured with:
- Admin username: `admin`
- Admin password: `admin`
- Galaxy vhost: `galaxy`
- Galaxy username: `galaxy`
- Galaxy password: `galaxy`
- Flower username: `flower`
- Flower password: `flower`
# Development [[toc]](#toc)
You can clone this repository with:
```sh
git clone https://github.com/bgruening/docker-galaxy-stable.git
```
This repository uses various [Ansible](http://www.ansible.com/) roles as specified in [requirements.yml](galaxy/ansible/requirements.yml) to manage configurations and dependencies. You can install these roles with the following command:
```sh
cd galaxy/ansible/ && ansible-galaxy install -r requirements.yml -p roles
```
If you simply want to change the Galaxy repository and/or the Galaxy branch, from which the container is built you can do this with Docker `--build-arg` during the `docker build` step. For example you can use these parameters during container build:
```
--build-arg GALAXY_RELEASE=install_workflow_and_tools
--build-arg GALAXY_REPO=https://github.com/manabuishii/galaxy
```
To keep docker images lean and optimize storage, we recommend using [Dive](https://github.com/wagoodman/dive). It provides an interactive UI that lets you explore each layer of the image, helping you quickly identify files and directories that take up significant space. To install Dive, follow the installation instructions provided in the [Dive GitHub repository](https://github.com/wagoodman/dive?tab=readme-ov-file#installation). After building your docker image, use Dive to analyze it:
```bash
dive <image-name>
```
# Requirements [[toc]](#toc)
- [Docker](https://www.docker.io/gettingstarted/#h_installation)
# Support & Bug Reports [[toc]](#toc)
You can file a [GitHub issue](https://github.com/bgruening/docker-galaxy-stable/issues) or ask
us on the [Galaxy development list](http://lists.bx.psu.edu/listinfo/galaxy-dev).
If you like this service please fill out this survey: https://www.surveymonkey.de/r/denbi-service?sc=rbc&tool=galaxy-docker
================================================
FILE: compose/README.md
================================================
⚠️
The `compose` version of this project is currently not maintained. We update the files and versions as we have time, but it's not a priority at the moment.
We will concentrate on the single-container version. If you want to deploy a composable version of Galaxy please have a look at https://github.com/galaxyproject/galaxy-helm or take over the maintainership of this version here :)
⚠️
# Galaxy Docker Compose
This setup is built on the idea of using a basic docker-compose file and extending it
for additional use cases. Therefore the `docker-compose.yml` is the base of the
whole setup. By concatenating additional files, you can extend it to use, for
example, HTCondor (see [Usage](#usage)).
All working data (database, virtual environment, etc.) is exported in the
`EXPORT_DIR`, which defaults to ./export.
## Usage
### First startup
When starting the setup for the first time, the Galaxy container will copy
a bunch of files into the `EXPORT_DIR`. This might take quite some time
to finish (even 20 minutes or more). Please don't interrupt the setup in
this period, as this might result in a broken state of the `EXPORT_DIR`
(see [Killing while first start up](#killing-while-first-start-up)).
### Basic setup
Simply run
> docker-compose up
to start Galaxy. In the basic setup, Galaxy together with Nginx as the proxy,
Postgres as the DB, and RabbitMQ as the message queue is run.
The default username and password is "admin", "password" (API key "fakekey").
Those credentials are set at first run and can be tweaked using the environment
variables `GALAXY_DEFAULT_ADMIN_USER`, `GALAXY_DEFAULT_ADMIN_EMAIL`,
`GALAXY_DEFAULT_ADMIN_PASSWORD`, and `GALAXY_DEFAULT_ADMIN_KEY` in the
`docker-compose.yml` file. If you want to change the email address of an admin,
remember to update the `admin_users` setting of the Galaxy config (also
see [Configuration](#configuration) to learn how to configure Galaxy).
### Running in background
If you want to run the setup in the background, use the detach option (`-d`):
> docker-compose up -d
### Upgrading to a newer Galaxy version
When not setting `IMAGE_TAG` to a specific version, Docker-Compose will always
fetch the newest image and therefore Galaxy version available. Depending
on the magnitude of the upgrade, you may need to delete the virtual
environment of Galaxy (EXPORT_PATH/galaxy/.venv) before you start the
setup again. The DB migration depends on the `database_auto_migrate`
setting for Galaxy (which is not
set on default and will therefore be `false` normally).
## Extending the setup
Beyond the basic usage, extending the setup is as easy as adding an additional
docker-compose extension file. This is done via the [standard docker-compose syntax](https://docs.docker.com/compose/extends/):
`docker-compose -f docker-compose.yml -f docker-compose.EXTENSION.yml`. Simply
concatenate the extensions you want to use. The rest should be handled for you.
### Running a HTCondor cluster
The `docker-compose.htcondor.yml` file is responsible to build up
an HTCondor cluster. Simply run:
> docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up
This will bring up a "cluster" with one master and one executor. Galaxy
acts like the submit node. To scale
the cluster, run the up statement with a `--scale htcondor-executor=n` option.
The setup ships with a basic configuration for HTCondor (see the
`base_config.yml` file). To customize the settings, set the appropriate
`HTCONDOR_MASTER_CONFIG_`, `HTCONDOR_EXECUTOR_CONFIG_`, `HTCONDOR_GALAXY_CONFIG`
environment variables (see [Configuration](#configuration)).
### Running a SLURM cluster
Append the `docker-compose.slurm.yml` file to your `docker-compose up` command. This
will spin up a small Slurm cluster and configure Galaxy to schedule jobs there.
To scale the cluster, run the up statement with a `--scale slurm_node=n` option.
As all nodes need to be defined in the slurm.conf file, you will also need to
set the env variable `SLURM_NODE_COUNT` to the correct node count.
Here is an example for scaling to three nodes:
`SLURM_NODE_COUNT=3 docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up --scale slurm_node=3`.
Some background info about the slurm.conf configuration: As said earlier, Slurm
expects to have all nodes be defined in the conf file, together with valid
hostnames. Therefore `galaxy-configurator` automatically adds references
(the names of the slurm_node-containers) to the nodes by utilizing `SLURM_NODE_COUNT`.
As the docker-compose containers can contain underscores, the names are not
valid as hostnames (even though they are resolvable from inside the containers).
To cope with this problem, the `galaxy-slurm-node-discovery`-container
uses the Docker API to fetch the correct hostnames and replaces them on the
fly inside the slurm.conf file.
### Running a Kubernetes Cluster (with kind)
It is possible to start a small Kubernetes (k8s) cluster using [kind](https://kind.sigs.k8s.io)
(Kubernetes in Docker) and let Galaxy run your jobs there. For this use the
`docker-compose.k8s.yml` file. Note that this extension is only meant
to run individually (so no Pulsar, HTCondor etc.).
The `galaxy-kind` container is responsible for starting up your local Kubernetes
cluster and applying all the configuration the Galaxy-Configurator created. You can
find these files under `galaxy-configurator/templates/kind`. The `kind_config.yml`
file is used to configure Kind itself (also see https://kind.sigs.k8s.io/docs/user/configuration/),
while the files in the `k8s_config` are the configs that will be applied to
Kubernetes using `kubectl apply -f <file>`. By default, k8s is configured
to add some persistent volumes (PV) and persistent volume claims (PVC) so jobs
can access all the needed files from Galaxy.
It is relatively easy to add your own k8s_configs: Simply place your files into the
template folder (remember to add the `.j2` extension!) and mention it in the
`kind_configs` variable in the run.sh file of the galaxy-configurator
(see [Extend the Galaxy-Configurator](#extend-the-galaxy-configurator)).
While Kind is starting up the cluster, it blocks Galaxy from starting itself.
This is needed as Galaxy will parse the KUBECONFIG (that is created after k8s has started)
only once on startup. So don't be surprised if Galaxy is quiet for some time :)
Note that the cluster is being rebuilt on every start (to be more precise,
a `kind delete cluster` is called on shut down), so manual changes will
be overwritten if they are not defined in the k8s_config!
### Using Singularity for dependency resolution
Conda is used as the default dependency resolution. To switch to using
Singularity containers, add the `docker-compose.singularity.yml` file.
This will advise Galaxy to - if possible - stick with Singularity
for the dependency resolution. See the
[Galaxy documentation](https://docs.galaxyproject.org/en/master/admin/special_topics/mulled_containers.html)
for more information.
### Configuration
The `galaxy-configurator` is the central place for configuration
and is used to configure Galaxy and its
additional services (currently Nginx, and Slurm). For this, it utilizes
environment variables (set in the docker-compose file) for common configs,
and the `base_config.yml` file, used for base-configuration that does not
change often. For environment variables, there are two categories of
configuration: The ones that contain a `_CONFIG_`
(like `GALAXY_CONFIG_ADMIN_USERS`) and the ones that don't (like
`GALAXY_PROXY_PREFIX`). The first category contains configuration
options within the tools itself and they are simply mapped to the
corresponding config-file one-to-one (see for example
[galaxy.yml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/galaxy.yml.sample)
for reference). The other category contains options that have some
logic within the docker-compose setup. `GALAXY_PROXY_PREFIX`, for example,
touches multiple Galaxy and Nginx options, so you don't have to.
The configurations are based on [Jinja2](https://jinja.palletsprojects.com/en/2.11.x/)
templates, located at `galaxy-configurator/templates`.
The `galaxy-configurator` renders these
templates on startup and saves them in the export-folder to be
used by the other containers. A diff is created to surface changes
that will be applied. To disable the configurator, simply remove the
corresponding `*_OVERWRITE_CONFIG` environment variable
(like `GALAXY_OVERWRITE_CONFIG`) or set it to `false`.
All options are discussed under [configuration reference](#configuration-reference).
### Use specific Galaxy version or Docker images
The `IMAGE_TAG` environment variable allows to use specific versions of the
setup. Say, you want to stay with Galaxy v24.1 for now:
> export IMAGE_TAG=24.1
> docker-compose up
Without setting this variable, you will always get updated to the newest
version available.
### Restarting
To restart the setup (for example after a configuration change), you can simply
kill (CTRL-C) Docker Compose and re-run `docker-compose ... up`. Your data will
not be lost, as long as you keep the `export`-folder.
### Using prefix
It is possible to host Galaxy under a prefix like example.com/galaxy. For that,
set the env variable in the `galaxy-configurator` part to
`GALAXY_PROXY_PREFIX=/your/wanted/prefix` (like `/galaxy`)
and remember to also update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly.
## More advanced stuff
### "SSH"ing into a container
When facing a bug it may be helpful to have command-line control over a
container. This is as simple as running `docker exec -it CONTAINER_NAME /bin/bash`.
For the galaxy-server container that would mean:
> docker exec -it compose_galaxy-server_1 /bin/bash
Note that not all containers have bash shipped with them. In this case replace
it by `/bin/sh`.
### Build containers locally
When developing locally, you may come to the point where you need to build
images yourself. In most cases adding a `--build` to the docker-compose statement
should be enough. It's
recommended to build the images using custom tags, so it's easy to switch between
versions. Simply set `IMAGE_TAG` to something other than `latest`:
> export IMAGE_TAG=bugfix1
> docker-compose up --build
Maybe you found a bug in Galaxy itself and you want to test it now. For this,
you can set the `GALAXY_REPO` and `GALAXY_RELEASE` build arguments to your
own fork and branch.
> docker build galaxy-server -t quay.io/bgruening/galaxy-server:$IMAGE_TAG --build-arg GALAXY_REPO=https://github.com/YOUR-USERNAME/galaxy --build-arg GALAXY_RELEASE=my_custom_branch
Some containers use base-images that share some common dependencies (like
Docker that is not only used for Galaxy, but also Pulsar, HTCondor, or Slurm).
After re-building these images yourself, you may also need to add
`--build-arg IMAGE_TAG=your_base_image_tag` and `SETUP_REPO` if your
base-images are tagged differently or are stored in a different repository.
### Extend the Galaxy-Configurator
It is possible to extend the usage of the configurator, both in extending the
Jinja2 templates, but also in adding additional files.
All environment variables of the `galaxy-configurator` are accessible
within the templates. Additionally,
the configurator parses specific `*_CONFIG_*`
variables and makes them accessible as a dict (for example `galaxy` or
`gravity`). It may be helpful to understand the current use cases
within the templates and how the `customize.py` file (actually just an
extension of the [J2cli](https://github.com/kolypto/j2cli) parses env
variables.
To add more template files, have a look into the `run.sh` file. For example
adding a configuration file for Galaxy is as simple as adding an entry
into the `galaxy_configs` array.
### Adding additional containers or configurations
So you want to extend the setup to - for example - support a new
Workload Manager for Galaxy? Or you have a specific configuration
of Galaxy in mind that goes out of the scope of the basic
`docker-compose.yml` file? Awesome!
Let's have a look at two examples for how you can create a custom
extension:
**HTCondor**:
The `docker-compose.htcondor.yml` file is a good example of what
the idea of extensions are in the context of this setup.
The HTCondor "cluster" is based on a single image (`galaxy-htcondor`)
and, depending on the container's purpose, it gets exposed to
different volumes. As Galaxy needs some additional files, one volume
is added to its container. The `galaxy-configurator` part
overwrites a single
environment variable and sets a new one. The neat thing of this
approach is that if you don't need
to run HTCondor, the base setup will work just fine without
much additional ballast. However, adding HTCondor isn't a hassle
either.
**Singularity**
Changing a bunch of variables all the time, just to be able to switch
between different setups can become a hassle quickly. The
`docker-compose.singularity.yml` file is a good example of how you
can avoid that. In normal cases, Galaxy should run jobs in the
shell directly, changing that to Singularity requires some
different settings. The file is a good example in how you can
quickly overwrite settings and be able to reuse it for different
occasions (remember that by concatenating this file behind
HTCondor, Slurm, or Pulsar enables Singularity the same way). Another
example would be to create a custom `docker-compose.debug.yml` file
that could be used to enable some debug flags or
setting `GALAXY_CONFIG_CLEANUP_JOB=never`.
### Running the CI pipeline on your own fork
The GitHub Actions workflow used to build, test and deploy this setup
is independent of any specific username or Docker Registry. To run
the workflow on your fork, simply
[set the following secrets](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets):
* `docker_registry`: The Registry the images should be pushed
to (`docker.io`, for example)
* `docker_registry_username`: Your username
* `docker_registry_password`: Your password
## Troubleshooting
### Killing while first start up
If you kill (CTRL-C) Docker Compose while Galaxy is performing the first
startup, you may come into the situation where not all files have been properly
exported. As the exporting is only done for the first start, this can result in
missing dependencies. In this case it is good to remove the whole
`export`-folder (or at least Galaxy related files - the `postgres` folder can
stay, if wanted).
### Resetting the setup
To start from the beginning, you of course need to delete the `export`-folder.
But remember to also do a `docker-compose down` (passing the same `-f` files you used for `up`), as this
will shut down and remove all containers. If you forget this, while still
deleting the `export`-folder, the Galaxy container may have problems with
exporting all necessary files, as they are usually deleted within the container
after the first proper startup.
## Testing
The setup provides a bunch of different integration tests to run against Galaxy.
Have a look inside the `tests` folder. There you find the containers that run
the tests and their docker-compose files. The containers are essentially just
a wrapper around the test tools to simplify using them. Running a test
is the same as extending
any other part of the setup: Just concatenate the test file at the end.
To run, for example, some Planemo Workflow tests against a Galaxy installation that
is connected to a HTCondor cluster using Singularity, just enter:
`docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml
-f docker-compose.singularity.yml -f tests/docker-compose.test.yml
-f tests/docker-compose.test.workflows.yml up`. To stop the setup when a test
has finished, you may want to add the option `--exit-code-from galaxy-workflow-test`.
This returns the exit code of the test container (should be 0 if successful),
which you could use for further automation.
The tests are run using GitHub Actions on every commit. So feel free to inspect
the `.github/workflows/compose.yml` file for more test cases and get inspired
by them :)
### Planemo workflow tests
Like the name suggests, this runs [Planemo](https://planemo.readthedocs.io/en/latest/)
workflow tests. The container uses the tests from [UseGalaxy.eu](https://github.com/usegalaxy-eu/workflow-testing),
but you can mount any test you could think of inside the container at the `/src` path.
By default, it will run some select workflows, but you can choose your own
by setting the `WORKFLOWS` env variable to a comma separated list of paths to some tests
(e.g. `WORKFLOWS=test1/test1.ga,test2/test2.ga docker-compose ...`).
### Selenium tests
The Selenium tests simulate a real user that is accessing Galaxy through the
browser to perform some actions. For that it uses a headless Chrome to run the
tests from the [Galaxy repo](https://github.com/galaxyproject/galaxy/tree/dev/lib/galaxy_test/selenium).
The GitHub Actions currently just run a few of those. To select more tests,
set the env variable `TESTS` to a comma separated list (like `TESTS=navigates_galaxy.py,login.py`).
Note that you don't need to append the `test_` prefix for every
single file!
### BioBlend tests
BioBlend has some tests that run against Galaxy. We are using some of them to test
our setup too. Have a look into the `run.sh` file of the container to see
which tests we have excluded (at least for now).
## Configuration reference
Tool specific configuration can be applied via `base_config.yml` or the following
environment variables:
* `GALAXY_CONFIG_`
* `GRAVITY_CONFIG_`
* `NGINX_CONFIG_`
* `PULSAR_CONFIG_`
* `HTCONDOR_MASTER_CONFIG_`
* `HTCONDOR_EXECUTOR_CONFIG_`
* `HTCONDOR_GALAXY_CONFIG`
* `SLURM_CONFIG_`
The following are settings specific to this docker-compose setup:
### Galaxy
| Variable | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `GALAXY_OVERWRITE_CONFIG` | Enable Galaxy-configurator, which may result in overwriting manual config changes done in `EXPORT_DIR/galaxy/config`. |
| `GALAXY_PROXY_PREFIX` | Host Galaxy under a prefix (like example.com/galaxy). Note that you also need to update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly. |
| `GALAXY_JOB_DESTINATION` | The name of the preferred job destination (local, condor, slurm, singularity..) defined in `job_conf.xml`. Generally, this does not need to be changed, as the docker-compose extensions are already taking care of that. |
| `GALAXY_JOB_RUNNER` | The job runner Galaxy will use to process jobs. Can be `local`, `condor`, `slurm`, `pulsar_rest`, `pulsar_mq`, or `k8s`. |
| `GALAXY_DEPENDENCY_RESOLUTION` | Determines how Galaxy should resolve dependencies. You can choose between Conda (`conda`) or running them inside a Singularity container (`singularity`).|
| `GALAXY_PULSAR_URL` | The URL Galaxy will communicate with Pulsar, when choosing the `pulsar_rest` job runner. |
| `GALAXY_JOB_METRICS_*` | Enable the corresponding job metrics. Can be `CORE`, `CPUINFO` (`true` or `verbose`), `MEMINFO`, `UNAME`, and `ENV`, also see [job_metrics.xml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/job_metrics_conf.xml.sample) for reference.
### Nginx
| Variable | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `NGINX_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `NGINX_PROXY_READ_TIMEOUT` | Determines how long Nginx will wait (in seconds) for Galaxy to respond to a request until it times out. Defaults to 180 seconds. |
### Pulsar
| Variable | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `PULSAR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `PULSAR_JOB_RUNNER` | The job runner Pulsar will use to process jobs. Currently, only `local` is supported, but this will be extended to HTCondor and Slurm in the future. |
| `PULSAR_NUM_CONCURRENT_JOBS` | The number of jobs Pulsar will run concurrently. Defaults to 1. |
| `PULSAR_GALAXY_URL` | The URL Pulsar will use to send results back to Galaxy. Defaults to `http://nginx:80`. |
| `PULSAR_HOSTNAME` | The hostname Pulsar will listen to for requests. Defaults to `pulsar`. |
| `PULSAR_PORT` | The port Pulsar will listen to for requests. Defaults to 8913. |
| `PULSAR_LOG_LEVEL` | The log level (like `DEBUG` or `INFO`) of Pulsar. Defaults to `INFO`. |
### Kind (Kubernetes in Docker)
| Variable | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `KIND_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `KIND_NODE_COUNT` | The number of Kubernetes nodes kind should start. Defaults to 1. |
| `KIND_PV_STORAGE_SIZE` | The size limit (in Gi) of a Kubernetes Persistent Volume. Defaults to 100. |
| `GALAXY_KUBECONFIG` | The path to the KUBECONFIG that Galaxy will use to connect to Kubernetes. Defaults to the one created with galaxy-kind. |
| `GALAXY_K8S_PVC` | The PVCs a job pod should mount. Defaults to `galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps`. |
| `GALAXY_K8S_DOCKER_REPO_DEFAULT` | The Docker Repo/Registry to use if the resolver could not resolve the proper image for a job. Defaults to `docker.io`. |
| `GALAXY_K8S_DOCKER_OWNER_DEFAULT` | The Owner/Username to use if the resolver could not resolve the proper image for a job. Is not set by default. |
| `GALAXY_K8S_DOCKER_IMAGE_DEFAULT` | The Image to use if the resolver could not resolve the proper image for a job. Defaults to `ubuntu`. |
| `GALAXY_K8S_DOCKER_TAG_DEFAULT` | The Image Tag to use if the resolver could not resolve the proper image for a job. Defaults to `22.04`. |
### HTCondor
| Variable | Description |
|-----------------------------|--------------------------------------------------------------------------------------------------------------------|
| `HTCONDOR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
### Slurm
| Variable | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------|
| `SLURM_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |
| `SLURM_NODE_COUNT` | The number of Slurm nodes running. This needs to be changed when scaling the setup (eg. `docker-compose up --scale slurm_node=n`) to let the Slurm controller know of all available nodes. |
| `SLURM_NODE_CPUS` | Number of CPUs per node. Defaults to 1. |
| `SLURM_NODE_MEMORY` | Amount of memory per node. Defaults to 1024. |
| `SLURM_NODE_HOSTNAME` | Docker Compose adds a prefix in front of the container names by default. Change this value to the name of your setup and `_slurm_node` (e.g. `compose_slurm_node`) to ensure a correct mapping of the Slurm nodes. |
### Github Workflow Tests (Branch 24.1)
| Setup | bioblend | workflow ard | workflow quality_control | workflow wf3-shed-tools (example1) | selenium |
|------------------------|--------------------|--------------------|--------------------------|------------------------------------|--------------------|
| Galaxy Base | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Galaxy Proxy Prefix | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :x: |
| HTCondor | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Slurm | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Pulsar-REST | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check_mark: |
| Pulsar-MQ | :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check_mark: |
| k8s | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Singularity | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Pulsar-MQ + Singularity| :heavy_check_mark: | :heavy_check_mark: | :x: | :heavy_check_mark: | :heavy_check_mark: |
| Slurm + Singularity | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| HTCondor + Singularity | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
Implemented: :heavy_check_mark:
Not Implemented: :x:
================================================
FILE: compose/base-images/galaxy-cluster-base/Dockerfile
================================================
# Cluster-node base image: extends galaxy-container-base with the client
# tooling shared by the compose cluster services (HTCondor, Slurm, CVMFS).
# The build args let forks point at their own registry/owner/tag for the
# galaxy-container-base image.
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG
# Base dependencies (gnupg2 and curl are needed below to fetch and register
# the third-party apt repository keys)
RUN apt update && apt install --no-install-recommends gnupg2 curl -y \
&& /usr/bin/common_cleanup.sh
# Install HTCondor
# DEBIAN_FRONTEND=noninteractive keeps apt from prompting; note that ENV
# persists into derived images (unlike a per-RUN variable).
ENV DEBIAN_FRONTEND=noninteractive
# NOTE(review): apt-key is deprecated on current Debian/Ubuntu; consider
# migrating to a signed-by keyring when this base image is next upgraded.
RUN curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \
&& echo "deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main" >> /etc/apt/sources.list \
&& apt update && apt install --no-install-recommends htcondor -y \
&& rm -f /etc/condor/condor_config.local \
&& /usr/bin/common_cleanup.sh
# Install Slurm client
# Fixed UID/GID for the munge auth user so credentials line up across all
# containers that share the munge key.
ENV MUNGE_USER=munge \
MUNGE_UID=1200 \
MUNGE_GID=1200
# slurm-drmaa comes from the natefoo PPA; the libdrmaa.so symlink is what
# Galaxy's DRMAA runner dlopens.
RUN groupadd -r $MUNGE_USER -g $MUNGE_GID \
&& useradd -u $MUNGE_UID -r -g $MUNGE_USER $MUNGE_USER \
&& echo "deb http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main" >> /etc/apt/sources.list \
&& echo "deb-src http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main" >> /etc/apt/sources.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8DE68488997C5C6BA19021136F2CC56412788738 \
&& apt update \
&& apt install --no-install-recommends python3-distutils slurm-client slurmd slurmctld slurm-drmaa1 -y \
&& apt --no-install-recommends install munge libmunge-dev -y \
&& ln -s /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \
&& /usr/bin/common_cleanup.sh
# Install CVMFS
# The release deb registers the CVMFS apt repository; /srv/cvmfs is the
# cache location referenced by files/cvmfs/default.local (copied below).
RUN apt update \
&& apt install wget lsb-release -y \
&& wget https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest_all.deb \
&& dpkg -i cvmfs-release-latest_all.deb \
&& rm -f cvmfs-release-latest_all.deb \
&& apt update \
&& apt install --no-install-recommends cvmfs -y \
&& mkdir /srv/cvmfs \
&& /usr/bin/common_cleanup.sh
# Client configuration for the galaxyproject.org CVMFS repositories
# (server URLs, public keys, cache settings).
COPY files/cvmfs /etc/cvmfs
================================================
FILE: compose/base-images/galaxy-cluster-base/files/common_cleanup.sh
================================================
#!/bin/sh
set -x
# Trim the image: remove bytecode, logs and caches that can be rebuilt
# at runtime. Shrinks the container considerably at the cost of slower
# application start-up.
for pattern in '*.pyc' '*.log' '.cache'; do
    find / -name "$pattern" -delete
done
rm -rf /var/lib/apt/lists/* /var/cache/*
# Empty out any remaining log files instead of deleting them
# (https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles)
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true
================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/default.local
================================================
# CVMFS client configuration for the Galaxy reference-data repositories.
CVMFS_REPOSITORIES="data.galaxyproject.org,singularity.galaxyproject.org"
# No caching proxy between this client and the stratum-1 servers.
CVMFS_HTTP_PROXY="DIRECT"
# Local cache soft quota (in MB, per CVMFS client docs).
CVMFS_QUOTA_LIMIT="4000"
# Matches the /srv/cvmfs directory created in the Dockerfile.
CVMFS_CACHE_BASE="/srv/cvmfs/cache"
================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/domain.d/galaxyproject.org.conf
================================================
# Stratum-1 mirrors for *.galaxyproject.org repositories; @fqrn@ is
# substituted with the fully-qualified repository name by the client.
CVMFS_SERVER_URL="http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-mel0.gvl.org.au/cvmfs/@fqrn@;http://cvmfs1-ufr0.galaxyproject.eu/cvmfs/@fqrn@"
CVMFS_KEYS_DIR=/etc/cvmfs/keys/galaxyproject.org
# Let the Geo-API order the mirror list by network proximity.
CVMFS_USE_GEOAPI="yes"
================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub
================================================
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE
3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj
GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH
maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs
NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT
owIDAQAB
-----END PUBLIC KEY-----
================================================
FILE: compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/singularity.galaxyproject.org.pub
================================================
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE
3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj
GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH
maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs
NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT
owIDAQAB
-----END PUBLIC KEY-----
================================================
FILE: compose/base-images/galaxy-container-base/Dockerfile
================================================
# Stage 1: build Apptainer from source (needs Go and build libraries).
FROM buildpack-deps:22.04 as build_apptainer
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh
# Install Go (only needed for building apptainer)
ENV GO_VERSION=1.22.7
RUN apt update && apt install --no-install-recommends cryptsetup-bin uuid-dev libseccomp-dev libfuse-dev libfuse3-dev -y \
    && wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz \
    && tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz \
    && rm go${GO_VERSION}.linux-amd64.tar.gz \
    && /usr/bin/common_cleanup.sh
ENV PATH=/usr/local/go/bin:${PATH}
ENV APPTAINER_VERSION=1.3.4
# Compile only; `make install` runs in the final stage below so the
# build toolchain never enters the final image.
RUN wget https://github.com/apptainer/apptainer/releases/download/v${APPTAINER_VERSION}/apptainer-${APPTAINER_VERSION}.tar.gz \
    && mkdir -p apptainer \
    && tar -xzf apptainer-${APPTAINER_VERSION}.tar.gz --strip-components=1 -C apptainer \
    && cd apptainer \
    && ./mconfig --with-suid \
    && make -C builddir \
    && /usr/bin/common_cleanup.sh
# --- Final image ---
FROM ubuntu:22.04 as final
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh
# Base dependencies
RUN apt update && apt install --no-install-recommends ca-certificates python3-distutils squashfs-tools tzdata -y \
    && /usr/bin/common_cleanup.sh
# Install Docker
RUN apt update \
    && apt install --no-install-recommends docker.io -y \
    && /usr/bin/common_cleanup.sh
# Install Apptainer
COPY --from=build_apptainer /apptainer /apptainer
# `make` is only needed transiently to run the install target; the sed
# comments out the /etc/localtime bind path in apptainer.conf.
RUN apt update && apt install --no-install-recommends make -y \
    && make -C /apptainer/builddir install \
    && apt remove make -y \
    && rm -rf /apptainer \
    && sed -e '/bind path = \/etc\/localtime/s/^/#/g' -i /usr/local/etc/apptainer/apptainer.conf \
    && /usr/bin/common_cleanup.sh
================================================
FILE: compose/base-images/galaxy-container-base/files/common_cleanup.sh
================================================
#!/bin/sh
set -x
# Trim the image: remove bytecode, logs and caches that can be rebuilt
# at runtime. Shrinks the container considerably at the cost of slower
# application start-up.
for pattern in '*.pyc' '*.log' '.cache'; do
    find / -name "$pattern" -delete
done
rm -rf /var/lib/apt/lists/* /var/cache/*
# Empty out any remaining log files instead of deleting them
# (https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles)
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true
================================================
FILE: compose/base_config.yml
================================================
# Gravity (Galaxy process manager) settings; rendered into galaxy.yml
# by templates/galaxy/galaxy.yml.j2. GRAVITY_CONFIG_* env vars are
# merged in by the configurator.
gravity:
  process_manager: supervisor
  galaxy_root: /galaxy
  virtualenv: /galaxy/.venv
  gunicorn:
    enable: True
    # Bind on all interfaces so the nginx container can reach the app.
    bind: 0.0.0.0:5555
    workers: 2
  celery:
    enable: true
    enable_beat: true
    concurrency: 2
  handlers:
    handler:
      processes: 2
      pools:
        - job-handlers
        - workflow-schedulers
# Galaxy application settings; GALAXY_CONFIG_* env vars are merged into
# this mapping by the configurator (customize.py).
galaxy:
  tool_dependency_dir: /tool_deps
  # Tool data tables are served from the CVMFS mounts.
  tool_data_table_config_path: /cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml
  tus_upload_store: /tus_upload_store
  enable_celery_tasks: true
  celery_conf:
    # Matches the `redis` service in docker-compose.yml.
    result_backend: redis://redis:6379/0
# Pulsar app.yml settings; PULSAR_CONFIG_* env vars are merged in by
# the configurator and rendered by templates/pulsar/app.yml.j2.
pulsar:
  conda_auto_init: True
  conda_auto_install: True
  tool_dependency_dir: dependencies
  dependency_resolution:
    resolvers:
      - type: conda
        auto_init: true
        auto_install: true
      # Second pass: fall back to a versionless conda resolution.
      - type: conda
        versionless: true
# Probably needs more polishing, but at least it works..
# Rendered into slurm.conf by templates/slurm/slurm.conf.j2;
# SLURM_CONFIG_* env vars are merged in by the configurator.
slurm:
  # Matches the `slurmctld` service hostname in docker-compose.slurm.yml.
  SlurmctldHost: "slurmctld"
  AuthType: "auth/munge"
  CryptoType: "crypto/munge"
  MpiDefault: "none"
  ProctrackType: "proctrack/pgid"
  ReturnToService: "1"
  SlurmctldPidFile: "/var/run/slurmctld.pid"
  SlurmctldPort: "6817"
  SlurmdPidFile: "/var/run/slurmd.pid"
  SlurmdPort: "6818"
  # State under /tmp is lost on container restart — acceptable for this
  # throwaway compose cluster.
  SlurmdSpoolDir: "/tmp/slurmd"
  SlurmUser: "slurm"
  StateSaveLocation: "/tmp/slurm"
  SwitchType: "switch/none"
  TaskPlugin: "task/none"
  InactiveLimit: "0"
  KillWait: "30"
  MinJobAge: "300"
  SlurmctldTimeout: "120"
  SlurmdTimeout: "300"
  Waittime: "0"
  SchedulerType: "sched/backfill"
  SelectType: "select/cons_res"
  SelectTypeParameters: "CR_Core_Memory"
  AccountingStorageType: "accounting_storage/none"
  AccountingStoreFlags: "job_comment"
  ClusterName: "Cluster"
  JobCompType: "jobcomp/none"
  JobAcctGatherFrequency: "30"
  JobAcctGatherType: "jobacct_gather/none"
  SlurmctldDebug: info
  SlurmdDebug: info
# HTCondor config for the Galaxy-side submit node; rendered into
# galaxy.conf by templates/htcondor/galaxy.conf.j2. HTCONDOR_GALAXY_*
# env vars are merged in by the configurator.
htcondor_galaxy:
  CONDOR_HOST: "htcondor-master"
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  ALLOW_NEGOTIATOR: "*"
  DAEMON_LIST: "MASTER, SCHEDD"
  UID_DOMAIN: "galaxy"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  TRUST_UID_DOMAIN: "true"
  # Pool-password authentication between the daemons; the file is
  # created from HTCONDOR_POOL_PASSWORD at container start-up.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"
# HTCondor config for the central manager; rendered into master.conf by
# templates/htcondor/master.conf.j2. HTCONDOR_MASTER_* env vars are
# merged in by the configurator.
htcondor_master:
  BASE_CGROUP: ""
  CONDOR_HOST: "$(FULL_HOSTNAME)"
  DAEMON_LIST: "MASTER, COLLECTOR, NEGOTIATOR, SCHEDD"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  # Fix: this mapping previously declared TRUST_UID_DOMAIN twice
  # ("True" and "TRUE") — a duplicate YAML key; only one survives
  # parsing anyway, and HTCondor booleans are case-insensitive.
  TRUST_UID_DOMAIN: "True"
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_NEGOTIATOR: "*"
  ALLOW_NEGOTIATOR_SCHEDD: "*"
  ALLOW_WRITE_COLLECTOR: "*"
  ALLOW_WRITE_STARTD: "*"
  ALLOW_READ_COLLECTOR: "*"
  ALLOW_READ_STARTD: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  DOCKER_IMAGE_CACHE_SIZE: "20"
  UID_DOMAIN: "galaxy"
  # Pool-password authentication between the daemons.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"
# HTCondor config for execute nodes; rendered into executor.conf by
# templates/htcondor/executor.conf.j2. HTCONDOR_EXECUTOR_* env vars
# are merged in by the configurator.
htcondor_executor:
  CONDOR_HOST: "htcondor-master"
  DAEMON_LIST: "MASTER, STARTD"
  DISCARD_SESSION_KEYRING_ON_STARTUP: "False"
  TRUST_UID_DOMAIN: "true"
  # One execution slot per executor container.
  NUM_SLOTS: "1"
  NUM_SLOTS_TYPE_1: "1"
  BASE_CGROUP: ""
  ALLOW_ADMINISTRATOR: "*"
  ALLOW_OWNER: "*"
  ALLOW_READ: "*"
  ALLOW_WRITE: "*"
  ALLOW_CLIENT: "*"
  ALLOW_DAEMON: "*"
  ALLOW_NEGOTIATOR_SCHEDD: "*"
  ALLOW_WRITE_COLLECTOR: "*"
  ALLOW_WRITE_STARTD: "*"
  ALLOW_READ_COLLECTOR: "*"
  ALLOW_READ_STARTD: "*"
  UID_DOMAIN: "galaxy"
  SCHED_NAME: "htcondor-master"
  # Pool-password authentication between the daemons.
  SEC_PASSWORD_FILE: "/var/lib/condor/pool_password"
  SEC_DAEMON_AUTHENTICATION: "REQUIRED"
  SEC_DAEMON_INTEGRITY: "REQUIRED"
  SEC_DAEMON_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_NEGOTIATOR_AUTHENTICATION: "REQUIRED"
  SEC_NEGOTIATOR_INTEGRITY: "REQUIRED"
  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: "PASSWORD"
  SEC_CLIENT_AUTHENTICATION_METHODS: "FS, PASSWORD"
================================================
FILE: compose/docker-compose.htcondor.yml
================================================
# Extend Galaxy to run jobs using HTCondor.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=condor
      - HTCONDOR_OVERWRITE_CONFIG=true
    volumes:
      - ${EXPORT_DIR:-./export}/htcondor:/htcondor
  htcondor-master:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}
    build: galaxy-htcondor
    hostname: htcondor-master
    environment:
      - HTCONDOR_TYPE=master
      # NOTE(review): shared pool password also appears on galaxy-server
      # in docker-compose.yml — change all occurrences together.
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    volumes:
      - ${EXPORT_DIR:-./export}/htcondor:/config
    networks:
      - galaxy
  htcondor-executor:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}
    build: galaxy-htcondor
    privileged: true
    environment:
      - HTCONDOR_TYPE=executor
      - CONDOR_HOST=htcondor-master
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    volumes:
      # Jobs run here, so the executor mounts the same Galaxy paths
      # (database, tools, venv, tool_deps) at the same locations as the
      # galaxy-server container.
      - ${EXPORT_DIR:-./export}/htcondor:/config
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv
      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy
  galaxy-server:
    volumes:
      - ${EXPORT_DIR:-./export}/htcondor:/htcondor_config
FILE: compose/docker-compose.k8s.yml
================================================
# Extend Galaxy to run jobs on Kubernetes.
# This will set up Kubernetes using kind (https://kind.sigs.k8s.io).
# Note that this extension is not compatible with others like Pulsar, HTCondor, Singularity, etc.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.k8s.yml up`
services:
  galaxy-configurator:
    environment:
      - KIND_OVERWRITE_CONFIG=true
      - GALAXY_JOB_RUNNER=k8s
      # Kubeconfig written by the galaxy-kind container into /kind.
      - GALAXY_KUBECONFIG=/kind/.kube/config_in_docker
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
  galaxy-server:
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
    networks:
      - kind
  galaxy-kind:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-kind:${IMAGE_TAG:-latest}
    build: galaxy-kind
    privileged: true
    volumes:
      - ${EXPORT_DIR:-./export}/kind:/kind
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy
      - kind
# Named external-style network; presumably shared with the nodes kind
# creates via the host docker socket — verify against galaxy-kind.
networks:
  kind:
    name: kind
================================================
FILE: compose/docker-compose.pulsar.mq.yml
================================================
# Extend Pulsar to use RabbitMQ (Message Queue) instead of the REST API
# for communicating with Galaxy.
# Requirements: `docker-compose.pulsar.yml`
# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=pulsar_mq
      # Credentials must match the RABBITMQ_DEFAULT_* values below.
      - PULSAR_CONFIG_MESSAGE_QUEUE_URL=amqp://pulsar:8jfqi9uo2i30fqoifqfo09@pulsar-rabbitmq/pulsar
      - PULSAR_GALAXY_URL=http://nginx:80
  pulsar-rabbitmq:
    image: rabbitmq:alpine
    container_name: pulsar-rabbitmq
    hostname: pulsar-rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=pulsar
      - RABBITMQ_DEFAULT_PASS=8jfqi9uo2i30fqoifqfo09
      - RABBITMQ_DEFAULT_VHOST=pulsar
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar_rabbitmq:/var/lib/rabbitmq:delegated
    networks:
      - galaxy
================================================
FILE: compose/docker-compose.pulsar.yml
================================================
# Extend Galaxy to run jobs using Pulsar. With this setup, you
# don't need to share the `/galaxy/database` path with Galaxy.
# Galaxy will send all the needed files for Pulsar, and Pulsar
# will handle the rest locally on its side.
# This docker-compose file enables for Galaxy and Pulsar to
# communicate over HTTP. To enable the MQ, concatenate the
# docker-compose.pulsar.mq.yml after this one.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=pulsar_rest
      - GALAXY_PULSAR_TRANSPORT=${GALAXY_PULSAR_TRANSPORT:-curl}
      - PULSAR_OVERWRITE_CONFIG=true
      - PULSAR_JOB_RUNNER=local
      # Shared secret between Galaxy and the Pulsar REST endpoint.
      - PULSAR_CONFIG_PRIVATE_TOKEN=changemeinproduction
      - GALAXY_PULSAR_URL=http://pulsar:8913
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config
  pulsar:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/pulsar:${IMAGE_TAG:-latest}
    build: pulsar
    hostname: pulsar
    privileged: true
    volumes:
      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config
      - ${EXPORT_DIR:-./export}/pulsar/dependencies:/pulsar/dependencies
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
    networks:
      - galaxy
================================================
FILE: compose/docker-compose.singularity.yml
================================================
# Extend Galaxy to use Singularity for dependency resolution.
# This is working with the base Galaxy, but also in combination
# with different job runners, like HTCondor, or Slurm
# (Pulsar is still WIP).
# Examples:
# * `docker-compose -f docker-compose.yml -f docker-compose.singularity.yml up`
# * `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_DEPENDENCY_RESOLUTION=singularity
      # Containers replace conda, so disable conda auto-install.
      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=false
================================================
FILE: compose/docker-compose.slurm.yml
================================================
# Extend Galaxy to run jobs using Slurm.
# Example: `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up`
services:
  galaxy-configurator:
    environment:
      - GALAXY_JOB_RUNNER=slurm
      - SLURM_OVERWRITE_CONFIG=true
      - SLURM_NODE_COUNT=${SLURM_NODE_COUNT:-1}
      - SLURM_NODE_HOSTNAME=compose_slurm_node
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
  galaxy-server:
    volumes:
      # Galaxy needs the munge key and slurm.conf to submit jobs.
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
  slurmctld:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}
    build: galaxy-slurm
    command: ["slurmctld"]
    hostname: slurmctld
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
    networks:
      - galaxy
  slurm_node_discovery:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm-node-discovery:${IMAGE_TAG:-latest}
    build: galaxy-slurm-node-discovery
    volumes:
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - /var/run/docker.sock:/var/run/docker.sock
  slurm_node:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}
    build: galaxy-slurm
    command: ["slurmd"]
    privileged: true
    labels:
      # Fix: compose label values must be strings; an unquoted `true`
      # is parsed as a YAML boolean and rejected by the schema.
      slurm_node: "true"
    volumes:
      # Execute nodes mount the same Galaxy paths as galaxy-server so
      # job working directories resolve identically.
      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database
      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro
      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data
      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv
      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps
      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm
      - ${EXPORT_DIR:-./export}/munge:/etc/munge
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - galaxy
================================================
FILE: compose/docker-compose.yml
================================================
services:
  galaxy-server:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-server:${IMAGE_TAG:-latest}
    build: galaxy-server
    environment:
      # NOTE(review): default admin credentials/API key — change these
      # for any non-local deployment.
      - GALAXY_DEFAULT_ADMIN_USER=admin
      - GALAXY_DEFAULT_ADMIN_EMAIL=admin@galaxy.org
      - GALAXY_DEFAULT_ADMIN_PASSWORD=password
      - GALAXY_DEFAULT_ADMIN_KEY=fakekey
      - HTCONDOR_POOL_PASSWORD=123456789changeme
    hostname: galaxy-server
    privileged: True
    volumes:
      # This is the directory where all your files from Galaxy will be stored
      # on your host system
      - ${EXPORT_DIR:-./export}/:/export/:delegated
      - ${EXPORT_DIR:-./export}/tus_upload_store:/tus_upload_store:delegated
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - postgres
      - rabbitmq
      - redis
      - rustus
    networks:
      - galaxy
  # The galaxy-configurator is responsible for the whole configuration of
  # your setup and should be the central place of configuration.
  galaxy-configurator:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-configurator:${IMAGE_TAG:-latest}
    build: galaxy-configurator
    environment:
      - EXPORT_DIR=${EXPORT_DIR:-./export}
      # Host working directory; used to compute HOST_EXPORT_DIR when
      # EXPORT_DIR is relative (see customize.py).
      - HOST_PWD=$PWD
      - GALAXY_OVERWRITE_CONFIG=true
      - GALAXY_DEPENDENCY_RESOLUTION=conda
      - GALAXY_JOB_RUNNER=local
      - GALAXY_CONFIG_ADMIN_USERS=admin@galaxy.org
      - GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:chaopagoosaequuashie@postgres/galaxy
      - GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL:-http://localhost}
      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=true
      - GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION=amqp://galaxy:vaiJa3ieghai2ief0jao@rabbitmq/galaxy
      - GALAXY_PROXY_PREFIX=${GALAXY_PROXY_PREFIX:-}
      - GALAXY_CONFIG_CLEANUP_JOB=onsuccess
      - NGINX_OVERWRITE_CONFIG=true
    volumes:
      - ${EXPORT_DIR:-./export}/galaxy/config:/galaxy/config
      - ${EXPORT_DIR:-./export}/nginx:/etc/nginx
      - ./base_config.yml:/base_config.yml
      - ./galaxy-configurator/templates:/templates
  # The database for Galaxy
  postgres:
    image: postgres:15
    hostname: postgres
    environment:
      - POSTGRES_PASSWORD=chaopagoosaequuashie
      - POSTGRES_USER=galaxy
      - POSTGRES_DB=galaxy
    volumes:
      - ${EXPORT_DIR:-./export}/postgres/:/var/lib/postgresql/data:delegated
    networks:
      - galaxy
  # The proxy server. All web-traffic is going through here, so we can
  # offload static file serving
  # (https://docs.galaxyproject.org/en/master/admin/production.html#using-a-proxy-server)
  nginx:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-nginx:${IMAGE_TAG:-latest}
    build: galaxy-nginx
    ports:
      - 80:80
    volumes:
      - ${EXPORT_DIR:-./export}/nginx:/config:ro
      - ${EXPORT_DIR:-./export}/galaxy/static:/export/galaxy/static:ro
      - ${EXPORT_DIR:-./export}/galaxy/config/plugins:/galaxy/config/plugins:ro
    depends_on:
      - galaxy-server
    networks:
      - galaxy
  # Message queue for better performance
  rabbitmq:
    image: rabbitmq:alpine
    container_name: galaxy-rabbitmq
    hostname: rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=galaxy
      - RABBITMQ_DEFAULT_PASS=vaiJa3ieghai2ief0jao
      - RABBITMQ_DEFAULT_VHOST=galaxy
    volumes:
      - ${EXPORT_DIR:-./export}/rabbitmq:/var/lib/rabbitmq:delegated
    networks:
      - galaxy
  # Backend for Celery
  redis:
    image: redis:alpine
    container_name: galaxy-redis
    hostname: redis
    volumes:
      - ${EXPORT_DIR:-./export}/redis:/data:delegated
    networks:
      - galaxy
  # For file uploads
  rustus:
    image: s3rius/rustus:0.7.6-alpine
    container_name: galaxy-rustus
    hostname: rustus
    environment:
      - RUSTUS_STORAGE=file-storage
      - RUSTUS_DATA_DIR=/data/
      - RUSTUS_URL=${GALAXY_PROXY_PREFIX:-}/api/upload/resumable_upload
      - RUSTUS_HOOKS_HTTP_URLS=http://nginx${GALAXY_PROXY_PREFIX:-}/api/upload/hooks
      - RUSTUS_HOOKS_HTTP_PROXY_HEADERS=X-Api-Key,Cookie
      - RUSTUS_HOOKS=pre-create
      - RUSTUS_HOOKS_FORMAT=tusd
      - RUSTUS_INFO_STORAGE=redis-info-storage
      - RUSTUS_INFO_DB_DSN=redis://redis:6379/1
      - RUSTUS_MAX_BODY_SIZE=20000000
      - RUSTUS_BEHIND_PROXY=true
    volumes:
      - ${EXPORT_DIR:-./export}/tus_upload_store:/data:delegated
    depends_on:
      - redis
    networks:
      - galaxy
networks:
  galaxy:
================================================
FILE: compose/galaxy-configurator/Dockerfile
================================================
FROM alpine:3.17
# j2cli renders the Jinja2 templates; jinja2-ansible-filters provides
# the Ansible filters (e.g. `to_nice_yaml`) used by the templates.
RUN apk add --no-cache bash python3 py3-pip \
    && pip3 install j2cli[yaml] jinja2-ansible-filters
COPY ./templates /templates
COPY ./customize.py /customize.py
COPY ./run.sh /usr/bin/run.sh
# Exec form so run.sh is executed directly (PID 1) and receives signals,
# instead of being wrapped in `/bin/sh -c` by the shell form.
ENTRYPOINT ["/usr/bin/run.sh"]
================================================
FILE: compose/galaxy-configurator/customize.py
================================================
import os
def j2_environment_params():
    """Extra keyword arguments for constructing the Jinja2 Environment.

    Registers the Ansible core filters extension so templates can use
    filters known from Ansible, such as ``to_nice_yaml``.
    """
    extra_extensions = (
        "jinja2_ansible_filters.AnsibleCoreFiltersExtension",
    )
    return {"extensions": extra_extensions}
def alter_context(context):
    """Build the full template context from the environment and input file.

    Env variables that start with a known prefix (e.g. ``GALAXY_CONFIG_*``)
    are collected into a nested dict under a short name (e.g. ``galaxy``),
    and string values ``"true"``/``"false"`` (any case) are converted to
    Python booleans.

    NOTE(review): the original comment claimed file values overwrite env
    values. That holds for top-level keys (``new_context.update(context)``
    runs after seeding from ``os.environ``), but prefixed env vars are
    folded into the nested dicts *after* that merge, so for nested keys
    the env value wins — confirm which behavior is intended.

    TODO: Unit test
    """
    new_context = dict(os.environ)
    # Maps env-var prefixes to the nested dict each one is collected into.
    translations = {
        "GALAXY_CONFIG_": "galaxy",
        "GRAVITY_CONFIG_": "gravity",
        "GALAXY_JOB_METRICS_": "galaxy_job_metrics",
        "NGINX_CONFIG_": "nginx",
        "SLURM_CONFIG_": "slurm",
        "HTCONDOR_GALAXY_": "htcondor_galaxy",
        "HTCONDOR_MASTER_": "htcondor_master",
        "HTCONDOR_EXECUTOR_": "htcondor_executor",
        "PULSAR_CONFIG_": "pulsar"
    }
    # Add values from possible input file if existent
    if context is not None and len(context) > 0:
        new_context.update(context)
    # Translate string-boolean to Python boolean
    for key, value in new_context.items():
        if not isinstance(value, str):
            continue
        if value.lower() == "true":
            new_context[key] = True
        elif value.lower() == "false":
            new_context[key] = False
    # Ensure every translation target exists, keeping any dict that came
    # in via the input file.
    for to in translations.values():
        if to not in new_context:
            new_context[to] = {}
    # Fold prefixed env vars into their nested dicts; the original
    # prefixed key also stays in the context.
    for key, value in new_context.items():
        for frm, to in translations.items():
            if key.startswith(frm):
                # Format key depending on it being uppercase or not
                # (to cope with different formatings: compare Slurm
                # with Galaxy)
                key = key[len(frm):]
                if key.isupper():
                    key = key.lower()
                new_context[to][key] = value
    context = new_context
    # Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative
    if "HOST_EXPORT_DIR" not in context and "EXPORT_DIR" in context \
            and "HOST_PWD" in context:
        if context["EXPORT_DIR"].startswith("./"):
            context["HOST_EXPORT_DIR"] = context["HOST_PWD"] \
                + context["EXPORT_DIR"][1:]
        else:
            context["HOST_EXPORT_DIR"] = context["EXPORT_DIR"]
    return context
================================================
FILE: compose/galaxy-configurator/run.sh
================================================
#!/bin/bash
# Renders all configuration files from the Jinja2 templates in /templates
# into the mounted config directories. A `configurator.lock` file per
# directory signals the consuming services to wait until configuration
# of that directory has finished.

# Set default config dirs
export GALAXY_CONF_DIR=${GALAXY_CONF_DIR:-/galaxy/config} \
    NGINX_CONF_DIR=${NGINX_CONF_DIR:-/etc/nginx/} \
    SLURM_CONF_DIR=${SLURM_CONF_DIR:-/etc/slurm} \
    HTCONDOR_CONF_DIR=${HTCONDOR_CONF_DIR:-/htcondor} \
    PULSAR_CONF_DIR=${PULSAR_CONF_DIR:-/pulsar/config} \
    KIND_CONF_DIR=${KIND_CONF_DIR:-/kind}

echo "Locking all configurations"
locks=("$GALAXY_CONF_DIR" "$SLURM_CONF_DIR" "$HTCONDOR_CONF_DIR" "$PULSAR_CONF_DIR" "$KIND_CONF_DIR")
for lock in "${locks[@]}"; do
    echo "Locking $lock"
    touch "${lock}/configurator.lock"
done

# Nginx configuration
if [ "$NGINX_OVERWRITE_CONFIG" != "true" ]; then
    echo "NGINX_OVERWRITE_CONFIG is not true. Skipping configuration of Nginx"
else
    nginx_configs=( "nginx.conf" )
    for conf in "${nginx_configs[@]}"; do
        echo "Configuring $conf"
        j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/nginx/$conf.j2" /base_config.yml
        echo "The following changes will be applied to $conf:"
        # diff is informational only; a non-zero exit (files differ) is fine
        diff "${NGINX_CONF_DIR}/$conf" "/tmp/$conf"
        mv -f "/tmp/$conf" "${NGINX_CONF_DIR}/$conf"
    done
fi

# Slurm configuration
if [ "$SLURM_OVERWRITE_CONFIG" != "true" ]; then
    echo "SLURM_OVERWRITE_CONFIG is not true. Skipping configuration of Slurm"
else
    slurm_configs=( "slurm.conf" )
    for conf in "${slurm_configs[@]}"; do
        echo "Configuring $conf"
        j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/slurm/$conf.j2" /base_config.yml
        echo "The following changes will be applied to $conf:"
        diff "${SLURM_CONF_DIR}/$conf" "/tmp/$conf"
        mv -f "/tmp/$conf" "${SLURM_CONF_DIR}/$conf"
    done
    rm "${SLURM_CONF_DIR}/configurator.lock"
    echo "Lock for Slurm config released"
fi

# HTCondor configuration
if [ "$HTCONDOR_OVERWRITE_CONFIG" != "true" ]; then
    echo "HTCONDOR_OVERWRITE_CONFIG is not true. Skipping configuration of HTCondor"
else
    htcondor_configs=( "galaxy.conf" "master.conf" "executor.conf" )
    for conf in "${htcondor_configs[@]}"; do
        echo "Configuring $conf"
        j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/htcondor/$conf.j2" /base_config.yml
        echo "The following changes will be applied to $conf:"
        diff "${HTCONDOR_CONF_DIR}/$conf" "/tmp/$conf"
        mv -f "/tmp/$conf" "${HTCONDOR_CONF_DIR}/$conf"
    done
    rm "${HTCONDOR_CONF_DIR}/configurator.lock"
    echo "Lock for HTCondor config released"
fi

# Pulsar configuration
if [ "$PULSAR_OVERWRITE_CONFIG" != "true" ]; then
    echo "PULSAR_OVERWRITE_CONFIG is not true. Skipping configuration of Pulsar"
else
    pulsar_configs=( "server.ini" "app.yml" )
    for conf in "${pulsar_configs[@]}"; do
        echo "Configuring $conf"
        j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/pulsar/$conf.j2" /base_config.yml
        echo "The following changes will be applied to $conf:"
        diff "${PULSAR_CONF_DIR}/$conf" "/tmp/$conf"
        mv -f "/tmp/$conf" "${PULSAR_CONF_DIR}/$conf"
    done
    rm "${PULSAR_CONF_DIR}/configurator.lock"
    echo "Lock for Pulsar config released"
fi

# Kind configuration
if [ "$KIND_OVERWRITE_CONFIG" != "true" ]; then
    echo "KIND_OVERWRITE_CONFIG is not true. Skipping configuration of Kind"
else
    kind_configs=( "kind_config.yml" "k8s_config/persistent_volumes.yml" "k8s_config/pv_claims.yml" )
    # -p: tolerate re-runs where the directories already exist
    mkdir -p /tmp/k8s_config
    mkdir -p "${KIND_CONF_DIR}/k8s_config"
    for conf in "${kind_configs[@]}"; do
        echo "Configuring $conf"
        j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/kind/$conf.j2" /base_config.yml
        echo "The following changes will be applied to $conf:"
        diff "${KIND_CONF_DIR}/$conf" "/tmp/$conf"
        mv -f "/tmp/$conf" "${KIND_CONF_DIR}/$conf"
    done
    rm "${KIND_CONF_DIR}/configurator.lock"
    echo "Lock for Kind config released"
    sleep 5
    echo "Waiting for Kind to create the cluster"
    until [ -f "${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}" ] && echo Found KUBECONFIG; do
        sleep 0.1;
    done;
    chmod a+r "${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}"
fi

echo "Releasing all locks (except Galaxy) if it didn't happen already"
locks=("$SLURM_CONF_DIR" "$HTCONDOR_CONF_DIR" "$PULSAR_CONF_DIR" "$KIND_CONF_DIR")
for lock in "${locks[@]}"; do
    echo "Unlocking $lock"
    # -f: the sections above may already have removed their lock file
    rm -f "${lock}/configurator.lock"
done

# Galaxy configuration
if [ "$GALAXY_OVERWRITE_CONFIG" != "true" ]; then
    echo "GALAXY_OVERWRITE_CONFIG is not true. Skipping configuration of Galaxy"
    echo "Lock for Galaxy config released"
    rm "${GALAXY_CONF_DIR}/configurator.lock"
    exit 0
fi
cd "${GALAXY_CONF_DIR}" || { echo "Error: Could not find Galaxy config dir"; exit 1; }
echo "Waiting for Galaxy config dir to be initially populated (in case of first startup)"
until [ "$(ls -p | grep -v /)" != "" ] && echo Galaxy config populated; do
    sleep 0.5;
done;
if [ ! -f /base_config.yml ]; then
    echo "Warning: 'base_config.yml' does not exist. Configuration will solely happen through env!"
    touch /base_config.yml
fi
galaxy_configs=( "job_conf.xml" "galaxy.yml" "job_metrics.xml" "container_resolvers_conf.yml" "dependency_resolvers_conf.xml" "GALAXY_PROXY_PREFIX.txt" )
for conf in "${galaxy_configs[@]}"; do
    echo "Configuring $conf"
    j2 --customize /customize.py --undefined -o "/tmp/$conf" "/templates/galaxy/$conf.j2" /base_config.yml
    echo "The following changes will be applied to $conf:"
    diff "${GALAXY_CONF_DIR}/$conf" "/tmp/$conf"
    mv -f "/tmp/$conf" "${GALAXY_CONF_DIR}/$conf"
done
echo "Finished configuring Galaxy"
echo "Lock for Galaxy config released"
rm "${GALAXY_CONF_DIR}/configurator.lock"
if [ "$DONT_EXIT" = "true" ]; then
    echo "Integration test detected. Galaxy Configurator will go to sleep (to not interrupt docker-compose)."
    sleep infinity
fi
================================================
FILE: compose/galaxy-configurator/templates/galaxy/GALAXY_PROXY_PREFIX.txt.j2
================================================
{{ GALAXY_PROXY_PREFIX }}
================================================
FILE: compose/galaxy-configurator/templates/galaxy/container_resolvers_conf.yml.j2
================================================
# Resolvers that are potentially used by default are uncommented (comments describe under
# which premises they are in the defaults).
# Note that commented yaml does not have a space after the #
# while additional explanations do.
# Explicit container resolvers
# ============================
# get a container description (URI) for an explicit singularity container requirement
- type: explicit_singularity
# get a cached container description (path) for singularity
# pulls the container into a cache directory if not yet there
- type: cached_explicit_singularity
# set the cache directory for storing images
#cache_directory: database/container_cache/singularity/explicit
# Mulled container resolvers
# ==========================
# The following uncommented container resolvers are in the defaults
# if ``enable_mulled_containers`` is set in ``galaxy.yml`` (which is the default).
# get a container description for a cached mulled singularity container
# checks if the image file exists in `cache_directory`
- type: cached_mulled_singularity
#
#cache_directory: database/container_cache/singularity/mulled
#
# the method for caching directory listings (not the method for image caching)
# can be uncached or dir_mtime (the latter only determines the directory listing
# if the modification time of the directory changed)
#cache_directory_cacher_type: uncached
# Resolves container images from quay.io/NAMESPACE/MULLED_HASH where the
# mulled hash describes which packages and versions should be in the container
#
# These resolvers are generally listed after the cached_* resolvers, so that images
# are not pulled if they are already cached.
#
# When pulling the image file will be stored in the configured cache dir.
# If auto_install is True the result will point to the cached image file
# and to quay.io/NAMESPACE/MULLED_HASH otherwise.
- type: mulled_singularity
auto_install: False
#namespace: biocontainers
# In addition to the arguments of `mulled` there are cache_directory
# and cache_directory_cacher_type. See the description at `cached_explicit_singularity`
# and note the minor difference in the default for `cache_directory`
#cache_directory: database/container_cache/singularity/mulled
#cache_directory_cacher_type: uncached
# Building container resolvers
# ----------------------------
#
# The following uncommented container resolvers are included in the default
# if ``docker`` is available
- type: build_mulled_singularity
auto_install: False
#hash_func: v2
#cache_directory: database/container_cache/singularity/mulled
#cache_directory_cacher_type: uncached
# Other explicit container resolvers
# ----------------------------------
#-type: fallback_singularity
#identifier: A_VALID_CONTAINER_IDENTIFIER
#-type: fallback_no_requirements_singularity
#identifier: A_VALID_CONTAINER_IDENTIFIER
#-type: requires_galaxy_environment_singularity
#identifier: A_VALID_CONTAINER_IDENTIFIER
# The mapping container resolver allows to specify a list of mappings from tools
# (tool_id) to containers (type and identifier).
#-type: mapping
#mappings:
#- container_type: singularity
#tool_id: A_TOOL_ID
#identifier: A_VALID_CONTAINER_IDENTIFIER
================================================
FILE: compose/galaxy-configurator/templates/galaxy/dependency_resolvers_conf.xml.j2
================================================
{% if GALAXY_DEPENDENCY_RESOLUTION != 'singularity' %}
{% endif %}
================================================
FILE: compose/galaxy-configurator/templates/galaxy/galaxy.yml.j2
================================================
gravity:
{{ gravity | to_nice_yaml(indent=2) | indent(2, first=True) }}
galaxy:
{{ galaxy | to_nice_yaml(indent=2) | indent(2, first=True) }}
{% if GALAXY_PROXY_PREFIX %}
galaxy_url_prefix: /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}
{% endif %}
{% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' %}
enable_mulled_containers: true
containers_resolvers_config_file: container_resolvers_conf.yml
{% endif %}
================================================
FILE: compose/galaxy-configurator/templates/galaxy/job_conf.xml.j2
================================================
/usr/lib/slurm-drmaa/lib/libdrmaa.so
{{ GALAXY_PULSAR_TRANSPORT | default('curl') }}
{% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}
{{ PULSAR_GALAXY_URL }}
{{ PULSAR_CONFIG_MESSAGE_QUEUE_URL}}
True
30
True
{% endif -%}
{% if GALAXY_JOB_RUNNER == 'k8s' -%}
{{ GALAXY_KUBECONFIG }}
{{ GALAXY_K8S_PVC | default('galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps') }}
{% endif -%}
{% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' -%}
/home/galaxyC/tmp/singularity/tmp
true
{% if GALAXY_JOB_RUNNER == 'local' -%}
{{ EXPORT_DIR | regex_replace("^.", "") }}/$galaxy_root:$galaxy_root:ro,{{ EXPORT_DIR | regex_replace("^.", "") }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$tool_directory:$tool_directory:ro,{{ EXPORT_DIR | regex_replace("^.", "") }}/$job_directory:$job_directory:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$working_directory:$working_directory:rw,{{ EXPORT_DIR | regex_replace("^.", "") }}/$default_file_path:$default_file_path:rw
{% endif -%}
{% elif GALAXY_DEPENDENCY_RESOLUTION == 'docker' -%}
true
false
{% if GALAXY_JOB_RUNNER == 'local' -%}
{{ HOST_EXPORT_DIR }}/$galaxy_root:$galaxy_root:ro,{{ HOST_EXPORT_DIR }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ HOST_EXPORT_DIR }}/$tool_directory:$tool_directory:ro,{{ HOST_EXPORT_DIR }}/$job_directory:$job_directory:rw,{{ HOST_EXPORT_DIR }}/$working_directory:$working_directory:rw,{{ HOST_EXPORT_DIR }}/$default_file_path:$default_file_path:rw
{% endif -%}
{% elif not GALAXY_JOB_RUNNER.startswith('pulsar') and GALAXY_JOB_RUNNER != 'k8s' -%}
{% endif -%}
{% if GALAXY_JOB_RUNNER == 'pulsar_rest' -%}
{{ GALAXY_PULSAR_URL }}
{{ PULSAR_CONFIG_PRIVATE_TOKEN }}
remote
{% endif -%}
{% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}
{{ PULSAR_JOBS_DIRECTORY | default('/pulsar/files/staging/') }}
{% endif -%}
{% if GALAXY_JOB_RUNNER == 'k8s' -%}
{{ GALAXY_K8S_DOCKER_REPO_DEFAULT | default('docker.io') }}
{% if GALAXY_K8S_DOCKER_OWNER_DEFAULT -%}{{ GALAXY_K8S_DOCKER_OWNER_DEFAULT }}{% endif -%}
{{ GALAXY_K8S_DOCKER_IMAGE_DEFAULT | default('python') }}
{{ GALAXY_K8S_DOCKER_TAG_DEFAULT | default('3.10.15') }}
true
{% endif -%}
================================================
FILE: compose/galaxy-configurator/templates/galaxy/job_metrics.xml.j2
================================================
{% if galaxy_job_metrics.core %}
{% endif %}
{% if galaxy_job_metrics.cpuinfo and galaxy_job_metrics.cpuinfo == "verbose" %}
{% elif galaxy_job_metrics.cpuinfo %}
{% endif %}
{% if galaxy_job_metrics.meminfo %}
{% endif %}
{% if galaxy_job_metrics.uname %}
{% endif %}
{% if galaxy_job_metrics.env %}
{% endif %}
================================================
FILE: compose/galaxy-configurator/templates/htcondor/executor.conf.j2
================================================
{% for key, value in htcondor_executor.items() -%}
{{ key }}={{ value }}
{% endfor %}
================================================
FILE: compose/galaxy-configurator/templates/htcondor/galaxy.conf.j2
================================================
{% for key, value in htcondor_galaxy.items() -%}
{{ key }}={{ value }}
{% endfor %}
================================================
FILE: compose/galaxy-configurator/templates/htcondor/master.conf.j2
================================================
{% for key, value in htcondor_master.items() -%}
{{ key }}={{ value }}
{% endfor %}
================================================
FILE: compose/galaxy-configurator/templates/kind/k8s_config/persistent_volumes.yml.j2
================================================
kind: PersistentVolume
apiVersion: v1
metadata:
name: galaxy-root
spec:
storageClassName: standard
capacity:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
path: {{ HOST_EXPORT_DIR }}/galaxy
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: galaxy-database
spec:
storageClassName: standard
capacity:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
path: {{ HOST_EXPORT_DIR }}/galaxy/database
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: galaxy-tool-deps
spec:
storageClassName: standard
capacity:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
path: {{ HOST_EXPORT_DIR }}/tool_deps
================================================
FILE: compose/galaxy-configurator/templates/kind/k8s_config/pv_claims.yml.j2
================================================
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: galaxy-root
spec:
storageClassName: standard
accessModes:
- ReadWriteMany
volumeName: galaxy-root
resources:
requests:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: galaxy-database
spec:
storageClassName: standard
accessModes:
- ReadWriteMany
volumeName: galaxy-database
resources:
requests:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: galaxy-tool-deps
spec:
storageClassName: standard
accessModes:
- ReadWriteMany
volumeName: galaxy-tool-deps
resources:
requests:
storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi
================================================
FILE: compose/galaxy-configurator/templates/kind/kind_config.yml.j2
================================================
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
extraMounts:
- hostPath: {{ HOST_EXPORT_DIR }}/galaxy
containerPath: {{ HOST_EXPORT_DIR }}/galaxy
- hostPath: {{ HOST_EXPORT_DIR }}/tool_deps
containerPath: {{ HOST_EXPORT_DIR }}/tool_deps
{% set kind_node_count = KIND_NODE_COUNT | default(1) | int -%}
{% for i in range(1, kind_node_count + 1) -%}
- role: worker
extraMounts:
- hostPath: {{ HOST_EXPORT_DIR }}/galaxy
containerPath: {{ HOST_EXPORT_DIR }}/galaxy
- hostPath: {{ HOST_EXPORT_DIR }}/tool_deps
containerPath: {{ HOST_EXPORT_DIR }}/tool_deps
{% endfor %}
================================================
FILE: compose/galaxy-configurator/templates/nginx/nginx.conf.j2
================================================
events { }
http {
include mime.types;
# See https://docs.galaxyproject.org/en/latest/admin/nginx.html#serving-galaxy-at-the-web-server-root
# compress responses whenever possible
gzip on;
gzip_http_version 1.1;
gzip_vary on;
gzip_comp_level 6;
gzip_proxied any;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_buffers 16 8k;
# allow up to 3 minutes for Galaxy to respond to slow requests before timing out
proxy_read_timeout {{ NGINX_PROXY_READ_TIMEOUT | default(180, true) }};
proxy_buffers 8 16k;
proxy_buffer_size 16k;
# maximum file upload size
client_max_body_size 10g;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
# use a variable for convenience
set $galaxy_static /export/galaxy/static;
set $galaxy_root /export/galaxy;
# proxy all requests not matching other locations to gunicorn
location /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }} {
proxy_pass http://galaxy-server:5555;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
}
# serve framework static content
location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/static {
alias $galaxy_static;
expires 24h;
}
location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/robots.txt {
alias $galaxy_static/robots.txt;
expires 24h;
}
location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/favicon.ico {
alias $galaxy_static/favicon.ico;
expires 24h;
}
# serve visualization plugin static content
location ~ ^{{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/plugins/(?<plug_type>[^/]+?)/((?<vis_d>[^/_]*)_?)?(?<vis_name>[^/]*?)/static/(?<static_file>.*?)$ {
alias $galaxy_root/config/plugins/$plug_type/;
try_files $vis_d/${vis_d}_${vis_name}/static/$static_file
$vis_d/static/$static_file =404;
}
# delegated uploads
location {{ GALAXY_PROXY_PREFIX | regex_replace("/$", "") }}/api/upload/resumable_upload {
# Disable request and response buffering
proxy_request_buffering off;
proxy_buffering off;
proxy_http_version 1.1;
# Add X-Forwarded-* headers
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
client_max_body_size 0;
proxy_pass http://rustus:1081;
}
rewrite ^/{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}$ /{{ GALAXY_PROXY_PREFIX | regex_replace("^/", "") | regex_replace("/$", "") }}/ last;
}
}
================================================
FILE: compose/galaxy-configurator/templates/pulsar/app.yml.j2
================================================
managers:
{% if PULSAR_JOB_RUNNER == 'local' -%}
_default_:
type: queued_python
num_concurrent_jobs: {{ PULSAR_NUM_CONCURRENT_JOBS | default(1) }}
{% endif %}
{{ pulsar | to_nice_yaml(indent=2) }}
================================================
FILE: compose/galaxy-configurator/templates/pulsar/server.ini.j2
================================================
[server:main]
use = egg:Paste#http
port = {{ PULSAR_PORT | default(8913) }}
host = {{ PULSAR_HOSTNAME | default('pulsar') }}
[app:main]
paste.app_factory = pulsar.web.wsgi:app_factory
app_config = %(here)s/app.yml
## Configure uWSGI (if used).
[uwsgi]
master = True
paste-logger = true
http = {{ PULSAR_HOSTNAME | default('pulsar') }}:{{ PULSAR_PORT | default(8913) }}
processes = 1
enable-threads = True
[watcher:web]
cmd = chaussette --fd $(circus.sockets.web) paste:server.ini
use_sockets = True
# Pulsar must be single-process for now...
numprocesses = 1
[socket:web]
host = localhost
port = 8913
## Configure Python loggers.
[loggers]
keys = root,pulsar
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
handlers = console
[logger_pulsar]
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
handlers = console
qualname = pulsar
propagate = 1
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = {{ PULSAR_LOG_LEVEL | default('INFO') }}
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
================================================
FILE: compose/galaxy-configurator/templates/slurm/slurm.conf.j2
================================================
{% for key, value in slurm.items() -%}
{{ key }}={{ value }}
{% endfor %}
{% set slurm_node_count = SLURM_NODE_COUNT | int -%}
{% for i in range(1, slurm_node_count + 1) -%}
NodeName={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeAddr={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeHostname={{ SLURM_NODE_HOSTNAME }}_{{ i }} CPUs={{ SLURM_NODE_CPUS | default(1, true) }} RealMemory={{ SLURM_NODE_MEMORY | default(1024, true) }} State=UNKNOWN
{% endfor %}
PartitionName=work Nodes={% for i in range(1, slurm_node_count + 1) -%}{{ SLURM_NODE_HOSTNAME }}_{{ i }}{%- if not loop.last -%},{% endif %}{% endfor %} Default=YES MaxTime=INFINITE State=UP Shared=YES # TODO
================================================
FILE: compose/galaxy-htcondor/Dockerfile
================================================
# Registry coordinates of the galaxy-container-base image used below.
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest

# Stage 1: fetch the Galaxy source tree and strip it down to the minimal
# set of Python packages (galaxy.* libraries) needed on the cluster side.
FROM buildpack-deps:22.04 as galaxy_dependencies
ARG GALAXY_RELEASE=release_24.1
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy
ENV GALAXY_ROOT_DIR=/galaxy
ENV GALAXY_LIBRARY=$GALAXY_ROOT_DIR/lib
# Download Galaxy source, but only keep necessary dependencies
RUN mkdir "${GALAXY_ROOT_DIR}" \
    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
    && cd $GALAXY_ROOT_DIR \
    && ls . | grep -v "lib" | xargs rm -rf \
    && cd $GALAXY_ROOT_DIR/lib \
    && ls . | grep -v "galaxy\|galaxy_ext" | xargs rm -rf \
    && cd $GALAXY_ROOT_DIR/lib/galaxy \
    && ls . | grep -v "__init__.py\|datatypes\|exceptions\|files\|metadata\|model\|util\|security" | xargs rm -rf

# Stage 2: the runnable HTCondor image (master/executor role is picked at
# runtime by start.sh via $HTCONDOR_TYPE).
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG as final
ENV DEBIAN_FRONTEND=noninteractive
ENV GALAXY_USER=galaxy \
    GALAXY_GROUP=galaxy \
    GALAXY_UID=1450 \
    GALAXY_GID=1450 \
    GALAXY_HOME=/home/galaxy \
    GALAXY_ROOT_DIR=/galaxy
# Fixed UID/GID so file ownership matches the other Galaxy containers.
RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
    && mkdir $GALAXY_HOME \
    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME
ENV EXPORT_DIR=/export \
    # Setting a standard encoding. This can get important for things like the unix sort tool.
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8
# Default slot size advertised by the condor executor.
ENV CONDOR_CPUS=1 \
    CONDOR_MEMORY=1024
# Condor master
# NOTE(review): apt-key is deprecated in Ubuntu 22.04; consider switching to
# a keyring file referenced with "signed-by" in the sources list entry.
RUN echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \
    && echo 'Acquire::http::Timeout "20";' > /etc/apt/apt.conf.d/98AcquireTimeout \
    && echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/99AcquireRetries \
    && apt-get update -qq && apt-get install -y --no-install-recommends locales gnupg2 curl \
    && locale-gen en_US.UTF-8 && dpkg-reconfigure locales \
    && curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \
    && echo "deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main" >> /etc/apt/sources.list \
    && apt-get update -qq && apt-get install -y --no-install-recommends \
        supervisor \
        htcondor \
        wget \
    # Pre-create the condor log files so the tail-based log forwarders in
    # supervisord.conf can start before condor writes anything.
    && touch /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog \
    && mkdir -p /var/run/condor/ /var/lock/condor/ \
    && chown -R condor: /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog /var/run/condor/ /var/lock/condor/
ADD supervisord.conf /etc/supervisord.conf
# Copy Galaxy dependencies
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=galaxy_dependencies $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR
COPY start.sh /usr/bin/start.sh
# Install python3 in a single layer: use apt-get (stable scripting CLI),
# skip recommends and drop the package lists to keep the layer small.
RUN apt-get update -qq \
    && apt-get install -y --no-install-recommends python3 \
    && rm -rf /var/lib/apt/lists/* \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 10
# Exec form so start.sh runs as PID 1 and receives container signals.
ENTRYPOINT ["/usr/bin/start.sh"]
================================================
FILE: compose/galaxy-htcondor/start.sh
================================================
#!/bin/bash
# Entrypoint for the HTCondor containers. The role-specific configuration
# file is selected via $HTCONDOR_TYPE (e.g. master, executor, galaxy).

# Give the configurator a head start, then poll for its lock file.
sleep 5
echo "Waiting for Galaxy configurator to finish and release lock"
until [ ! -f /config/configurator.lock ] && echo Lock released; do
    sleep 0.1;
done;

# Install the role-specific condor config rendered by the configurator.
cp -f "/config/$HTCONDOR_TYPE.conf" /etc/condor/condor_config.local
condor_store_cred -p "$HTCONDOR_POOL_PASSWORD" -f /var/lib/condor/pool_password

# exec so supervisord replaces this shell as PID 1 and receives container
# stop signals directly instead of them landing on a parent shell.
exec /usr/bin/supervisord
================================================
FILE: compose/galaxy-htcondor/supervisord.conf
================================================
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; socket file mode (default 0700)
[supervisord]
nodaemon = true
[program:htcondor]
command=/usr/sbin/condor_master -pidfile /var/run/condor/condor.pid -f -t
#stdout_logfile=/var/log/htcondor.log
#stderr_logfile=/var/log/htcondor.log
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=0
autostart=true
autorestart=false
[program:log-condor-collector]
command=tail -f -n1000 /var/log/condor/CollectorLog
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=5
autostart=true
autorestart=false
user=condor
[program:log-condor-negotiator]
command=tail -f -n1000 /var/log/condor/NegotiatorLog
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
stopwaitsecs=1
startretries=5
autostart=true
autorestart=false
user=condor
# [program:telegraf]
# command=/usr/bin/telegraf --config /etc/telegraf/telegraf.conf
# stdout_logfile=/dev/stdout
# stdout_logfile_maxbytes=0
# stderr_logfile=/dev/stderr
# stderr_logfile_maxbytes=0
# stopwaitsecs=1
# startretries=5
# autostart=true
# autorestart=false
# user=root
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
================================================
FILE: compose/galaxy-kind/Dockerfile
================================================
FROM alpine:3.17
# Pinned releases of kind and kubectl downloaded below.
ARG KIND_RELEASE=v0.24.0
ARG KUBECTL_RELEASE=v1.31.1
# Docker CLI: kind drives the host's Docker daemon (socket is mounted in).
RUN apk add --no-cache docker
# wget is only needed to fetch the static binaries, so it is installed as a
# virtual package and removed again at the end of the layer.
RUN apk add --no-cache --virtual build-deps wget \
    && apk add --no-cache bash \
    && wget -O /usr/bin/kind https://kind.sigs.k8s.io/dl/${KIND_RELEASE}/kind-linux-amd64 \
    && chmod +x /usr/bin/kind \
    && wget -O /usr/bin/kubectl https://dl.k8s.io/release/${KUBECTL_RELEASE}/bin/linux/amd64/kubectl \
    && chmod +x /usr/bin/kubectl \
    && apk del build-deps
# Shared config directory; the kubeconfig written here is consumed by the
# entrypoint (and, per docker-entrypoint.sh, re-exported for other containers).
ENV KIND_CONFIG_DIR=/kind
ENV KUBECONFIG=${KIND_CONFIG_DIR}/.kube/config
COPY docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ]
================================================
FILE: compose/galaxy-kind/docker-entrypoint.sh
================================================
#!/bin/bash
# Entrypoint of the kind (Kubernetes-in-Docker) side-car container:
# (re)creates the cluster, publishes a kubeconfig reachable from other
# containers, applies the rendered k8s manifests, then waits for SIGTERM.

# Tear the cluster down when Docker stops this container.
_term() {
    echo "Caught SIGTERM signal!"
    echo "Trying to stop Kind cluster"
    kind delete cluster --name "${K8S_CLUSTER_NAME:-galaxy}" || true
    exit 0
}
trap _term SIGTERM

# Unless explicitly skipped, wait for the configurator to render the kind
# configuration and release its lock file.
if [ -z "$KIND_SKIP_CONFIG_LOCK" ]; then
    sleep 2
    echo "Waiting for Galaxy configurator to finish and release lock"
    until [ ! -f "$KIND_CONFIG_DIR/configurator.lock" ] && echo Lock released; do
        sleep 0.1;
    done;
fi

# Start from a clean slate: drop a stale kubeconfig (-f: no error if absent)
# and any leftover cluster from a previous run.
rm -f "${KUBECONFIG}_in_docker"
kind delete cluster --name "${K8S_CLUSTER_NAME:-galaxy}" || true
kind create cluster --config "$KIND_CONFIG_DIR/kind_config.yml" --kubeconfig "$KUBECONFIG" --name "${K8S_CLUSTER_NAME:-galaxy}" || true

# Create custom kubeconfig, that allows to reach the control-plane from inside the containers:
# replace the host-published 127.0.0.1:<port> endpoint with the control-plane
# container's own IP on the Docker network.
REAL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${K8S_CLUSTER_NAME:-galaxy}-control-plane")
cp "${KUBECONFIG}" "${KUBECONFIG}_in_docker"
sed -i "s/127.0.0.1:[0-9]*$/${REAL_IP}:6443/g" "${KUBECONFIG}_in_docker"
export KUBECONFIG="${KUBECONFIG}_in_docker"
kubectl cluster-info

# Not all resources can be easily updated, therefore it is easier
# to remove the resources first, while the whole setup is
# still starting up
ls "$KIND_CONFIG_DIR/k8s_config"
kubectl delete -f "$KIND_CONFIG_DIR/k8s_config" || true
kubectl apply -f "$KIND_CONFIG_DIR/k8s_config"

# Wait for SIGTERM and delete cluster
sleep inf & wait
================================================
FILE: compose/galaxy-nginx/Dockerfile
================================================
FROM nginx:1.27-alpine
COPY start.sh /usr/bin/start.sh
# start.sh is run with /bin/sh because the alpine base image ships no bash,
# despite the script's bash shebang.
# NOTE(review): confirm start.sh stays POSIX-sh compatible.
CMD [ "/bin/sh", "/usr/bin/start.sh"]
================================================
FILE: compose/galaxy-nginx/start.sh
================================================
#!/bin/bash
# Wait until the configurator has rendered the nginx configuration, copy it
# into place, then run nginx in the foreground.
# NOTE(review): the Dockerfile executes this with /bin/sh, not bash.
sleep 5 # ToDo: Use locking or so to be sure we really have the newest version
echo "Waiting for Nginx config"
# NOTE(review): `ls -p | grep -v /config` lists the *current* directory and
# filters out lines containing "/config" -- presumably the intent is to wait
# for files to appear in /config (e.g. `ls -p /config | grep -v /`); verify.
until [ "$(ls -p | grep -v /config)" != "" ] && echo Nginx config found; do
    sleep 0.5;
done;
cp -f /config/* /etc/nginx
echo "Running nginx startup command"
nginx -g "daemon off;"
================================================
FILE: compose/galaxy-server/Dockerfile
================================================
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest
# Shared base stage: directory layout, galaxy user and conda prefix reused
# by the miniforge and galaxy build stages.
FROM buildpack-deps:22.04 as build_base
ENV EXPORT_DIR=/export \
    GALAXY_ROOT_DIR=/galaxy \
    HTCONDOR_ROOT=/opt/htcondor
# Paths derived from GALAXY_ROOT_DIR / EXPORT_DIR (separate ENV so the
# variables above are already resolvable).
ENV GALAXY_STATIC_DIR=$GALAXY_ROOT_DIR/static \
    GALAXY_EXPORT=$EXPORT_DIR/galaxy \
    GALAXY_CONFIG_DIR=$GALAXY_ROOT_DIR/config \
    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=/tool_deps \
    GALAXY_CONFIG_TOOL_PATH=$GALAXY_ROOT_DIR/tools \
    GALAXY_CONFIG_TOOL_DATA_PATH=$GALAXY_ROOT_DIR/tool-data \
    GALAXY_VIRTUAL_ENV=$GALAXY_ROOT_DIR/.venv \
    GALAXY_DATABASE_PATH=$GALAXY_ROOT_DIR/database
# Unprivileged account that owns the Galaxy installation.
ENV GALAXY_USER=galaxy \
    GALAXY_GROUP=galaxy \
    GALAXY_UID=1450 \
    GALAXY_GID=1450 \
    GALAXY_HOME=/home/galaxy
ENV GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda \
    MINIFORGE_VERSION=24.3.0-0
# Fixed UID/GID so ownership matches across stages and the final image.
RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
    && mkdir $GALAXY_HOME \
    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME
# Stage: install Miniforge/conda into the tool-dependency prefix; the result
# is copied into the final image with COPY --from=build_miniforge.
FROM build_base as build_miniforge
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh
# Install Miniforge
RUN curl -s -L https://github.com/conda-forge/miniforge/releases/download/$MINIFORGE_VERSION/Miniforge3-$MINIFORGE_VERSION-Linux-x86_64.sh > ~/miniforge.sh \
    && /bin/bash ~/miniforge.sh -b -p $GALAXY_CONDA_PREFIX/ \
    && rm ~/miniforge.sh \
    && ln -s $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh /etc/profile.d/conda.sh \
    && echo ". $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh" >> $GALAXY_HOME/.bashrc \
    && echo "conda activate base" >> $GALAXY_HOME/.bashrc \
    && export PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH \
    && conda config --add channels bioconda \
    && conda install virtualenv pip ephemeris \
    && conda clean --packages -t -i \
    # Preserve the conda user state so it can be copied along with $GALAXY_HOME.
    && cp -r ~/.conda $GALAXY_HOME && cp ~/.condarc $GALAXY_HOME \
    && /usr/bin/common_cleanup.sh
# Stage: download Galaxy, build its virtualenv, add cluster/runner client
# libraries, and strip files not needed at runtime.
FROM build_base as build_galaxy
ARG GALAXY_RELEASE=release_24.1
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh
# Install Galaxy
RUN apt update && apt install --no-install-recommends libcurl4-openssl-dev libssl-dev python3-dev python3-pip -y \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \
    && mkdir "${GALAXY_ROOT_DIR}" \
    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
    && cd $GALAXY_ROOT_DIR \
    # common_startup.sh creates the .venv and installs Galaxy's dependencies.
    && ./scripts/common_startup.sh \
    && . $GALAXY_ROOT_DIR/.venv/bin/activate \
    # Extra clients for DRMAA, Postgres, Kubernetes and Redis-backed setups.
    && pip3 install drmaa psycopg2 pycurl pykube redis \
    && pip3 install importlib-metadata importlib-resources pathlib2 ruamel.yaml.clib typing zipp \
    && deactivate \
    # Remove docs, tests and node toolchain to shrink the stage output.
    && rm -rf .ci .circleci .coveragerc .gitignore .travis.yml CITATION CODE_OF_CONDUCT.md CONTRIBUTING.md CONTRIBUTORS.md \
        LICENSE.txt Makefile README.rst SECURITY_POLICY.md pytest.ini tox.ini \
        contrib doc config/plugins lib/galaxy_test test test-data \
        .venv/lib/node_modules .venv/src/node-v10.15.3-linux-x64 \
        .venv/include/node .venv/bin/node .venv/bin/nodeenv \
    && /usr/bin/common_cleanup.sh
# --- Final image ---
# Assembles the runtime image from the cluster base plus the artifacts of
# the build_galaxy and build_miniforge stages.
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-cluster-base:$IMAGE_TAG as final
COPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh
COPY ./files/create_galaxy_user.py /usr/local/bin/create_galaxy_user.py
ENV EXPORT_DIR=/export \
    GALAXY_ROOT_DIR=/galaxy \
    GALAXY_PYTHON=/usr/bin/python3 \
    HTCONDOR_ROOT=/opt/htcondor
ENV GALAXY_RELEASE=${GALAXY_RELEASE:-release_24.1} \
    GALAXY_REPO=${GALAXY_REPO:-https://github.com/galaxyproject/galaxy} \
    GALAXY_STATIC_DIR=$GALAXY_ROOT_DIR/static \
    GALAXY_EXPORT=$EXPORT_DIR/galaxy \
    GALAXY_CONFIG_DIR=$GALAXY_ROOT_DIR/config \
    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=/tool_deps \
    GALAXY_CONFIG_TOOL_PATH=$GALAXY_ROOT_DIR/tools \
    GALAXY_CONFIG_TOOL_DATA_PATH=$GALAXY_ROOT_DIR/tool-data \
    GALAXY_VIRTUAL_ENV=$GALAXY_ROOT_DIR/.venv \
    GALAXY_DATABASE_PATH=$GALAXY_ROOT_DIR/database
# Same fixed identity as in the build stages so copied files line up.
ENV GALAXY_USER=galaxy \
    GALAXY_GROUP=galaxy \
    GALAXY_UID=1450 \
    GALAXY_GID=1450 \
    GALAXY_HOME=/home/galaxy
ENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml
# Set permissions
RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
    && /usr/bin/common_cleanup.sh
# Install remaining dependencies
RUN apt update && apt install --no-install-recommends curl gcc gnupg2 libgomp1 liblzma-dev libbz2-dev libpq-dev \
        libcurl4-openssl-dev libssl-dev \
        mercurial make netcat python3-dev python3-setuptools python3-pip \
        zlib1g-dev sudo -y \
    # Cython and wheel are needed to later install pysam..
    && pip3 install Cython wheel \
    && pip3 install pysam \
    && /usr/bin/common_cleanup.sh
# GALAXY_USER should be able to run docker without root permissions
RUN usermod -aG docker $GALAXY_USER
# Make Python3 standard
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy ${GALAXY_ROOT_DIR} ${GALAXY_ROOT_DIR}
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge ${GALAXY_CONFIG_TOOL_DEPENDENCY_DIR} ${GALAXY_CONFIG_TOOL_DEPENDENCY_DIR}
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge ${GALAXY_HOME} ${GALAXY_HOME}
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /etc/profile.d/conda.sh /etc/profile.d/conda.sh
COPY ./files/start.sh /usr/bin/start.sh
EXPOSE 80
# Exec form (was shell form) so start.sh runs as PID 1 and receives
# container stop signals directly.
ENTRYPOINT ["/usr/bin/start.sh"]
================================================
FILE: compose/galaxy-server/files/common_cleanup.sh
================================================
#!/bin/sh
set -x
# This usually drastically reduced the container size
# at the cost of the startup time of your application
# (e.g. .pyc files must be regenerated on first run).
# One filesystem traversal instead of three separate `find /` scans.
find / \( -name '*.pyc' -o -name '*.log' -o -name '.cache' \) -delete
rm -rf /var/lib/apt/lists/*
rm -rf /var/cache/*
# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true
================================================
FILE: compose/galaxy-server/files/create_galaxy_user.py
================================================
#!/usr/bin/env python
import sys
sys.path.insert(1,'/galaxy')
sys.path.insert(1,'/galaxy/lib')
from galaxy.model import User, APIKeys
from galaxy.model.mapping import init
from galaxy.model.orm.scripts import get_config
import argparse
def add_user(sa_session, security_agent, email, password, key=None, username="admin"):
    """
    Return the Galaxy user with ``email``, creating it first if necessary.

    A freshly created user gets a private role, default history/dataset
    permissions and, when ``key`` is given, a pre-set API key.
    Based on John Chilton's gist: https://gist.github.com/jmchilton/4475646
    """
    existing = sa_session.query(User).filter_by(email=email)
    if existing.count() > 0:
        # Idempotent: never create a duplicate account.
        return existing.first()

    # Opt in to the stronger PBKDF2 password hashing scheme.
    User.use_pbkdf2 = True
    new_user = User(email)
    new_user.username = username
    new_user.set_password_cleartext(password)
    sa_session.add(new_user)
    sa_session.flush()

    security_agent.create_private_user_role(new_user)
    if not new_user.default_permissions:
        security_agent.user_set_default_permissions(new_user, history=True, dataset=True)

    if key is not None:
        # Attach a fixed API key so automation can talk to Galaxy immediately.
        api_key = APIKeys()
        api_key.user_id = new_user.id
        api_key.key = key
        sa_session.add(api_key)
        sa_session.flush()

    sa_session.commit()
    return new_user
if __name__ == "__main__":
    # Resolve the database connection URL from the Galaxy config file passed
    # via -c in the remaining command-line arguments.
    db_url = get_config(sys.argv, use_argparse=False)['db_url']
    parser = argparse.ArgumentParser(description='Create Galaxy Admin User.')
    parser.add_argument("--user", required=True,
                        help="Username, it should be an email address.")
    parser.add_argument("--password", required=True,
                        help="Password.")
    parser.add_argument("--key", help="API-Key.")
    parser.add_argument("--username", default="admin",
                        help="The public username. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the '-' character.")
    # Swallow leftover arguments (e.g. the -c flag already consumed by get_config).
    parser.add_argument('args', nargs=argparse.REMAINDER)
    options = parser.parse_args()
    # '/tmp/' is passed as the first init() argument — presumably a scratch
    # file path that is irrelevant for user creation; TODO confirm.
    mapping = init('/tmp/', db_url)
    sa_session = mapping.context
    security_agent = mapping.security_agent
    add_user(sa_session, security_agent, options.user, options.password, key=options.key, username=options.username)
================================================
FILE: compose/galaxy-server/files/start.sh
================================================
#!/bin/bash
# Wait for Galaxy to answer over HTTP, then create the default admin user.
create_user() {
GALAXY_PROXY_PREFIX=$(cat $GALAXY_CONFIG_DIR/GALAXY_PROXY_PREFIX.txt)
echo "Waiting for Galaxy..."
until [ "$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}$GALAXY_PROXY_PREFIX)" -eq "200" ] && echo Galaxy started; do
sleep 0.1;
done;
# Do NOT print $GALAXY_DEFAULT_ADMIN_PASSWORD / $GALAXY_DEFAULT_ADMIN_KEY here:
# container logs are frequently collected centrally and must not leak credentials.
echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER if not existing"
. $GALAXY_VIRTUAL_ENV/bin/activate
python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
-c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
deactivate
}
# start copy lib/tools. Looks very hacky.
# Presumably mirrors Galaxy's bundled tool wrappers onto the shared /export
# volume for other containers — TODO confirm which consumers rely on it.
tools_dir="/galaxy/lib/galaxy/tools/"
exp_dir="/export$tools_dir"
mkdir -p $exp_dir
chown "$GALAXY_USER:$GALAXY_USER" $exp_dir
cp -rf $tools_dir/* $exp_dir
# end copy lib/tools.
# First start?? Check if something exists that indicates that environment is not new.. Config file? Something in DB maybe??
echo "Initialization: Check if files already exist, export otherwise."
# Create initial $GALAXY_ROOT_DIR in $EXPORT_DIR if not already existent
mkdir -p "$EXPORT_DIR/$GALAXY_ROOT_DIR"
# Map each in-container Galaxy directory to its persistent twin under $EXPORT_DIR.
declare -A exports=( ["$GALAXY_STATIC_DIR"]="$EXPORT_DIR/$GALAXY_STATIC_DIR" \
["$GALAXY_CONFIG_TOOL_PATH"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_PATH" \
["$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR" \
["$GALAXY_CONFIG_TOOL_DATA_PATH"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DATA_PATH" \
["$GALAXY_VIRTUAL_ENV"]="$EXPORT_DIR/$GALAXY_VIRTUAL_ENV" )
# shellcheck disable=SC2143,SC2086,SC2010
for galaxy_dir in "${!exports[@]}"; do
exp_dir=${exports[$galaxy_dir]}
# Seed the export location only when it is missing or empty (first start) ...
if [ ! -d $exp_dir ] || [ -z "$(ls -A $exp_dir)" ]; then
echo "Exporting $galaxy_dir to $exp_dir"
mkdir $exp_dir
chown "$GALAXY_USER:$GALAXY_USER" $exp_dir
cp -rpf $galaxy_dir/* $exp_dir
fi
# ... then always replace the in-container directory with a symlink to it.
rm -rf $galaxy_dir
ln -v -s $exp_dir $galaxy_dir
chown -h "$GALAXY_USER:$GALAXY_USER" $galaxy_dir
done
# Export galaxy_config separately (special treatment because of plugins-dir)
# Emptiness check ignores subdirectories: only plain files count as "configured".
# shellcheck disable=SC2143,SC2086,SC2010
if [ ! -d "$EXPORT_DIR/$GALAXY_CONFIG_DIR" ] || [ -z "$(ls -p $EXPORT_DIR/$GALAXY_CONFIG_DIR | grep -v /)" ]; then
# Move config to $EXPORT_DIR and create symlink
mkdir "$EXPORT_DIR/$GALAXY_CONFIG_DIR"
chown "$GALAXY_USER:$GALAXY_USER" "$EXPORT_DIR/$GALAXY_CONFIG_DIR"
cp -rpf $GALAXY_CONFIG_DIR/* $EXPORT_DIR/$GALAXY_CONFIG_DIR
cp -rpf $GALAXY_CONFIG_DIR/plugins/* $EXPORT_DIR/$GALAXY_CONFIG_DIR/plugins
fi
rm -rf "$GALAXY_CONFIG_DIR"
ln -v -s "$EXPORT_DIR/$GALAXY_CONFIG_DIR" "$GALAXY_CONFIG_DIR"
chown -h "$GALAXY_USER:$GALAXY_USER" "$GALAXY_CONFIG_DIR"
# Export database-folder (used for job files etc)
rm -rf "$GALAXY_DATABASE_PATH"
mkdir -p "$EXPORT_DIR/$GALAXY_DATABASE_PATH"
chown "$GALAXY_USER:$GALAXY_USER" "$EXPORT_DIR/$GALAXY_DATABASE_PATH"
ln -v -s "$EXPORT_DIR/$GALAXY_DATABASE_PATH" "$GALAXY_DATABASE_PATH"
chown -h "$GALAXY_USER:$GALAXY_USER" "$GALAXY_DATABASE_PATH"
# Try to guess if we are running under --privileged mode
# NOTE(review): the heuristic assumes unprivileged containers show masking
# mounts over /proc/kcore while privileged ones do not — confirm this still
# holds for the container runtime in use.
if mount | grep "/proc/kcore"; then
PRIVILEGED=false
else
PRIVILEGED=true
echo "Privileged mode detected"
# Let non-root processes use the mounted Docker daemon socket.
chmod 666 /var/run/docker.sock
fi
# FUSE-based CVMFS mounts are only possible with sufficient privileges.
if $PRIVILEGED; then
echo "Mounting CVMFS"
chmod 666 /dev/fuse
mkdir /cvmfs/data.galaxyproject.org
mount -t cvmfs data.galaxyproject.org /cvmfs/data.galaxyproject.org
mkdir /cvmfs/singularity.galaxyproject.org
mount -t cvmfs singularity.galaxyproject.org /cvmfs/singularity.galaxyproject.org
fi
echo "Finished initialization"
# Block until the message broker and the database accept TCP connections.
echo "Waiting for RabbitMQ..."
until nc -z -w 2 rabbitmq 5672 && echo RabbitMQ started; do
sleep 1;
done;
echo "Waiting for Postgres..."
until nc -z -w 2 postgres 5432 && echo Postgres started; do
sleep 1;
done;
# The galaxy-configurator container removes its lock file when all config
# files are written; wait for that unless explicitly told to skip.
if [ "$SKIP_LOCKING" != "true" ]; then
echo "Waiting for Galaxy configurator to finish and release lock"
until [ ! -f "$GALAXY_CONFIG_DIR/configurator.lock" ] && echo Lock released; do
sleep 0.1;
done;
fi
# Optional HTCondor integration: enabled when a config was mounted in.
if [ -f "/htcondor_config/galaxy.conf" ]; then
echo "HTCondor config file found"
cp -f "/htcondor_config/galaxy.conf" /etc/condor/condor_config.local
condor_store_cred -p "$HTCONDOR_POOL_PASSWORD" -f /var/lib/condor/pool_password
echo "Starting HTCondor.."
/usr/sbin/condor_master -b
fi
# Optional Slurm integration: munge handles cluster authentication.
if [ -f /etc/munge/munge.key ]; then
echo "Munge key found"
echo "Starting Munge.."
/etc/init.d/munge start
fi
# In case the user wants the default admin to be created, do so.
if [[ -n $GALAXY_DEFAULT_ADMIN_USER ]]; then
# Run in background and wait for Galaxy having finished starting up
create_user &
fi
# Ensure proper permission (the configurator might have changed them "by mistake")
chown -RL "$GALAXY_USER:$GALAXY_GROUP" "$GALAXY_CONFIG_DIR"
echo "Starting Galaxy now.."
cd "$GALAXY_ROOT_DIR" || { echo "Error: Could not change to $GALAXY_ROOT_DIR"; exit 1; }
# Drop privileges: run Galaxy itself as $GALAXY_USER with the environment preserved.
sudo -E -H -u $GALAXY_USER "$GALAXY_VIRTUAL_ENV/bin/galaxy" --config-file "$GALAXY_CONFIG_FILE"
================================================
FILE: compose/galaxy-slurm/Dockerfile
================================================
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest
# Stage 1: fetch the Galaxy source and strip it down to the Python packages
# listed in the grep filters below (model, datatypes, util, ...).
FROM buildpack-deps:22.04 as galaxy_dependencies
ARG GALAXY_RELEASE=release_24.1
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy
ENV GALAXY_ROOT_DIR=/galaxy
# Download Galaxy source, but only keep necessary dependencies
RUN mkdir "${GALAXY_ROOT_DIR}" \
&& curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
&& cd $GALAXY_ROOT_DIR \
&& ls . | grep -v "lib" | xargs rm -rf \
&& cd $GALAXY_ROOT_DIR/lib \
&& ls . | grep -v "galaxy\|galaxy_ext" | xargs rm -rf \
&& cd $GALAXY_ROOT_DIR/lib/galaxy \
&& ls . | grep -v "__init__.py\|datatypes\|exceptions\|files\|metadata\|model\|util\|security" | xargs rm -rf
# Stage 2: the actual Slurm node image.
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG as final
ENV GALAXY_USER=galaxy \
GALAXY_GROUP=galaxy \
GALAXY_UID=1450 \
GALAXY_GID=1450 \
GALAXY_HOME=/home/galaxy \
GALAXY_ROOT_DIR=/galaxy
# NOTE(review): uid/gid 1450 presumably mirror the galaxy-server image so files
# on shared volumes keep consistent ownership — keep them in sync.
RUN groupadd -r $GALAXY_USER -g $GALAXY_GID \
&& useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
&& mkdir $GALAXY_HOME \
&& chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME
# Install Slurm
# Munge provides the shared-key authentication between the Slurm daemons.
ENV SLURM_USER=galaxy \
SLURM_UID=1450 \
SLURM_GID=1450 \
MUNGE_USER=munge \
MUNGE_UID=1200 \
MUNGE_GID=1200
RUN groupadd -r $MUNGE_USER -g $MUNGE_GID \
&& useradd -u $MUNGE_UID -r -g $MUNGE_USER $MUNGE_USER \
&& apt update \
&& apt install --no-install-recommends gosu munge python3 python3-dev slurm-wlm -y \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/cache/* && find / -name '*.pyc' -delete
# Copy Galaxy dependencies
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=galaxy_dependencies $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR
# Make Python3 standard
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10
COPY start.sh /usr/bin/start.sh
ENTRYPOINT [ "/usr/bin/start.sh" ]
================================================
FILE: compose/galaxy-slurm/start.sh
================================================
#!/bin/bash
# Start a Slurm daemon (controller or worker) once the shared config exists.
# Inspired by: https://github.com/giovtorres/slurm-docker-cluster
sleep 10 # ToDo: Use locking or so to be sure we really have the newest version
echo "Waiting for Slurm config"
until [ -f /etc/slurm/slurm.conf ] && echo Config found; do
    sleep 0.5;
done;

case "$1" in
    slurmctld)
        # The controller generates the munge key on first start.
        if [ ! -f /etc/munge/munge.key ]; then
            chown -R "$MUNGE_USER":"$MUNGE_USER" /etc/munge
            gosu "$MUNGE_USER" /usr/sbin/mungekey
        fi
        echo "Starting Munge.."
        /etc/init.d/munge start
        echo "Starting Slurmctld"
        exec /usr/sbin/slurmctld -D
        ;;
    slurmd)
        # Workers wait for the controller-generated key on the shared volume.
        echo "Waiting for munge.key"
        until [ -f /etc/munge/munge.key ] && echo munge.key found; do
            sleep 0.5;
        done;
        sleep 1
        echo "Starting Munge.."
        /etc/init.d/munge start
        echo "Starting Slurmd"
        exec /usr/sbin/slurmd -D
        ;;
esac

# Any other argument is executed verbatim.
exec "$@"
================================================
FILE: compose/galaxy-slurm-node-discovery/Dockerfile
================================================
FROM alpine:3.17
# --no-cache keeps the apk package index out of the image layer.
RUN apk add --no-cache curl jq
COPY run.sh /usr/bin/run.sh
# Exec form so run.sh is PID 1 and receives container stop signals directly.
ENTRYPOINT ["/usr/bin/run.sh"]
================================================
FILE: compose/galaxy-slurm-node-discovery/run.sh
================================================
#!/bin/sh
# This script is used to replace the container name of a slurm node
# with its correct hostname. This is needed, as a hostname can not
# include '_', which is the case for docker-compose.
sleep 5
echo "Waiting for Galaxy configurator to finish and release lock"
until [ ! -f /etc/slurm/configurator.lock ] && echo Lock released; do
sleep 0.1;
done;
# For every NodeName entry, look up the matching container through the Docker
# API (over the mounted socket) and substitute the first 12 characters of its
# container ID in slurm.conf.
grep < /etc/slurm/slurm.conf "NodeName=" | while read -r line; do
node=$(echo "$line" | sed "s/NodeName=\(.*\) \(NodeAddr.*\)/\1/")
# NOTE(review): with --unix-socket curl ignores the URL host, so "v1.40" here
# is NOT an API version — the request is an unversioned /containers/json call.
node_hostname=$(curl -s --unix-socket /var/run/docker.sock -XGET \
-H "Content-Type: application/json" http://v1.40/containers/json \
-G --data-urlencode "filters={\"name\":[\"$node\"]}" \
| jq -r '.[0] | .["Id"]' | head -c 12)
sed -i "s/$node/$node_hostname/g" /etc/slurm/slurm.conf
done
# Keep the container alive so compose does not restart it in a loop.
sleep infinity
================================================
FILE: compose/pulsar/Dockerfile
================================================
ARG DOCKER_REGISTRY=quay.io
ARG DOCKER_REGISTRY_USERNAME=bgruening
ARG IMAGE_TAG=latest
# Stage 1: build Pulsar and its virtualenv from a source release tarball.
FROM buildpack-deps:22.04 as build_pulsar
ARG PULSAR_RELEASE=0.15.6
ARG PULSAR_REPO=https://github.com/galaxyproject/pulsar
ENV PULSAR_ROOT=/pulsar
ENV PULSAR_VIRTUALENV=$PULSAR_ROOT/.venv
RUN apt update \
&& apt install --no-install-recommends curl python3 python3-dev python3-pip python3-setuptools python3-venv -y
RUN mkdir /tmp/pulsar \
&& curl -L -s $PULSAR_REPO/archive/$PULSAR_RELEASE.tar.gz | tar xzf - --strip-components=1 -C /tmp/pulsar \
&& mkdir $PULSAR_ROOT \
&& pip3 install wheel \
&& python3 -m venv $PULSAR_VIRTUALENV \
&& . $PULSAR_VIRTUALENV/bin/activate \
# Runtime extras: DRMAA bindings, AMQP (kombu), paster server, uwsgi, pinned aiohttp.
&& pip3 install drmaa kombu pastescript pastedeploy pycurl uwsgi pydantic "aiohttp==3.10.9" \
&& cd /tmp/pulsar \
&& python3 /tmp/pulsar/setup.py install
# --- Final image ---
FROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-cluster-base:$IMAGE_TAG as final
COPY files/common_cleanup.sh /usr/bin/common_cleanup.sh
ENV PULSAR_ROOT=/pulsar
ENV PULSAR_VIRTUALENV=$PULSAR_ROOT/.venv \
PULSAR_CONFIG_DIR=$PULSAR_ROOT/config \
PULSAR_TOOL_DEPENDENCY_DIR=$PULSAR_ROOT/dependencies
RUN apt update \
&& apt install --no-install-recommends ca-certificates curl libxml2-dev python3 -y \
&& /usr/bin/common_cleanup.sh
# The virtualenv is copied wholesale; its absolute path (/pulsar/.venv) must
# stay identical between stages for the interpreter shebangs to keep working.
COPY --from=build_pulsar /pulsar /pulsar
COPY docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
================================================
FILE: compose/pulsar/docker-entrypoint.sh
================================================
#!/bin/bash
# Entrypoint for the Pulsar container: wait for the configuration, mount CVMFS
# when running privileged, then hand control over to Pulsar itself.
if [ -z "$PULSAR_SKIP_CONFIG_LOCK" ]; then
sleep 10
echo "Waiting for Galaxy configurator to finish and release lock"
until [ ! -f "$PULSAR_CONFIG_DIR/configurator.lock" ] && echo Lock released; do
sleep 0.1;
done;
fi
# Try to guess if we are running under --privileged mode
if mount | grep "/proc/kcore"; then
PRIVILEGED=false
else
PRIVILEGED=true
echo "Privileged mode detected"
# Let non-root processes use the mounted Docker daemon socket.
chmod 666 /var/run/docker.sock
fi
# FUSE-based CVMFS mounts are only possible with sufficient privileges.
if $PRIVILEGED; then
echo "Mounting CVMFS"
chmod 666 /dev/fuse
mkdir /cvmfs/data.galaxyproject.org
mount -t cvmfs data.galaxyproject.org /cvmfs/data.galaxyproject.org
mkdir /cvmfs/singularity.galaxyproject.org
mount -t cvmfs singularity.galaxyproject.org /cvmfs/singularity.galaxyproject.org
fi
cd "$PULSAR_ROOT" || exit 1
. "$PULSAR_VIRTUALENV/bin/activate"
# exec so Pulsar replaces this shell as PID 1 and receives stop signals directly
# (previously the shell stayed in front and swallowed SIGTERM).
exec pulsar --mode "${PULSAR_MODE:-paster}"
================================================
FILE: compose/pulsar/files/common_cleanup.sh
================================================
#!/bin/sh
# Shrink the image: drop compiled Python files, logs and caches.
set -x
# This usually drastically reduces the container size
# at the cost of the startup time of your application
find / -name '*.pyc' -delete
find / -name '*.log' -delete
# 'find -delete' refuses to remove non-empty directories, so .cache
# directories survived it. Prune each match and rm -rf it instead
# (works for both files and directories).
find / -name '.cache' -prune -exec rm -rf '{}' +
rm -rf /var/lib/apt/lists/*
rm -rf /var/cache/*
# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles
truncate -s 0 /var/log/*log || true
truncate -s 0 /var/log/**/*log || true
================================================
FILE: compose/tests/docker-compose.test.bioblend.yml
================================================
services:
  # Runs the BioBlend API test suite against the Galaxy instance behind nginx.
  galaxy-bioblend-test:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-bioblend-test:${IMAGE_TAG:-latest}
    build: tests/galaxy-bioblend-test
    environment:
      - GALAXY_VERSION=${GALAXY_VERSION:-release_24.1} # TODO: Change to GALAXY_RELEASE
      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}
      # Optional extra pytest -k expressions appended to the default skip list.
      - EXTRA_SKIP_TESTS_BIOBLEND=${EXTRA_SKIP_TESTS_BIOBLEND:-}
    networks:
      - galaxy
================================================
FILE: compose/tests/docker-compose.test.selenium.yml
================================================
services:
  # Runs Galaxy's selenium UI tests against the instance behind nginx.
  galaxy-selenium-test:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-selenium-test:${IMAGE_TAG:-latest}
    build: tests/galaxy-selenium-test
    environment:
      # Comma-separated selenium test modules to execute.
      - TESTS=${TESTS:-navigates_galaxy.py,login.py}
      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}
      - SE_ENABLE_TRACING=false
      - SE_SESSION_REQUEST_TIMEOUT=1800
    volumes:
      # Share Galaxy's database directory so test artifacts/datasets are visible.
      - ${EXPORT_DIR:-./../export}/galaxy/database:/galaxy/database
    networks:
      - galaxy
================================================
FILE: compose/tests/docker-compose.test.workflows.yml
================================================
services:
  # Executes workflow tests with planemo against the instance behind nginx.
  galaxy-workflow-test:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-workflow-test:${IMAGE_TAG:-latest}
    build: tests/galaxy-workflow-test
    environment:
      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}
      # Comma-separated workflow files (paths inside the workflow-testing repo).
      - WORKFLOWS=${WORKFLOWS:-training/sequence-analysis/quality-control/quality_control.ga,sklearn/ard/ard.ga,example1/wf3-shed-tools.ga}
    volumes:
      # Share Galaxy's database directory so test outputs land on the export.
      - ${EXPORT_DIR:-./../export}/galaxy/database:/galaxy/database
    networks:
      - galaxy
================================================
FILE: compose/tests/docker-compose.test.yml
================================================
services:
  # Test-only overrides for the configurator service.
  galaxy-configurator:
    environment:
      - GALAXY_CONFIG_CLEANUP_JOB=never
      - NGINX_PROXY_READ_TIMEOUT=3600
      - DONT_EXIT=true
  # Terminates the container after $TIMEOUT minutes
  # which results in the whole setup terminating if --exit-code-from
  # is set (see CI)
  timeout:
    image: alpine:3.17
    environment:
      - TIMEOUT=${TIMEOUT:-120}
    # $$ escapes compose interpolation so the shell sees a single $.
    command: sh -c "echo \"Setting timeout to $$TIMEOUT minutes\" && sleep $$((( $$TIMEOUT * 60 ))) && echo \"Timeout after $$TIMEOUT minutes!\" && exit 1"
================================================
FILE: compose/tests/galaxy-bioblend-test/Dockerfile
================================================
# Image with BioBlend's test suite and a pre-built tox environment.
FROM alpine:3.17 as build
ENV BIOBLEND_VERSION=1.3.0
ADD "https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND_VERSION.zip" /src/bioblend.zip
RUN apk update && apk add curl python3-dev unzip \
&& python3 -m ensurepip --upgrade \
&& pip3 install pep8 tox "aiohttp==3.10.9" \
&& cd /src \
&& unzip bioblend.zip && rm bioblend.zip \
&& mv "bioblend-$BIOBLEND_VERSION" bioblend \
&& cd bioblend \
&& python3 setup.py install
WORKDIR /src/bioblend
# Pre-create the tox py310 venv at build time so container start is fast;
# --notest skips actually running the suite here.
RUN tox -e py310 --notest
COPY ./run.sh /src/bioblend/run.sh
ENTRYPOINT ./run.sh
================================================
FILE: compose/tests/galaxy-bioblend-test/run.sh
================================================
#!/bin/sh
# Wait until the Galaxy API accepts the admin key before starting the tests.
echo "Waiting for Galaxy..."
until [ "$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\?key\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})" -eq "200" ] && echo Galaxy started; do
sleep 1;
done;
# BioBlend's test suite reads its target instance from these variables.
export BIOBLEND_GALAXY_URL=${GALAXY_URL:-http://nginx}
export BIOBLEND_GALAXY_API_KEY=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey}
export BIOBLEND_TEST_JOB_TIMEOUT=${BIOBLEND_TEST_JOB_TIMEOUT:-240}
# default skip tests
DEFAULT_SKIP_TESTS="not test_rerun_and_remap and not test_create_quota and not test_get_quotas and not test_delete_undelete_quota and not test_update_quota and not test_update_non_default_quota and not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_existing_history and not test_new_history and not test_params and not test_tool_dependency_install and not test_download_history and not test_export_and_download and not test_cancel_invocation and not test_run_step_actions and not test_extract_workflow_from_history"
EXTRA_SKIP_TESTS_BIOBLEND=${EXTRA_SKIP_TESTS_BIOBLEND:-""}
# Combine default skip tests with extra skip tests, if provided
SKIP_TESTS="$DEFAULT_SKIP_TESTS"
[ -n "$EXTRA_SKIP_TESTS_BIOBLEND" ] && SKIP_TESTS="$SKIP_TESTS and $EXTRA_SKIP_TESTS_BIOBLEND"
# pytest -k with 'not ...' deselects the listed tests; everything else runs.
tox -e py310 -- -k "$SKIP_TESTS"
================================================
FILE: compose/tests/galaxy-selenium-test/Dockerfile
================================================
FROM selenium/standalone-chrome:4.25.0
ARG GALAXY_RELEASE=release_24.1
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy
ENV GALAXY_ROOT_DIR=/galaxy
ENV GALAXY_PYTHON=/usr/bin/python3
# Root only for system packages; the Galaxy checkout runs as seluser.
USER root
RUN apt update && apt install --no-install-recommends python3-dev python3-pip libpq-dev 2to3 -y && rm -rf /var/lib/apt/lists/* \
&& mkdir "${GALAXY_ROOT_DIR}" \
&& chown seluser "${GALAXY_ROOT_DIR}"
USER seluser
# Fetch Galaxy and prepare its test environment (web client build not needed).
RUN mkdir -p $GALAXY_ROOT_DIR && \
curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
&& cd "${GALAXY_ROOT_DIR}" \
&& ./scripts/common_startup.sh --skip-client-build --dev-wheels
COPY run.sh /usr/bin/run.sh
CMD "/usr/bin/run.sh"
================================================
FILE: compose/tests/galaxy-selenium-test/run.sh
================================================
#!/bin/bash
set -e # Stop script, if a test fails
# Bring up the bundled Selenium server in the background.
supervisord &
sleep 5
echo "Waiting for Galaxy..."
until [ "$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\?key\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})" -eq "200" ] && echo Galaxy started; do
sleep 1;
done;
# Point Galaxy's selenium test framework at the local Selenium server and the
# external Galaxy instance under test.
export GALAXY_TEST_SELENIUM_REMOTE=1
export GALAXY_TEST_SELENIUM_REMOTE_HOST=localhost
export GALAXY_TEST_SELENIUM_REMOTE_PORT=4444
export GALAXY_TEST_EXTERNAL_FROM_SELENIUM=${GALAXY_URL:-http://nginx}
export GALAXY_TEST_EXTERNAL=${GALAXY_URL:-http://nginx}
export GALAXY_CONFIG_BOOTSTRAP_ADMIN_API_KEY=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey}
# $TESTS is a comma-separated list of test module names (e.g. "login.py").
for test in $(echo "$TESTS" | sed "s/,/ /g"); do
echo "Running test $test"
./galaxy/run_tests.sh --skip-common-startup -selenium "/galaxy/lib/galaxy_test/selenium/test_$test"
done
================================================
FILE: compose/tests/galaxy-workflow-test/Dockerfile
================================================
FROM alpine:3.17
# NOTE(review): Dockerfile ENV does not see runtime variables — the
# ${VAR:-default} fallbacks resolve at build time (to the defaults unless
# matching build ARGs are declared). Confirm this is the intended behavior.
ENV TEST_REPO=${TEST_REPO:-https://github.com/jyotipm29/workflow-testing} \
TEST_RELEASE=${TEST_RELEASE:-24.1}
# Build deps are only needed to compile planemo's wheels and are removed again.
RUN apk add --no-cache bash python3 py3-pip curl \
&& apk add --no-cache --virtual build-dep gcc make libc-dev xz-dev bzip2-dev hdf5-dev musl-dev linux-headers python3-dev \
&& pip3 install planemo \
&& mkdir /src && cd /src \
&& curl -L -s $TEST_REPO/archive/$TEST_RELEASE.tar.gz | tar xzf - --strip-components=1 \
&& apk del build-dep
ADD ./run.sh /usr/bin/run.sh
WORKDIR /src
ENTRYPOINT /usr/bin/run.sh
================================================
FILE: compose/tests/galaxy-workflow-test/run.sh
================================================
#!/bin/bash
set -e # Stop script, if a test fails
echo "Waiting for Galaxy..."
until [ "$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\?key\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})" -eq "200" ] && echo Galaxy started; do
sleep 1;
done;
# Run each workflow test with planemo against the external Galaxy instance.
for workflow in $(echo $WORKFLOWS | sed "s/,/ /g")
do
echo "Running test $workflow"
# NOTE(review): the wait loop above authenticates with GALAXY_DEFAULT_ADMIN_KEY
# while planemo uses GALAXY_USER_KEY — confirm both are intentionally distinct.
planemo $PLANEMO_OPTIONS test \
--galaxy_url "${GALAXY_URL:-nginx}" \
--galaxy_admin_key "${GALAXY_USER_KEY:-fakekey}" \
--shed_install \
--engine external_galaxy \
--test_output ${GALAXY_ROOT_DIR:-/galaxy}/database/tool_test_output.html \
--test_output_json ${GALAXY_ROOT_DIR:-/galaxy}/database/tool_test_output.json \
"$workflow";
done
================================================
FILE: cvmfs/Dockerfile
================================================
# CVMFS client sidecar: Ansible + fuse3 so the galaxyproject.cvmfs role can
# configure and mount the Galaxy CVMFS repositories at container start.
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
ansible \
ca-certificates \
curl \
dirmngr \
fuse3 \
git \
gpg \
python3 \
python3-apt \
python3-venv \
&& rm -rf /var/lib/apt/lists/*
COPY ansible/ /ansible/
# Pre-install the required Ansible roles so no network access is needed at runtime.
RUN ansible-galaxy install -r /ansible/requirements.yml -p /ansible/roles \
&& rm -rf /root/.ansible
COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod 0755 /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
# Default command: follow the autofs/cvmfs logs to keep the container running.
CMD ["bash", "-lc", "tail -F /var/log/autofs.log /var/log/cvmfs.log 2>/dev/null"]
================================================
FILE: cvmfs/README.md
================================================
# CVMFS sidecar for Galaxy
This container provides a full CVMFS client (no cvmfsexec) and is intended to be used as an optional sidecar for the
Galaxy container in `galaxy/docker-compose.yaml`.
## What it does
- Installs and configures the CVMFS client using the `galaxyproject.cvmfs` Ansible role.
- Enables the Galaxy CVMFS repositories (including `data.galaxyproject.org` and
`singularity.galaxyproject.org`).
- Starts autofs and warms the mount points so the CVMFS mounts are shared to the Galaxy container.
## Build
From the repository root:
```bash
docker build -t galaxy-cvmfs ./cvmfs
```
## Usage with docker-compose
The `galaxy/docker-compose.yaml` file contains an optional `cvmfs` service (profile: `cvmfs`).
Start both containers with:
```bash
cd galaxy
CVMFS_MOUNT_DIR=/cvmfs EXPORT_DIR=./export docker compose --profile cvmfs up
```
Notes:
- The sidecar runs privileged so the CVMFS mount can be propagated to the host.
- The `/cvmfs` mount is shared between the sidecar and the Galaxy container.
- The CVMFS cache is stored in `${EXPORT_DIR}/cvmfs-cache` to keep it persistent.
## Basic check
Once running, verify the mount from the Galaxy container:
```bash
docker exec -it galaxy-server ls /cvmfs/data.galaxyproject.org/byhand
```
If the directory lists, CVMFS is mounted.
## Environment variables
- `CVMFS_REPOSITORIES`: Space- or comma-separated list of repositories to warm up.
Default: `data.galaxyproject.org singularity.galaxyproject.org`
- `CVMFS_CACHE_BASE`: Cache directory inside the sidecar. Default: `/var/lib/cvmfs`
================================================
FILE: cvmfs/ansible/playbook.yml
================================================
---
# Configure this container as a CVMFS client with the Galaxy repositories
# enabled, via the galaxyproject.cvmfs Ansible role.
- hosts: localhost
  connection: local
  gather_facts: true
  vars:
    cvmfs_role: client
    galaxy_cvmfs_repos_enabled: true
    # No squid proxy in this setup; talk to the stratum servers directly.
    cvmfs_http_proxies:
      - DIRECT
    cvmfs_cache_base: /var/lib/cvmfs
    # Local cache quota (CVMFS_QUOTA_LIMIT, in MB).
    cvmfs_quota_limit: 4000
    cvmfs_packages:
      client:
        - cvmfs
        - autofs
  roles:
    - role: galaxyproject.cvmfs
================================================
FILE: cvmfs/ansible/requirements.yml
================================================
---
# Ansible role dependencies, installed into /ansible/roles at image build time.
roles:
  - name: galaxyproject.cvmfs
    src: https://github.com/galaxyproject/ansible-cvmfs
    version: main
================================================
FILE: cvmfs/docker-entrypoint.sh
================================================
#!/usr/bin/env bash
set -euo pipefail

# Repositories to pre-create/warm; accept comma- or space-separated lists.
repos="${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}"
repos="${repos//,/ }"

mkdir -p /cvmfs
mkdir -p "${CVMFS_CACHE_BASE:-/var/lib/cvmfs}"
touch /var/log/autofs.log /var/log/cvmfs.log

# Run the CVMFS client setup only once per persistent cache volume.
if [[ ! -f "${CVMFS_CACHE_BASE:-/var/lib/cvmfs}/.configured" ]]; then
    ansible-playbook /ansible/playbook.yml
    touch "${CVMFS_CACHE_BASE:-/var/lib/cvmfs}/.configured"
fi

# Start autofs. Without a 'service' helper fall back to launching the
# automounter daemon directly — the binary is 'automount'; the previous
# 'autofs -f' called a nonexistent command and the failure was hidden by '|| true'.
if command -v service >/dev/null 2>&1; then
    service autofs start || true
else
    automount || true
fi

# Touch each repository path so autofs triggers the initial mount.
for repo in $repos; do
    mkdir -p "/cvmfs/$repo"
    ls "/cvmfs/$repo" >/dev/null 2>&1 || true
done

exec "$@"
================================================
FILE: docs/README.md
================================================
Documentation
=============
The documentation is automatically generated when the main [`README.md`](https://github.com/bgruening/docker-galaxy/blob/main/README.md) is changed on the `main` branch.
For information, this automatic generation uses a [Python script](src/generate_docs.py) to transform the markdown in the `README.md` into the HTML files.
This generation is automatically launched by a [GitHub Action Workflow](https://github.com/bgruening/docker-galaxy/actions/workflows/update-site.yml).
So, if you see any error in the [online documentation](http://bgruening.github.io/docker-galaxy), you can first check the `README.md`. If the error does not come from the `README.md`, you can either file an issue or check the [Python](src/generate_docs.py) script used to generate the HTML files.
================================================
FILE: docs/Running_jobs_outside_of_the_container.md
================================================
Using an external Slurm cluster
-------------------------------
It is often convenient to configure Galaxy to use a high-performance cluster for running jobs. To do so, two files are required:
1. munge.key
2. slurm.conf
These files from the cluster must be copied to the `/export` mount point (i.e., `/data/galaxy-data` on the host if using below command) accessible to Galaxy before starting the container. This must be done regardless of which Slurm daemons are running within Docker. At start, symbolic links will be created to these files to `/etc` within the container, allowing the various Slurm functions to communicate properly with your cluster. In such cases, there's no reason to run `slurmctld`, the Slurm controller daemon, from within Docker, so specify `-e "NONUSE=slurmctld"`. Unless you would like to also use Slurm (rather than the local job runner) to run jobs within the Docker container, then alternatively specify `-e "NONUSE=slurmctld,slurmd"`.
Importantly, Slurm relies on a shared filesystem between the Docker container and the execution nodes. To allow things to function correctly, each of the execution nodes will need `/export` and `/galaxy` directories to point to the appropriate places. Suppose you ran the following command to start the Docker image:
```sh
docker run -d -e "NONUSE=slurmd,slurmctld" -p 80:80 -v /data/galaxy-data:/export bgruening/galaxy-stable
```
You would then need the following symbolic links on each of the nodes:
1. `/export` → `/data/galaxy-data`
2. `/galaxy` → `/data/galaxy-data/galaxy`
A brief note is in order regarding the version of Slurm installed. This Docker image uses Ubuntu 14.04 as its base image. The version of Slurm in the Ubuntu 14.04 repository is 2.6.5 and that is what is installed in this image. If your cluster is using an incompatible version of Slurm then you will likely need to modify this Docker image.
The following is an example for how to specify a destination in `job_conf.xml` that uses a custom partition ("work", rather than "debug") and 4 cores rather than 1:
```xml
False
-p work -n 4
```
The usage of `-n` can be confusing. Note that it will specify the number of cores, not the number of tasks (i.e., it's not equivalent to `srun -n 4`).
Tips for Running Jobs Outside the Container
---------------------------------------------
In its default state Galaxy assumes both the Galaxy source code and
various temporary files are available on shared file systems across the
cluster, and uses the Galaxy source code to calculate metadata about the
files that have been produced.
When using Condor or SLURM (as described above) to run jobs outside
of the Docker container one can disable the metadata generation on the cluster,
or synchronize the files required for generating these.
The ``embed_metadata_in_job`` option on job destinations in `job_conf.xml`
forces Galaxy collect metadata inside the container instead of on the
cluster:
```xml
False
```
This has performance implications and may not scale as well as performing
these calculations on the remote cluster - but this should not be a problem
for most Galaxy instances.
Additionally, many framework tools depend on Galaxy's Python virtual
environment being available. This should be created outside of the container
on a shared filesystem available to your cluster using the instructions
[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#managing-dependencies-manually). Job destinations
can then source these virtual environments using the instructions outlined
[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#galaxy-job-handlers). In other words, by adding
a line such as this to each job destination:
```xml
```
A Hands-on example of running SLURM on an external cluster container
--------------------------------------------------------------------
In the [/test/slurm](../test/slurm/) folder you will find a Dockerfile
that can be used to build a SLURM docker image and to test the integration
of docker galaxy with SLURM.
To build the image, go the [/test/slurm](../test/slurm/) folder and type:
```sh
docker build -t slurm .
```
As explained above, to connect galaxy with the SLURM cluster, the slurm.conf
and munge.key files are needed. These files will be automatically generated by the
docker slurm container and placed into the /export folder.
To make them available to the galaxy container, we start the slurm container
with a host directory (`/data/galaxy-data`) mounted to `/export`.
(If there is a real cluster available, this would be a network share):
```sh
docker run -d -v /data/galaxy-data:/export \
--name slurm \
--hostname slurm \
slurm
```
We are also using the `--hostname slurm`, which allows the galaxy container
to reach the slurm container using the `slurm` hostname.
You should see a `slurm.conf` and `munge.key` key file in the export folder.
We can now start and connect galaxy to the slurm cluster:
```sh
docker run -d -e "NONUSE=slurmd,slurmctld" \
--name galaxy-slurm-test \
--link slurm \
-p 80:80 \
-v /data/galaxy-data:/export \
bgruening/galaxy-stable
```
Note the --link slurm, this will allow the galaxy container to talk to the slurm container.
On a real network this would not be necessary.
After a moment, we can enter the docker container and submit a simple job using the srun utility:
```
docker exec galaxy-slurm-test srun hostname
```
This should return the hostname of the slurm container, slurm.
But we still need to instruct galaxy on how to interface with slurm.
We therefore need to adjust the job_conf.xml file.
A sample job_conf.xml is in [/test/slurm/job_conf.xml](../test/slurm/job_conf.xml).
We can copy this file to /data/galaxy-data/galaxy/config:
```
cp job_conf.xml /data/galaxy-data/galaxy/config
```
We restart galaxy inside the container
```sh
docker exec galaxy-slurm-test galaxyctl restart
```
We should now be able to submit galaxy jobs through the slurm container.
To verify this you can install the printenv tools from the toolshed
(do not forget to restart galaxy after installing tools!)
and look at its output.
Bonus points
------------
In the [job_conf.xml](../test/slurm/job_conf.xml) we are disabling metadata generation
on the cluster, since this requires a set of galaxy's dependencies.
We can install these on the cluster, since the docker image copies galaxy's requirements.txt
and the galaxy's lib folder to /export.
We enter the slurm container and install these dependencies:
```sh
docker exec -it slurm bash
```
Inside the container we switch to the galaxy user, source the virtualenv, upgrade pip and install
the required dependencies:
```sh
source /galaxy/.venv/bin/activate && pip install --upgrade pip
pip install -r /galaxy/requirements.txt --index-url https://wheels.galaxyproject.org/simple
```
Now quit the slurm container, edit the job_conf.xml and set
```
True
```
and finally restart galaxy:
```
docker exec galaxy-slurm-test galaxyctl restart
```
================================================
FILE: docs/css/landing_page.css
================================================
/* Self-hosted "Noto Sans" web font. Four @font-face rules register the
   regular/bold/italic/bold-italic faces under a single family name. Each
   rule lists a bare .eot src first for legacy IE, then a second src with
   local() lookups followed by woff2/woff/ttf/svg fallbacks in decreasing
   order of preference. */

/* Regular (weight 400) */
@font-face {
  font-family: 'Noto Sans';
  font-weight: 400;
  font-style: normal;
  src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot');
  src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot?#iefix') format('embedded-opentype'),
       local('Noto Sans'),
       local('Noto-Sans-regular'),
       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff2') format('woff2'),
       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff') format('woff'),
       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.ttf') format('truetype'),
       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.svg#NotoSans') format('svg');
}
/* Bold (weight 700) */
@font-face {
  font-family: 'Noto Sans';
  font-weight: 700;
  font-style: normal;
  src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot');
  src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot?#iefix') format('embedded-opentype'),
       local('Noto Sans Bold'),
       local('Noto-Sans-700'),
       url('../fonts/Noto-Sans-700/Noto-Sans-700.woff2') format('woff2'),
       url('../fonts/Noto-Sans-700/Noto-Sans-700.woff') format('woff'),
       url('../fonts/Noto-Sans-700/Noto-Sans-700.ttf') format('truetype'),
       url('../fonts/Noto-Sans-700/Noto-Sans-700.svg#NotoSans') format('svg');
}
/* Italic (weight 400) */
@font-face {
  font-family: 'Noto Sans';
  font-weight: 400;
  font-style: italic;
  src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot');
  src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot?#iefix') format('embedded-opentype'),
       local('Noto Sans Italic'),
       local('Noto-Sans-italic'),
       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff2') format('woff2'),
       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff') format('woff'),
       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.ttf') format('truetype'),
       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.svg#NotoSans') format('svg');
}
/* Bold italic (weight 700) */
@font-face {
  font-family: 'Noto Sans';
  font-weight: 700;
  font-style: italic;
  src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot');
  src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot?#iefix') format('embedded-opentype'),
       local('Noto Sans Bold Italic'),
       local('Noto-Sans-700italic'),
       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2') format('woff2'),
       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff') format('woff'),
       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf') format('truetype'),
       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg#NotoSans') format('svg');
}
/* ---- Base typography ---- */
body {
  background-color: #fff;
  padding:50px;
  font: 14px/1.5 "Noto Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
  color:#727272;
  font-weight:400;
}
/* Headings: darker than body text, shared bottom margin */
h1, h2, h3, h4, h5, h6 {
  color:#222;
  margin:0 0 20px;
}
/* Uniform vertical rhythm for block-level content */
p, ul, ol, table, pre, dl {
  margin:0 0 20px;
}
h1, h2, h3 {
  line-height:1.1;
}
h1 {
  font-size:28px;
}
h2 {
  color:#393939;
}
h3, h4, h5, h6 {
  color:#494949;
}
/* ---- Links ---- */
a {
  color:#39c;
  text-decoration:none;
}
a:hover {
  color:#069;
}
/* Small secondary text inside a link (e.g. repo name under "View on GitHub") */
a small {
  font-size:11px;
  color:#777;
  margin-top:-0.3em;
  display:block;
}
a:hover small {
  color:#777;
}
/* ---- Layout: fixed-width centered page ---- */
.wrapper {
  width:860px;
  margin:0 auto;
}
blockquote {
  border-left:1px solid #e5e5e5;
  margin:0;
  padding:0 0 0 20px;
  font-style:italic;
}
/* ---- Code & tables ---- */
code, pre {
  font-family:Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace;
  color:#333;
  font-size:12px;
}
pre {
  padding:8px 15px;
  background: #f8f8f8;
  border-radius:5px;
  border:1px solid #e5e5e5;
  overflow-x: auto;
}
table {
  width:100%;
  border-collapse:collapse;
}
th, td {
  text-align:left;
  padding:5px 10px;
  border-bottom:1px solid #e5e5e5;
}
dt {
  color:#444;
  font-weight:700;
}
th {
  color:#444;
}
img {
  max-width:100%;
}
/* ---- Fixed left sidebar (site header) ---- */
header {
  width:270px;
  float:left;
  position:fixed;
  -webkit-font-smoothing:subpixel-antialiased;
}
/* Three-button download/view box in the sidebar */
header ul.box {
  list-style:none;
  height:40px;
  padding:0;
  background: #f4f4f4;
  border-radius:5px;
  border:1px solid #e0e0e0;
  width:270px;
}
header li.box {
  width:89px;
  float:left;
  border-right:1px solid #e0e0e0;
  height:40px;
}
/* Round only the outer corners of the first/last buttons */
header li.box:first-child a {
  border-radius:5px 0 0 5px;
}
header li.box:last-child a {
  border-radius:0 5px 5px 0;
}
header ul.box a {
  line-height:1;
  font-size:11px;
  color:#999;
  display:block;
  text-align:center;
  padding-top:6px;
  height:34px;
}
header ul.box a:hover {
  color:#999;
}
header ul.box a:active {
  background-color:#f0f0f0;
}
strong {
  color:#222;
  font-weight:700;
}
/* Last button: drop the separator border */
header ul.box li + li + li {
  border-right:none;
  width:89px;
}
header ul.box a strong {
  font-size:14px;
  display:block;
  color:#222;
}
/* Main content column, floated right of the fixed sidebar */
section {
  width:500px;
  float:right;
  padding-bottom:50px;
}
.bold {
  font-weight:bold;
}
small {
  font-size:11px;
}
hr {
  border:0;
  background:#e5e5e5;
  height:1px;
  margin:0 0 20px;
}
/* Footer pinned to the bottom of the fixed sidebar column */
footer {
  width:270px;
  float:left;
  position:fixed;
  bottom:50px;
  -webkit-font-smoothing:subpixel-antialiased;
}
/* ---- Responsive breakpoints: progressively collapse the fixed
   two-column layout as the viewport narrows (and for print) ---- */

/* <= 960px: un-fix the sidebar; header, section and footer stack */
@media print, screen and (max-width: 960px) {
  div.wrapper {
    width:auto;
    margin:0;
  }
  header, section, footer {
    float:none;
    position:static;
    width:auto;
  }
  header {
    padding-right:320px;
  }
  section {
    border:1px solid #e5e5e5;
    border-width:1px 0;
    padding:20px 0;
    margin:0 0 20px;
  }
  header a small {
    display:inline;
  }
  /* Keep the button box pinned to the top-right while space allows */
  header ul {
    position:absolute;
    right:50px;
    top:52px;
  }
}
/* <= 720px: let long words wrap; button box flows with the header */
@media print, screen and (max-width: 720px) {
  body {
    word-wrap:break-word;
  }
  header {
    padding:0;
  }
  header ul, header p.view {
    position:static;
  }
  pre, code {
    word-wrap:normal;
  }
}
/* <= 480px: tighter page padding; buttons share the row equally */
@media print, screen and (max-width: 480px) {
  body {
    padding:15px;
  }
  header ul {
    width:99%;
  }
  header li, header ul li + li + li {
    width:33%;
  }
}
/* Print: physical margins, point sizing, darker text */
@media print {
  body {
    padding:0.4in;
    font-size:12pt;
    color:#444;
  }
}
/*
The MIT License (MIT)
Copyright (c) 2016 GitHub, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* ---- GitHub "prettylights" syntax-highlighting palette ----
   These .pl-* classes are emitted by GitHub's markdown/code renderer;
   the original TextMate scope names are noted inline on each selector. */
.pl-c /* comment */ {
  color: #969896;
}
.pl-c1 /* constant, variable.other.constant, support, meta.property-name, support.constant, support.variable, meta.module-reference, markup.raw, meta.diff.header */,
.pl-s .pl-v /* string variable */ {
  color: #0086b3;
}
.pl-e /* entity */,
.pl-en /* entity.name */ {
  color: #795da3;
}
.pl-smi /* variable.parameter.function, storage.modifier.package, storage.modifier.import, storage.type.java, variable.other */,
.pl-s .pl-s1 /* string source */ {
  color: #333;
}
.pl-ent /* entity.name.tag */ {
  color: #63a35c;
}
.pl-k /* keyword, storage, storage.type */ {
  color: #a71d5d;
}
.pl-s /* string */,
.pl-pds /* punctuation.definition.string, string.regexp.character-class */,
.pl-s .pl-pse .pl-s1 /* string punctuation.section.embedded source */,
.pl-sr /* string.regexp */,
.pl-sr .pl-cce /* string.regexp constant.character.escape */,
.pl-sr .pl-sre /* string.regexp source.ruby.embedded */,
.pl-sr .pl-sra /* string.regexp string.regexp.arbitrary-repitition */ {
  color: #183691;
}
.pl-v /* variable */ {
  color: #ed6a43;
}
.pl-id /* invalid.deprecated */ {
  color: #b52a1d;
}
.pl-ii /* invalid.illegal */ {
  color: #f8f8f8;
  background-color: #b52a1d;
}
.pl-sr .pl-cce /* string.regexp constant.character.escape */ {
  font-weight: bold;
  color: #63a35c;
}
.pl-ml /* markup.list */ {
  color: #693a17;
}
.pl-mh /* markup.heading */,
.pl-mh .pl-en /* markup.heading entity.name */,
.pl-ms /* meta.separator */ {
  font-weight: bold;
  color: #1d3e81;
}
.pl-mq /* markup.quote */ {
  color: #008080;
}
.pl-mi /* markup.italic */ {
  font-style: italic;
  color: #333;
}
.pl-mb /* markup.bold */ {
  font-weight: bold;
  color: #333;
}
/* Diff rendering: deleted lines red, inserted lines green */
.pl-md /* markup.deleted, meta.diff.header.from-file */ {
  color: #bd2c00;
  background-color: #ffecec;
}
.pl-mi1 /* markup.inserted, meta.diff.header.to-file */ {
  color: #55a532;
  background-color: #eaffea;
}
.pl-mdr /* meta.diff.range */ {
  font-weight: bold;
  color: #795da3;
}
.pl-mo /* meta.output */ {
  color: #1d3e81;
}
/* Utility: `all: revert` rolls every property back to the user-agent /
   inherited value, opting an element out of the page styles above. */
.remove-all-styles {
  all: revert;
}
================================================
FILE: docs/js/landing_page.js
================================================
var metas = document.getElementsByTagName('meta');
var i;
if (navigator.userAgent.match(/iPhone/i)) {
for (i=0; i{section_name}\n", "html.parser")
else:
if section.name == "a" and section.get("href") == "#toc":
continue
section_content.append(section)
if section_id:
html_structure[section_id] = {
"name": section_name,
"content": section_content,
}
section_order.append(section_id)
header = BeautifulSoup(features="html.parser")
header.append(header.new_tag("p", **{"class": "bold"}, string="Table of content"))
ul_tag = header.new_tag("ul")
header.append(ul_tag)
for section in section_order:
if section == "header" or "toc" in section:
continue
li_tag = header.new_tag("li")
a_tag = header.new_tag("a", href=f"{section}.html")
a_tag.string = html_structure[section]["name"]
li_tag.append(a_tag)
ul_tag.append(li_tag)
for section in html_structure:
if section == "header" or "toc" in section:
continue
output_filepath = os.path.join("docs", f"{section}.html")
page_content = html_structure[section]["content"]
soup = BeautifulSoup("", "html.parser")
html = soup.html
head = soup.new_tag("head")
html.append(head)
meta_charset = soup.new_tag("meta", charset="utf-8")
head.append(meta_charset)
meta_compat = soup.new_tag(
"meta", **{"http-equiv": "X-UA-Compatible", "content": "chrome=1"}
)
head.append(meta_compat)
title = soup.new_tag("title")
title.string = "Galaxy Docker Image by bgruening"
head.append(title)
link = soup.new_tag("link", rel="stylesheet", href="css/landing_page.css")
head.append(link)
body = soup.new_tag("body")
html.append(body)
wrapper = soup.new_tag("div", **{"class": "wrapper"})
body.append(wrapper)
header_tag = soup.new_tag("header")
wrapper.append(header_tag)
h1 = soup.new_tag("h1")
h1.string = "Galaxy Docker Image"
header_tag.append(h1)
p = soup.new_tag("p")
p.string = "Docker Images tracking the stable Galaxy releases"
header_tag.append(p)
header_tag.append(BeautifulSoup(str(header), "html.parser"))
p_view = soup.new_tag("p", **{"class": "view"})
a_view = soup.new_tag("a", href="https://github.com/bgruening/docker-galaxy")
a_view.string = "View the Project on GitHub "
small_view = soup.new_tag("small")
small_view.string = "bgruening/docker-galaxy"
a_view.append(small_view)
p_view.append(a_view)
header_tag.append(p_view)
ul_box = soup.new_tag("ul", **{"class": "box"})
li_zip = soup.new_tag("li", **{"class": "box"})
a_zip = soup.new_tag(
"a", href="https://github.com/bgruening/docker-galaxy/zipball/master"
)
a_zip.string = "Download "
strong_zip = soup.new_tag("strong")
strong_zip.string = "ZIP File"
a_zip.append(strong_zip)
li_zip.append(a_zip)
ul_box.append(li_zip)
li_tar = soup.new_tag("li", **{"class": "box"})
a_tar = soup.new_tag(
"a", href="https://github.com/bgruening/docker-galaxy/tarball/master"
)
a_tar.string = "Download "
strong_tar = soup.new_tag("strong")
strong_tar.string = "TAR Ball"
a_tar.append(strong_tar)
li_tar.append(a_tar)
ul_box.append(li_tar)
li_github = soup.new_tag("li", **{"class": "box"})
a_github = soup.new_tag("a", href="https://github.com/bgruening/docker-galaxy")
a_github.string = "View On "
strong_github = soup.new_tag("strong")
strong_github.string = "GitHub"
a_github.append(strong_github)
li_github.append(a_github)
ul_box.append(li_github)
header_tag.append(ul_box)
section = soup.new_tag("section")
section.append(page_content)
wrapper.append(section)
footer = soup.new_tag("footer")
p1 = soup.new_tag("p")
p1.append(
BeautifulSoup(
'This project is maintained by bgruening',
"html.parser",
)
)
footer.append(p1)
p2 = soup.new_tag("p")
p2.append(
BeautifulSoup(
'Hosted on GitHub Pages — Theme by orderedlist',
"html.parser",
)
)
footer.append(p2)
wrapper.append(footer)
script = soup.new_tag("script", src="js/landing_page.js")
wrapper.append(script)
with open(output_filepath, "w") as output_file:
output_file.write(soup.prettify())
if __name__ == "__main__":
    # Read the project README and render it to HTML with GitHub-Flavored
    # Markdown; the `unsafe` option keeps raw HTML fragments that are
    # embedded in the README instead of stripping them.
    with open("README.md", "r") as f:
        doc = f.read()
    html_content = pycmarkgfm.gfm_to_html(doc, options=pycmarkgfm.options.unsafe)
    # Split the rendered README into per-section pages under docs/.
    extract_html_structure(html_content)
================================================
FILE: docs/src/requirements.txt
================================================
pycmarkgfm
beautifulsoup4
================================================
FILE: galaxy/Dockerfile
================================================
# Galaxy - Stable
#
# VERSION Galaxy in Docker
# TODO
#
# * README: only Docker next to Docker is supported
# * NodeJS is getting globally installed via the playbook, this is not needed anymore isn't it?
# * the playbooks are not cleaning anything up
#
# Shared base for all build stages: pins the canonical install paths and the
# miniforge release so every stage below agrees on them.
FROM buildpack-deps:24.04 AS build_base
ENV GALAXY_ROOT_DIR=/galaxy \
    GALAXY_VIRTUAL_ENV=/galaxy_venv \
    GALAXY_HOME=/home/galaxy \
    GALAXY_CONDA_PREFIX=/tool_deps/_conda \
    MINIFORGE_VERSION=25.11.0-1
# Install miniforge and then virtualenv from conda
FROM build_base AS build_miniforge
ADD ./bashrc $GALAXY_HOME/.bashrc
RUN curl -s -L https://github.com/conda-forge/miniforge/releases/download/$MINIFORGE_VERSION/Miniforge3-$MINIFORGE_VERSION-Linux-x86_64.sh > ~/miniforge.sh \
    # Batch (-b) install into the prefix that also holds conda tool dependencies
    && /bin/bash ~/miniforge.sh -b -p $GALAXY_CONDA_PREFIX/ \
    && rm ~/miniforge.sh \
    # Make conda available to login shells and to the galaxy user's bashrc
    && ln -s $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh /etc/profile.d/conda.sh \
    && echo ". $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh" >> $GALAXY_HOME/.bashrc \
    && echo "conda activate base" >> $GALAXY_HOME/.bashrc \
    && export PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH \
    && conda config --add channels bioconda \
    # ephemeris / galaxy-tool-util are used later for tool installation
    && conda install -y virtualenv pip ephemeris "galaxy-tool-util>=24.1" \
    # Drop package tarballs and index caches to shrink the layer
    && conda clean --packages -t -i \
    # Copy the conda state into $GALAXY_HOME so the galaxy user inherits it
    && cp -r ~/.conda $GALAXY_HOME && cp ~/.condarc $GALAXY_HOME \
    && find $GALAXY_CONDA_PREFIX -name '*.pyc' -delete
FROM build_base AS build_galaxy
ARG GALAXY_RELEASE=release_26.0
ARG GALAXY_REPO=https://github.com/galaxyproject/galaxy
ENV NODE_OPTIONS=--max-old-space-size=4096 \
UV_INSTALL_DIR=/usr/local/bin \
GALAXY_WHEELS_INDEX_URL="https://wheels.galaxyproject.org/simple"
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
COPY --from=build_miniforge /tool_deps /tool_deps
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=cache,target=/root/.npm \
--mount=type=cache,target=/root/.cache/yarn \
mkdir $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV \
# download latest stable release of Galaxy.
&& curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \
&& uv venv --seed $GALAXY_VIRTUAL_ENV \
# Install galaxy client
&& cd $GALAXY_ROOT_DIR && NPM_CONFIG_CACHE=/root/.npm YARN_CACHE_FOLDER=/root/.cache/yarn ./scripts/common_startup.sh \
&& uv pip install --python $GALAXY_VIRTUAL_ENV/bin/python "weasyprint>=61.2" watchdog \
--index-strategy unsafe-best-match --extra-index-url ${GALAXY_WHEELS_INDEX_URL} \
# cleanup
&& find config \( -name 'node_modules' -o -name '.cache' -o -name '.parcel-cache' \) -type d -prune -exec rm -rf '{}' + \
&& find $GALAXY_ROOT_DIR -name '*.pyc' -delete && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete \
&& rm -rf $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/
# This is needed for gridengine to work with galaxy
# https://github.com/galaxyproject/galaxy/issues/10425
# Build jemalloc 5.3.0 from source; --disable-initial-exec-tls is the flag the
# linked issue requires (TLS model problems when the library is injected).
# The resulting /usr/local/lib/libjemalloc.so.2 is copied into the final stage.
RUN cd / \
    && curl -L -o jemalloc-5.3.0.tar.gz https://github.com/jemalloc/jemalloc/archive/5.3.0.tar.gz \
    && tar -xvzf jemalloc-5.3.0.tar.gz \
    && cd jemalloc-5.3.0 \
    && ./autogen.sh && ./configure --disable-initial-exec-tls \
    && make -j 4 && make install
# TEMPORARY SLURM-DRMAA SOURCE BUILD
# - Slurm 24.11 is required for Ubuntu 24.04 in this image, but the natefoo
# slurm-drmaa PPA only ships binaries built against Ubuntu's Slurm 23.11.
# - That mismatch breaks DRMAA at runtime (libslurm ABI/plugin version errors).
# - Until a 24.11-compatible slurm-drmaa package exists, we build it here from
# source in a dedicated stage and only copy the runtime library into the
# final image.
FROM build_base AS build_slurm_drmaa
ARG SLURM_DRMAA_VERSION=1.1.5
RUN apt-get -qq update \
&& apt-get install -y --no-install-recommends \
ca-certificates \
git \
autoconf \
automake \
bison \
flex \
gperf \
ragel \
libtool \
pkg-config \
software-properties-common \
dirmngr \
gpg \
gpg-agent \
&& add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 \
&& apt-get -qq update \
&& apt-get install -y --no-install-recommends libslurm-dev slurm-wlm \
&& git clone --branch "$SLURM_DRMAA_VERSION" --depth 1 --recurse-submodules --shallow-submodules \
https://github.com/natefoo/slurm-drmaa.git /tmp/slurm-drmaa \
&& cd /tmp/slurm-drmaa \
&& ./autogen.sh \
&& ./configure --prefix=/usr/local \
&& make -j"$(nproc)" \
&& make install \
&& mkdir -p /out \
&& cp -a /usr/local/lib/libdrmaa.so* /out/ \
&& rm -rf /var/lib/apt/lists/* /tmp/slurm-drmaa
FROM ubuntu:24.04 AS galaxy_cluster_base
ENV GALAXY_ROOT_DIR=/galaxy \
GALAXY_VIRTUAL_ENV=/galaxy_venv \
GALAXY_LOGS_DIR=/home/galaxy/logs \
GALAXY_CONFIG_DIR=/etc/galaxy \
GALAXY_USER=galaxy \
GALAXY_UID=1450 \
GALAXY_GID=1450 \
GALAXY_HOME=/home/galaxy \
GALAXY_CONDA_PREFIX=/tool_deps/_conda \
EXPORT_DIR=/export \
DEBIAN_FRONTEND=noninteractive \
PG_VERSION=15
ENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml \
GALAXY_CONFIG_JOB_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_conf.xml \
GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_metrics_conf.yml \
GALAXY_CONFIG_TUS_UPLOAD_STORE=/tmp/tus_upload_store \
GALAXY_CONFIG_INTERACTIVETOOLS_MAP=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/interactivetools_map.sqlite \
GRAVITY_CONFIG_FILE=$GALAXY_CONFIG_DIR/gravity.yml \
GALAXY_POSTGRES_UID=1550 \
GALAXY_POSTGRES_GID=1550 \
# Define the default postgresql database path
PG_DATA_DIR_DEFAULT=/var/lib/postgresql/$PG_VERSION/main/ \
PG_CONF_DIR_DEFAULT=/etc/postgresql/$PG_VERSION/main/ \
PG_DATA_DIR_HOST=$EXPORT_DIR/postgresql/$PG_VERSION/main/
ENV UV_INSTALL_DIR=/usr/local/bin
ADD ./common_cleanup.sh /usr/bin/common_cleanup.sh
RUN echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \
&& echo "Acquire::http {No-Cache=True;};" > /etc/apt/apt.conf.d/no-cache \
&& echo 'APT::Install-Recommends "0";' > /etc/apt/apt.conf.d/99no-install-recommends \
&& apt-get -qq update && apt-get install -y locales curl \
&& locale-gen en_US.UTF-8 && dpkg-reconfigure locales \
&& apt-get autoremove -y && apt-get clean \
&& chmod 755 /usr/bin/common_cleanup.sh \
&& /usr/bin/common_cleanup.sh
ADD ansible/ /ansible/
# Install ansible and other dependencies
RUN apt-get -qq update \
&& apt install -y software-properties-common dirmngr gpg gpg-agent \
&& add-apt-repository ppa:ansible/ansible \
# Use the Ubuntu HPC PPA for Slurm 24.11 so libslurm and plugins stay in sync on 24.04.
&& add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 \
&& apt-get -qq update \
&& apt install -y sudo ca-certificates nano git gridengine-common gridengine-drmaa1.0 libswitch-perl nodejs npm singularity-container \
&& apt install -y ansible slurm-wlm libslurm42t64 \
# Make python3 standard
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \
&& apt purge -y software-properties-common systemd && apt-get autoremove -y && apt-get clean \
# Install ansible roles
&& ansible-galaxy install -r /ansible/requirements.yml -p /ansible/roles \
&& npm install -g @galaxyproject/gx-it-proxy@latest \
&& apt-get purge -y npm \
&& apt-get autoremove -y \
&& groupadd -r $GALAXY_USER -g $GALAXY_GID \
&& useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -m -c "Galaxy user" --shell /bin/bash $GALAXY_USER \
# Create the postgres user before apt-get does (with the configured UID/GID) to facilitate sharing $EXPORT_DIR/postgresql with non-Linux hosts
&& groupadd -r postgres -g $GALAXY_POSTGRES_GID \
&& adduser --system --quiet --home /var/lib/postgresql --no-create-home --shell /bin/bash --gecos "" --uid $GALAXY_POSTGRES_UID --gid $GALAXY_POSTGRES_GID postgres \
&& mkdir -p $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV $GALAXY_CONFIG_DIR $GALAXY_CONFIG_DIR/web $GALAXY_LOGS_DIR $EXPORT_DIR $EXPORT_DIR/container_cache/singularity/mulled \
&& chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV $GALAXY_CONFIG_DIR $GALAXY_LOGS_DIR $EXPORT_DIR \
&& /usr/bin/common_cleanup.sh
COPY --from=build_slurm_drmaa /out/ /usr/lib/slurm-drmaa/lib/
RUN ln -sf /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \
&& ldconfig
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /tool_deps /tool_deps
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
# Install necessary components and dependencies for running Galaxy
RUN --mount=type=cache,target=/root/.cache/uv \
uv venv --seed $GALAXY_VIRTUAL_ENV \
&& chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV \
&& ansible-playbook /ansible/provision.yml \
--extra-vars galaxy_server_dir=$GALAXY_ROOT_DIR \
--extra-vars galaxy_venv_dir=$GALAXY_VIRTUAL_ENV \
--extra-vars galaxy_logs_dir=$GALAXY_LOGS_DIR \
--extra-vars galaxy_user_name=$GALAXY_USER \
--extra-vars galaxy_config_file=$GALAXY_CONFIG_FILE \
--extra-vars galaxy_config_dir=$GALAXY_CONFIG_DIR \
--extra-vars gravity_config_file=$GRAVITY_CONFIG_FILE \
--extra-vars galaxy_job_conf_path=$GALAXY_CONFIG_JOB_CONFIG_FILE \
--extra-vars galaxy_job_metrics_conf_path=$GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE \
--extra-vars postgresql_version=$PG_VERSION \
--extra-vars supervisor_postgres_config_path=$PG_CONF_DIR_DEFAULT/postgresql.conf \
--extra-vars redis_venv_dir=$GALAXY_VIRTUAL_ENV \
--extra-vars redis_venv_user=$GALAXY_USER \
--extra-vars galaxy_user_name=$GALAXY_USER \
--extra-vars proftpd_sql_db=galaxy@galaxy \
--extra-vars proftpd_sql_user=$GALAXY_USER \
--extra-vars proftpd_sql_password=$GALAXY_USER \
--extra-vars galaxy_ftp_upload_dir=$EXPORT_DIR/ftp \
--extra-vars tus_upload_store_path=$GALAXY_CONFIG_TUS_UPLOAD_STORE \
--extra-vars gx_it_proxy_sessions_path=$GALAXY_CONFIG_INTERACTIVETOOLS_MAP \
# Install flower separately as systemd tasks (tagged with 'service') have to be skipped
&& PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook /ansible/flower.yml --skip-tags service \
--extra-vars flower_venv_dir=$GALAXY_VIRTUAL_ENV \
--extra-vars flower_db_file=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/flower.db \
--extra-vars flower_user=$GALAXY_USER \
--extra-vars flower_group=$GALAXY_USER \
--extra-vars flower_venv_user=$GALAXY_USER \
--extra-vars flower_venv_group=$GALAXY_USER \
&& chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV \
&& apt purge -y software-properties-common dirmngr gpg gpg-agent && apt-get autoremove -y && apt-get clean \
&& /usr/bin/common_cleanup.sh
FROM galaxy_cluster_base AS final
LABEL maintainer="Björn A. Grüning "
# Default runtime configuration for the final image. Galaxy maps environment
# variables of the form GALAXY_CONFIG_<OPTION> onto its config options.
ENV GALAXY_CONFIG_MANAGED_CONFIG_DIR=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/config \
    GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_CONFIG_DIR/tool_conf.xml \
    GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH=$GALAXY_CONFIG_DIR/tool_data_table_conf.xml \
    GALAXY_CONFIG_WATCH_TOOL_DATA_DIR=True \
    GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE=$GALAXY_CONFIG_DIR/container_resolvers_conf.yml \
    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=$EXPORT_DIR/tool_deps \
    GALAXY_CONFIG_TOOL_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/tools \
    GALAXY_DEFAULT_ADMIN_USER=admin \
    GALAXY_DEFAULT_ADMIN_EMAIL=admin@example.org \
    GALAXY_DEFAULT_ADMIN_PASSWORD=password \
    GALAXY_DEFAULT_ADMIN_KEY=fakekey \
    GALAXY_DESTINATIONS_DEFAULT=slurm_cluster \
    GALAXY_RUNNERS_ENABLE_SLURM=True \
    GALAXY_RUNNERS_ENABLE_CONDOR=False \
    GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:galaxy@localhost:5432/galaxy?client_encoding=utf8 \
    GALAXY_CONFIG_ADMIN_USERS=admin@example.org \
    GALAXY_CONFIG_BOOTSTRAP_ADMIN_API_KEY=HSNiugRFvgT574F43jZ7N9F3 \
    GALAXY_CONFIG_BRAND="Galaxy Docker Build" \
    GALAXY_CONFIG_STATIC_ENABLED=False \
    # FIX: the Galaxy option is "file_source_templates"; the previous variable name
    # was misspelled (TEMPPLATES) and therefore never reached Galaxy's config.
    GALAXY_CONFIG_FILE_SOURCE_TEMPLATES=$GALAXY_CONFIG_DIR/file_source_templates.yml \
    # Misspelled variable kept temporarily for backward compatibility with any
    # script that still reads it; remove once nothing references it.
    GALAXY_CONFIG_FILE_SOURCE_TEMPPLATES=$GALAXY_CONFIG_DIR/file_source_templates.yml \
    GALAXY_CONFIG_VAULT_CONFIG_FILE=$GALAXY_CONFIG_DIR/vault_conf.yml \
    GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE=$GALAXY_CONFIG_DIR/tool_conf_interactive.xml \
    # The following ENV var can be used to set the number of gunicorn workers
    GUNICORN_WORKERS=2 \
    # The following ENV var can be used to set the number of celery workers
    CELERY_WORKERS=2 \
    # Set HTTPS to use a self-signed certificate (or your own certificate in $EXPORT_DIR/{server.key,server.crt})
    USE_HTTPS=False \
    # Set USE_HTTPS_LETSENCRYPT and GALAXY_DOMAIN to a domain that is reachable to get a letsencrypt certificate
    USE_HTTPS_LETSENCRYPT=False \
    GALAXY_DOMAIN=localhost \
    # Set the number of Galaxy handlers
    GALAXY_HANDLER_NUMPROCS=2 \
    # Setting a standard encoding. This can get important for things like the unix sort tool.
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy $GALAXY_VIRTUAL_ENV $GALAXY_VIRTUAL_ENV
COPY --chown=root:root --from=build_galaxy /usr/local/lib/libjemalloc.so.2 /usr/local/lib/libjemalloc.so.2
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge $GALAXY_HOME $GALAXY_HOME
COPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /etc/profile.d/conda.sh /etc/profile.d/conda.sh
ADD --chown=$GALAXY_USER:$GALAXY_USER ./sample_tool_list.yaml $GALAXY_HOME/ephemeris/sample_tool_list.yaml
# Activate Interactive Tools during runtime
ADD --chown=$GALAXY_USER:$GALAXY_USER ./tool_conf_interactive.xml.sample $GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE
RUN mkdir -p $GALAXY_CONFIG_TUS_UPLOAD_STORE \
&& ln -s /tool_deps/ $GALAXY_CONFIG_TOOL_DEPENDENCY_DIR \
&& chown $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_TOOL_DEPENDENCY_DIR $GALAXY_CONFIG_TUS_UPLOAD_STORE \
# Configure Galaxy to use the Tool Shed
&& cp $GALAXY_HOME/.bashrc ~/ \
&& su $GALAXY_USER -c "cp $GALAXY_ROOT_DIR/config/galaxy.yml.sample $GALAXY_CONFIG_FILE" \
&& su $GALAXY_USER -c "cp $GALAXY_ROOT_DIR/config/tool_conf.xml.sample $GALAXY_CONFIG_TOOL_CONFIG_FILE" \
&& ansible-playbook /ansible/galaxy_job_conf.yml \
--extra-vars galaxy_server_dir=$GALAXY_ROOT_DIR \
--extra-vars galaxy_config_dir=$GALAXY_CONFIG_DIR \
--extra-vars galaxy_config_file=$GALAXY_CONFIG_FILE \
--extra-vars galaxy_job_conf_path=$GALAXY_CONFIG_JOB_CONFIG_FILE \
--extra-vars galaxy_container_resolvers_conf_path=$GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE \
--extra-vars galaxy_user_name=$GALAXY_USER \
&& curl -o $GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH \
-L https://raw.githubusercontent.com/galaxyproject/usegalaxy-playbook/8adb1f82c94fe95b09df2a2816440ce2420b7d39/env/main/files/galaxy/config/tool_data_table_conf.xml \
&& chown $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH \
# Ensure Galaxy uses the jemalloc we built (gridengine compatibility: #10425).
&& mv /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2.orig \
&& ln -s /usr/local/lib/libjemalloc.so.2 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# Install optional Galaxy dependencies for the default config during build.
RUN --mount=type=cache,target=/root/.cache/uv \
optional_deps_file="$(mktemp)" \
&& PYTHONPATH=$GALAXY_ROOT_DIR/lib $GALAXY_VIRTUAL_ENV/bin/python \
-c "from galaxy.dependencies import optional; print('\n'.join(optional('/etc/galaxy/galaxy.yml')))" \
> "$optional_deps_file" \
&& if [ -s "$optional_deps_file" ]; then \
/usr/local/bin/uv pip install \
--python "$GALAXY_VIRTUAL_ENV/bin/python" \
--index-strategy unsafe-best-match \
--extra-index-url "${GALAXY_WHEELS_INDEX_URL}" \
-r "$optional_deps_file"; \
fi \
&& rm -f "$optional_deps_file" \
###### This is needed because of a setuptools problem, remove in 26.1
&& cd "$GALAXY_ROOT_DIR" \
&& ./scripts/common_startup.sh --skip-client-build \
###############################
&& chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV
# Include all needed scripts from the host
ADD ./setup_postgresql.py /usr/local/bin/setup_postgresql.py
# Configure PostgreSQL
# 1. Remove all old configuration
# 2. Create DB-user 'galaxy' with password 'galaxy' in database 'galaxy'
# 3. Create Galaxy Admin User 'admin@example.org' with password 'admin' and API key 'admin'
RUN cd / \
&& rm $PG_DATA_DIR_DEFAULT -rf \
&& python /usr/local/bin/setup_postgresql.py --dbuser galaxy --dbpassword galaxy --db-name galaxy --dbpath $PG_DATA_DIR_DEFAULT --dbversion $PG_VERSION \
&& service postgresql start \
&& service postgresql stop
WORKDIR $GALAXY_ROOT_DIR
# Updating genome informations from UCSC
# RUN su $GALAXY_USER -c "export GALAXY=$GALAXY_ROOT_DIR && sh ./cron/updateucsc.sh.sample"
ENV GALAXY_CONFIG_JOB_WORKING_DIRECTORY=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/job_working_directory \
GALAXY_CONFIG_FILE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/files \
GALAXY_CONFIG_NEW_FILE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/tmp \
GALAXY_CONFIG_TEMPLATE_CACHE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/compiled_templates \
GALAXY_CONFIG_CITATION_CACHE_DATA_DIR=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/citations/data \
GALAXY_CONFIG_FTP_UPLOAD_DIR=$EXPORT_DIR/ftp \
GALAXY_CONFIG_FTP_UPLOAD_SITE=example.org \
GALAXY_CONFIG_USE_PBKDF2=True \
GALAXY_CONFIG_NGINX_X_ACCEL_REDIRECT_BASE=/_x_accel_redirect \
GALAXY_CONFIG_DYNAMIC_PROXY_MANAGE=False \
GALAXY_CONFIG_VISUALIZATION_PLUGINS_DIRECTORY=config/plugins/visualizations \
GALAXY_CONFIG_TRUST_JUPYTER_NOTEBOOK_CONVERSION=True \
GALAXY_CONFIG_SANITIZE_ALL_HTML=False \
GALAXY_CONFIG_WELCOME_URL=$GALAXY_CONFIG_DIR/web/welcome.html \
GALAXY_CONFIG_OVERRIDE_DEBUG=False \
GALAXY_CONFIG_ENABLE_QUOTAS=True \
GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://$GALAXY_DOMAIN \
GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=True \
GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda \
DRMAA_LIBRARY_PATH=/usr/lib/slurm-drmaa/lib/libdrmaa.so
# Container Style
ADD --chown=$GALAXY_USER:$GALAXY_USER GalaxyDocker.png $GALAXY_CONFIG_DIR/web/welcome_image.png
ADD --chown=$GALAXY_USER:$GALAXY_USER welcome.html $GALAXY_CONFIG_DIR/web/welcome.html
# Activate additional Tool Sheds
# Activate the Test Tool Shed during runtime, useful for testing repositories.
ADD --chown=$GALAXY_USER:$GALAXY_USER ./tool_sheds_conf.xml $GALAXY_HOME/tool_sheds_conf.xml
# Script that enables easier downstream installation of tools (e.g. for different Galaxy Docker flavours)
ADD install_tools_wrapper.sh /usr/bin/install-tools
RUN chmod +x /usr/bin/install-tools && \
cd /usr/bin/ && curl https://git.embl.de/grp-gbcs/galaxy-dir-sync/raw/master/src/galaxy-dir-sync.py > galaxy-dir-sync.py && \
chmod +x galaxy-dir-sync.py
# use https://github.com/krallin/tini/ as tiny but valid init and PID 1
ADD https://github.com/krallin/tini/releases/download/v0.18.0/tini /sbin/tini
ADD --chown=$GALAXY_USER:$GALAXY_USER ./run.sh $GALAXY_ROOT_DIR/run.sh
RUN chmod +x /sbin/tini \
&& chmod 755 ./run.sh $GALAXY_ROOT_DIR/run.sh
# This needs to happen here and not above, otherwise the Galaxy start
# (without running the startup.sh script) will crash because integrated_tool_panel.xml could not be found.
ENV GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/integrated_tool_panel.xml
# Expose ports 80, 443 (web server), 21 (FTP server), 4002 (proxy), 9002 (supervisord web app)
EXPOSE 21
EXPOSE 80
EXPOSE 443
EXPOSE 4002
EXPOSE 9002
# Mark folders as imported from the host.
VOLUME ["/export/", "/data/", "/var/lib/docker"]
ADD startup.sh /usr/bin/startup
ADD startup2.sh /usr/bin/startup2
ENV SUPERVISOR_POSTGRES_AUTOSTART=False \
SUPERVISOR_MANAGE_POSTGRES=True \
SUPERVISOR_MANAGE_CRON=True \
SUPERVISOR_MANAGE_PROFTP=True \
SUPERVISOR_MANAGE_CONDOR=True \
SUPERVISOR_MANAGE_SLURM= \
SUPERVISOR_MANAGE_RABBITMQ=True \
SUPERVISOR_MANAGE_REDIS=True \
SUPERVISOR_MANAGE_FLOWER=True \
GRAVITY_MANAGE_CELERY=True \
GRAVITY_MANAGE_GX_IT_PROXY=True \
GRAVITY_MANAGE_TUSD=True \
HOST_DOCKER_LEGACY= \
STARTUP_EXPORT_USER_FILES=True \
LOAD_GALAXY_CONDITIONAL_DEPENDENCIES=True
ENTRYPOINT ["/sbin/tini", "--"]
# Autostart script that is invoked during container start
CMD ["/usr/bin/startup"]
================================================
FILE: galaxy/ansible/condor.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
htcondor_version: 25.x
htcondor_keyring_path: /etc/apt/keyrings/htcondor.asc
htcondor_repo_list_path: /etc/apt/sources.list.d/htcondor.list
htcondor_role_submit: true
htcondor_password: changeme
htcondor_domain: '{{ galaxy_user_name }}'
htcondor_server: localhost
htcondor_firewall_condor: false
htcondor_firewall_nfs: false
pre_tasks:
# This pre-task addresses the systemd service that is installed by the role,
# which cannot function inside the container.
# Therefore, we use a sysvinit script to manage HTCondor during the playbook execution.
# The init script will be removed later in the post-tasks, as we will use
# supervisor to manage HTCondor.
- name: Create HTCondor init script
copy:
dest: /etc/init.d/condor
mode: '0755'
content: |
#!/bin/sh
HTCONDOR_DIR="/usr/sbin"
case "$1" in
start)
echo "Starting HTCondor..."
$HTCONDOR_DIR/condor_master
;;
stop)
echo "Stopping HTCondor..."
killall -r '.*condor.*'
;;
restart)
echo "Restarting HTCondor..."
$0 stop
$0 start
;;
esac
exit 0
- name: Register HTCondor init script
command: update-rc.d condor defaults
tasks:
- name: Install HTCondor repository prerequisites
apt:
name:
- curl
- gnupg
- apt-transport-https
state: present
update_cache: true
- name: Ensure APT keyring directory exists
file:
path: /etc/apt/keyrings
state: directory
mode: "0755"
- name: Add HTCondor signing key
get_url:
url: "https://htcss-downloads.chtc.wisc.edu/repo/keys/HTCondor-{{ htcondor_version }}-Key"
dest: "{{ htcondor_keyring_path }}"
mode: "0644"
- name: Configure HTCondor apt repository
copy:
dest: "{{ htcondor_repo_list_path }}"
content: |
# HTCondor repository for the {{ htcondor_version }} feature versions
deb [signed-by={{ htcondor_keyring_path }}] https://htcss-downloads.chtc.wisc.edu/repo/ubuntu/{{ htcondor_version }} {{ ansible_distribution_release }} main
deb-src [signed-by={{ htcondor_keyring_path }}] https://htcss-downloads.chtc.wisc.edu/repo/ubuntu/{{ htcondor_version }} {{ ansible_distribution_release }} main
- name: Install HTCondor
apt:
name: condor
state: present
update_cache: true
- name: Create log files for HTCondor
file:
path: "/var/log/condor/{{ item }}"
state: touch
owner: condor
loop:
- StartLog
- StarterLog
- CollectorLog
- NegotiatorLog
- name: Configure HTCondor
lineinfile:
path: /etc/condor/condor_config.local
create: yes
line: "{{ item }}"
loop:
- 'DISCARD_SESSION_KEYRING_ON_STARTUP=False'
- 'TRUST_UID_DOMAIN=true'
# Remove the init script and systemd
post_tasks:
- name: Stop HTCondor service
command: /etc/init.d/condor stop
- name: Remove HTCondor init script
file:
path: /etc/init.d/condor
state: absent
- name: Remove HTCondor init script registration
command: update-rc.d -f condor remove
- name: Purge systemd and perform cleanup
shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean
================================================
FILE: galaxy/ansible/cvmfs_client.yml
================================================
# Setup of the CernVM-File system (CVMFS) and configure so that the reference
# data hosted by Galaxy on usegalaxy.org is available to the remote target.
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: Install CernVM apt key
apt_key:
url: https://cvmrepo.web.cern.ch/cvmrepo/apt/cernvm.gpg
# Install & setup CernVM-FS
- name: Configure CernVM apt repository
apt_repository:
filename: "cernvm.list"
# File modes must be quoted octal strings; the bare integer 422 was
# interpreted as decimal 422 and produced nonsensical permissions.
mode: "0644"
repo: deb [allow-insecure=true] https://cvmrepo.web.cern.ch/cvmrepo/apt/ {{ ansible_distribution_release }}-prod main
- name: Install CernVM-FS client (apt)
apt:
name: ['cvmfs', 'cvmfs-config', 'autofs']
state: "{{ galaxy_apt_package_state }}"
update_cache: yes
- name: Make CernVM-FS key directories
file:
state: directory
path: "{{ item }}"
owner: "root"
group: "root"
mode: "0755"
loop: "{{ cvmfs_keys | map(attribute='path') | map('dirname') | unique }}"
- name: Install CernVM-FS keys
copy:
content: "{{ item.key }}"
dest: "{{ item.path }}"
owner: "root"
group: "root"
mode: "0444"
with_items: "{{ cvmfs_keys }}"
- name: Perform AutoFS and FUSE configuration for CernVM-FS
command: cvmfs_config setup
- name: Configure CernVM-FS config repository
block:
- name: Create config repo config
copy:
content: |
CVMFS_SERVER_URL="{{ cvmfs_config_repo.urls | join(';') }}"
CVMFS_PUBLIC_KEY="{{ cvmfs_config_repo.key.path }}"
dest: "/etc/cvmfs/config.d/{{ cvmfs_config_repo.repository.repository }}.conf"
owner: "root"
group: "root"
mode: "0444"
- name: Set config repo defaults
copy:
content: |
CVMFS_CONFIG_REPOSITORY="{{ cvmfs_config_repo.repository.repository }}"
CVMFS_DEFAULT_DOMAIN="{{ cvmfs_config_repo.domain }}"
CVMFS_USE_GEOAPI="{{ cvmfs_config_repo.use_geoapi | default('yes') }}"
dest: "/etc/cvmfs/default.d/80-galaxyproject-cvmfs.conf"
owner: "root"
group: "root"
mode: "0444"
- name: Configure CernVM-FS global client settings
copy:
content: |
CVMFS_HTTP_PROXY="{{ cvmfs_http_proxies | default(['DIRECT']) | join(';') }}"
CVMFS_QUOTA_LIMIT="{{ cvmfs_quota_limit | default('4000') }}"
CVMFS_CACHE_BASE="{{ cvmfs_cache_base | default('/var/lib/cvmfs') }}"
CVMFS_USE_GEOAPI="{{ cvmfs_use_geoapi | default('yes') }}"
dest: "/etc/cvmfs/default.local"
owner: "root"
group: "root"
mode: "0644"
================================================
FILE: galaxy/ansible/docker.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
docker_install_compose: false
docker_install_compose_plugin: false
docker_users:
- "{{ galaxy_user_name }}"
docker_service_manage: false
roles:
- role: geerlingguy.docker
tasks:
- name: Purge systemd and perform cleanup
shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean
================================================
FILE: galaxy/ansible/files/413.html
================================================
Request Too Large! | 413 - Payload Too Large
Request Too Large Error 413
The request payload you tried to send is too large.
Please try again with a smaller request.
================================================
FILE: galaxy/ansible/files/500.html
================================================
Galaxy is down! | 500 - Webservice currently unavailable
Webservice currently unavailable Error 500
An unexpected condition was encountered.
================================================
FILE: galaxy/ansible/files/502.html
================================================
Galaxy is down! | 502 - Webservice currently unavailable
Webservice currently unavailable Error 502
Most likely, Galaxy is booting.
Please don't panic. Relax, get some tea or ice-cream (depending on the season), and try again a few seconds later.
================================================
FILE: galaxy/ansible/files/nginx_sample.crt
================================================
-----BEGIN CERTIFICATE-----
MIIE7TCCAtWgAwIBAgIUHBIplAOVmxyIRH51KvXuSWydCj8wDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI0MTEwNjIzMDU1NVoXDTM0MTEw
NDIzMDU1NVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA1kSpfexOnDQvNDwSg/4Cjv13+41VF2RgJdpk0n1iBz92
GKEl7SEh+nhUFinn+CKv2EaNQ7Nv5/+BNoPbBvS8Gm7ZtGt+cFXqRy4ka5It68sq
bwZadmAGwksJbvcizs5D6XS3BPIB2FrxvBbhzOj+oDYxC3HItIgYwV0+Gv/GBDCi
F4+b9dO//gfR1ywqsvGczaaMBkbhuOZ2WZph9nFEcdHgNEzLn/HJsYZv0crrFjCL
o7+FWsYIdM1wNP2bkpPzRFpB1ujxfl9xxH4pTc06sHIKivnoVMs5VvsqdWtosJi/
s84ALuofPDKuGN2JaTA8e0MVnC2ZxOcDFUtR/WvN8rkUMcNnP8nKJGlFuDRbVMCq
MrzxglAUeOCc4sjOZxvSlCGa44xGUCvpiVvE9pVO0pNTXQGlNlC9DWnhO4VkOZf7
rnx2a9u707g3GbyfjJfMmZIn3jZTpbOe+6JVgtgrxATa9g8aYKWVOe5HzJywgTkC
ktNWcqZr4Kx4lKTM4so4dbsZe3pwHBi+XeInUzwEhFcbcP3UUZXpRmzOX7vIkd6+
VVR1WwaWwOAEUx872chHLqabrFMWeQLa64vUeG/A82ltFD03anzTBVr7dEkxczIS
2Ljt5Bzc/5hhbev8s/0CCLhQy89EucnIs1Ow+Fwdc9Ue6Qn/OvVjw65hJRruxrUC
AwEAAaM3MDUwFAYDVR0RBA0wC4IJbG9jYWxob3N0MB0GA1UdDgQWBBTorI9k0nHV
1OA4uCA6/ciE3gdgRDANBgkqhkiG9w0BAQsFAAOCAgEAWYmTrc/h4kSv4W12JrTP
mzMF5qGRUK5YBTxN656Pf9wENEMbZmWbCFu5e+Ewe+z3BCE/oZZFpHEx/mdT2ARO
YbIXpw0THzrfRtwVRSlUDl7O2zt6DrT4AxIJzBLpf8TSWNSMIQwI/Hv1wewwClM9
D7YM+S2WkwSu6jByXVfLCByjXsLLN25X16q+t7tnio6IMA2gB8XdnstMRzQmwQti
dfki3mCSehuhDV6Ylj4Ln/JGkyJ648MzvGnl2J8l4FArR9E9Rzx0XEPbMiqZmYXq
jPtBErrO4thXkgqr3TjLuxqt3RjG7cmmlsGY3oMtscs8QeNDhhCxoyWpz7ECmCpE
43DtUMfLLf77XbDUSvakPAJ2ZdWl44+JgOS7v7CxiV6DFzRi5ZXfmx+KdZ3YakkV
B6pvGHmxqy2uNAk2WX632BFa0OvGnYFF68x8flYQUOXVgC3B5/xMxWfy5JLC/jwE
DFvLMZinxLiJIx9Bbn1PCPXNIk7waDK2Y1YYVj61tKupFEh470C1Rra/C+RP08Be
5zJi0OZumpfCa+Hz4UDv9Cm6tTrrG/xwpicQidfPzkSublgzzW0Zvx/s6C+5Rqzm
PLZw6l33bEQstPcJSxUmSNlaNwsUJxEGGNnAwfQq6vPTp0YJZ+xo0BUOeFpt7tdC
pygM9OEmi1vuRknJr3vD+aM=
-----END CERTIFICATE-----
================================================
FILE: galaxy/ansible/files/nginx_sample.key
================================================
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEA1kSpfexOnDQvNDwSg/4Cjv13+41VF2RgJdpk0n1iBz92GKEl
7SEh+nhUFinn+CKv2EaNQ7Nv5/+BNoPbBvS8Gm7ZtGt+cFXqRy4ka5It68sqbwZa
dmAGwksJbvcizs5D6XS3BPIB2FrxvBbhzOj+oDYxC3HItIgYwV0+Gv/GBDCiF4+b
9dO//gfR1ywqsvGczaaMBkbhuOZ2WZph9nFEcdHgNEzLn/HJsYZv0crrFjCLo7+F
WsYIdM1wNP2bkpPzRFpB1ujxfl9xxH4pTc06sHIKivnoVMs5VvsqdWtosJi/s84A
LuofPDKuGN2JaTA8e0MVnC2ZxOcDFUtR/WvN8rkUMcNnP8nKJGlFuDRbVMCqMrzx
glAUeOCc4sjOZxvSlCGa44xGUCvpiVvE9pVO0pNTXQGlNlC9DWnhO4VkOZf7rnx2
a9u707g3GbyfjJfMmZIn3jZTpbOe+6JVgtgrxATa9g8aYKWVOe5HzJywgTkCktNW
cqZr4Kx4lKTM4so4dbsZe3pwHBi+XeInUzwEhFcbcP3UUZXpRmzOX7vIkd6+VVR1
WwaWwOAEUx872chHLqabrFMWeQLa64vUeG/A82ltFD03anzTBVr7dEkxczIS2Ljt
5Bzc/5hhbev8s/0CCLhQy89EucnIs1Ow+Fwdc9Ue6Qn/OvVjw65hJRruxrUCAwEA
AQKCAf8gSZuhrHbI4ElDmmH/c/j38/ceP1B3i0DRg5GbW2nGb424cjPYd6bVPqaP
t1tmvLVh6wPD9j8wg8NMeFF9d/cqN0TS/+ogHMRcsqUmkuCGugjf9Pcm/6Rl9cq5
AGReqc+25kmnDVaF8wA+VUtwvH+UQasGohDtLJdG47FVh2gTbEw1tAHaPlzCfnkA
KvhbRi2ZBwx4GGkEjSuEVQ/xdiJP2KXG34ZxEDWi2Rcw9Jf7tHw4WiK3Tw2tW7sQ
SlwSVzthUgEkPHJ19yD/gfDqKUXDg6Mn09YVVnNI6Lm5Jgw7DM4W/g0t60Dp+tTC
ZLIv24OihQO+7yloi6MSgj3dQvmudbrGIaUQ3WuSVIeRyzlEBQRC6TfsIMNEIH7E
GogW+uopWWMhdnZkesGzYRIMkOpkOiFd4gZ6zw7++IOB7SaiR7JkJnTVmFip4oDf
tFXxxOOtZDlTpRzfjZggNzPm7Kyh4KFdKD27OdHwxpkl14GALtELncFCWBq6zt8g
32HtFhMKTkjCjZ+oAe4X3PAZ2vISsKk7sEAebumy6E1xUAfDrfcXNN0rBz2jaMro
SsdUeOsaaOQ3sB5KMyR04GvBdHsCI0rCEWJKtjxzbRkRryAftxXp1pOzDZg69xZO
CFrNW+FrRLu19k6ZNuoToq7p7oSqwMSzj3BScD2EW+QL1PpJAoIBAQDkwyjL8/cL
1gG52hodMjPUBXEQbQW6GDMvmt0rHUcX7tbpvfvidZuF35qPujVFsRIa/gQSH6sB
VvIrarsioaqkNaRLBl1EmnF2JpGH1SJWw5yOt7FUpC95sqwWk5vkXdOxL3XCzLVy
pIVbGKoy2bYe0lcYH804ggnTuGQYR7EkjSvv7/FpBZY2uDTzf3X2w5YzfaBQOuSF
rzOMj7php61z/jA0POaVq6yr3mnZ0Hxk1aJfyX+a5mvKBGfChceIpcdzpNBuSpbS
ZNKF4YBNmZ8ESd0qjUnBmtPg4h0gpOXR3wa86xHiboMoYyhor6eCo1yS33qxv6B+
Xn1GzLGBkWJHAoIBAQDvx7YR91sAb/uwEHQB/nciXrynXEfjWC1fnDNiJMgF89Z9
dUgEKPPeJNX7S8TrHNZryHBnLj4m8G3pWrIyJkTEvWZwr3jJKsl3x3ZTgS1LhkPZ
KvXph6xlkpx+4BcDHFQX4B/mTOnsYZyIb0C71LMZ/vNE6J3WN5uVFtI7ShZDf6SJ
GUKCBDckYXDaEAJ0WqK3b7Eu7wBhYjWq6EPTZxtudVMJkHvaLVJAPBUCgswtyE2o
ETXoTExG4388XrPCQ/qosuUNPDm9pSMjTiWtDz0VQg+clJNdjTu7EfhqD2zx/sHM
1KVJvoZUowrL5UdErKRJGoRvtev9ZIJl48m8a3EjAoIBAQDdaUKoPEXFL+nVvxH9
ZiShtm6bTln3pwqLreEYpKq1sFZUP6x2oAvaA/Tt3XVIMbzrYSYBgKMbldKoURI0
z7KAYubUMqG9D5p3l5bNmG02+vchbwt0d8D3kgZbh5yf6GxHFz9sPoP0JOZpqDK7
KtrJdB4V3FndsobeY56FnYYHcZewEFVgp6ae6aVec+Rx5RYQWiv62zVpaoyDJG1p
rUgFd2WiebtX66QhaRCcX9y2H8ub2EPoYdK74Y2nyaG5UXL9K+0Mgqb9ldXo/LwY
33H4TaGBWOSlPTyLcW5ttQw3GBzGZuKVfQ723Ro0UKbZm1GzWhe/yFAHX17zUpUP
ae5rAoIBAQDQ7SMn5G/WobycXLm4QxFrUUDwUugQn/RpKqFbEtF23lA2YMqvVT+o
gFAy9oJOmoH6yFuojBJ7u2MJwY0jRVUGWEG6TirgnfeN9q6TdCsTc5oKz/QV17HP
jz/tDTT/8N8VLqSc9secwDC0cLvm7h5guFUf5dAhp7JY5dmo75UWm1GyY+AfiazC
dmTunKSG3bKKQzgPvRCHyhsZH+h5e43bYT9JRiukn3jbn35vAakG+1Eu8FAYaOLN
ocxrvdjDnJf8BmSuc5ucMxe624zYjj6bF0SjGpKNIVK6XZ4mS+qRsXkMEP00lF5X
wPjXUKAYppU/XWuoKsvFrp4wSZquIrAhAoIBAQCWE7gx9/Ao7hT5fq0ho+TVV7Cp
5gnDlNVASexjOrVS8OuQntrKNbYrvcgdCivyxVXxi3HFoydHPg9U4VchrADj9qPL
9BH3/BCZJPttGYVIfPThJCnj6kM11BuOSweG/nEU03N36XsxR4748RiRf+MyL2kX
KxMEwt+RZvH5beqMLrbLuQ8ey7QNJx/XK3wYhl90q8eJgEFHaKz/LBB/Fwerj5mW
f+5jNCbDg+ey7gUmYh8pHqc1KlI8orvRmYP2m2OJFXIHVY+O0U5k/lKXBndM8TpQ
jzCf/N7e2g/7lkZKQhaMrfBsU1s9Ib2p7JuaOyeOKdN4qKURj8xtb3m6fymK
-----END RSA PRIVATE KEY-----
================================================
FILE: galaxy/ansible/files/production_b2drop.yml
================================================
- id: b2drop
version: 0
name: B2DROP
description: |
B2DROP is a Nextcloud to sync and share your research data.
variables:
username:
label: Username / Account Name
type: string
help: |
The username or account name to use to connect to B2DROP. This is not your email address but the name shown in the URL of your profile page.
writable:
label: Writable?
type: boolean
default: false
help: Allow Galaxy to write data to B2DROP.
secrets:
password:
label: Password
help: |
The password to use to connect to B2DROP.
configuration:
type: webdav
url: 'https://b2drop.bsc.es'
root: '/remote.php/dav/files/{{ variables.username }}'
login: '{{ variables.username }}'
writable: '{{ variables.writable }}'
password: '{{ secrets.password }}'
================================================
FILE: galaxy/ansible/flower.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
flower_python_package_version: 1.2.0
flower_custom_logging: false
flower_conf_dir: "{{ flower_conf_path | dirname }}"
flower_ui_users: []
roles:
- role: usegalaxy_eu.flower
tasks:
- name: Add url prefix to flower config
lineinfile:
path: "{{ flower_conf_path }}"
line: 'url_prefix = "{{ flower_url_prefix }}"'
================================================
FILE: galaxy/ansible/galaxy_file_source_templates.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: Install fs.webdavfs for Galaxy's file source plugins
pip:
name: "fs.webdavfs"
extra_args: "--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple"
virtualenv: "{{ galaxy_venv_dir }}"
environment:
# Unset PYTHONPATH so pip resolves packages only from the virtualenv.
# (Was misspelled "PYTHOPATH", which silently defined an unused variable
# instead of clearing PYTHONPATH.)
PYTHONPATH: null
VIRTUAL_ENV: "{{ galaxy_venv_dir }}"
become_user: "{{ galaxy_user_name }}"
- name: "Setup user configurable file source templates, also called BYOD"
template: src=file_source_templates.yml.j2 dest={{ galaxy_file_source_templates_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
- name: "Copy B2Drop file source template"
ansible.builtin.copy:
src: "./files/production_b2drop.yml"
dest: "{{ galaxy_config_dir }}/production_b2drop.yml"
owner: "{{ galaxy_user_name }}"
group: "{{ galaxy_user_name }}"
mode: '0644'
================================================
FILE: galaxy/ansible/galaxy_job_conf.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Ensure dynamic handler assignment method is configured"
lineinfile:
path: "{{ galaxy_config_file }}"
regexp: '^job_handler_assignment_method:'
line: 'job_handler_assignment_method: db-skip-locked'
insertafter: EOF
create: true
owner: "{{ galaxy_user_name }}"
group: "{{ galaxy_user_name }}"
mode: "0644"
when: galaxy_dynamic_handlers | bool
- name: "Install Galaxy job conf"
template: src=job_conf.xml.j2 dest={{ galaxy_job_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
- name: "Install Galaxy container resolution configuration"
template: src=container_resolvers_conf.yml.j2 dest={{ galaxy_container_resolvers_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
when: galaxy_container_resolution | bool
================================================
FILE: galaxy/ansible/galaxy_job_metrics.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Setup job metrics"
template: src=job_metrics_conf.yml.j2 dest={{ galaxy_job_metrics_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
================================================
FILE: galaxy/ansible/galaxy_object_store_templates.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Setup user configurable object store templates, also called BYOS"
template: src=object_store_templates.yml.j2 dest={{ galaxy_object_store_templates_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
================================================
FILE: galaxy/ansible/galaxy_scripts.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Install galaxy user creation script."
template: src=create_galaxy_user.py.j2 dest=/usr/local/bin/create_galaxy_user.py mode=a+x
- name: "Install galaxy check database script."
template: src=check_database.py.j2 dest=/usr/local/bin/check_database.py mode=a+x
- name: "Install export user files script."
template: src=export_user_files.py.j2 dest=/usr/local/bin/export_user_files.py mode=a+x
- name: "Install add_tool_shed script."
template: src=add_tool_shed.py.j2 dest=/usr/local/bin/add-tool-shed mode=a+x
- name: "Install startup lite script."
template: src=startup_lite.sh.j2 dest=/usr/bin/startup_lite mode=a+x
- name: "Install cgroupfs_mount.sh for startup script."
template: src=cgroupfs_mount.sh.j2 dest=/root/cgroupfs_mount.sh mode=a+x
- name: "Install update_yaml_value script."
template: src=update_yaml_value.py.j2 dest=/usr/local/bin/update_yaml_value mode=a+x
================================================
FILE: galaxy/ansible/galaxy_vault_config.yml
================================================
- hosts: localhost
connection: local
remote_user: root
# You should change this key in production. You can generate Fernet keys with:
#from cryptography.fernet import Fernet
#Fernet.generate_key().decode('utf-8')
vars:
galaxy_vault_encryption_keys:
- pwiL08wXlpkBm-_Dr75aw1_uOPVA3HET1y7xrpynhKU=
tasks:
- name: "Configure Galaxy vault"
template: src=vault_conf.yml.j2 dest={{ galaxy_vault_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
================================================
FILE: galaxy/ansible/gravity.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Install gravity for galaxy"
pip:
name: gravity
version: 1.0.6
virtualenv: "{{ galaxy_venv_dir }}"
virtualenv_command: "{{ pip_virtualenv_command | default( 'virtualenv' ) }}"
# pip's --index-url expects a PEP 503 "simple" index; use the /simple/
# endpoint like the other playbooks in this repo (see galaxy_file_source_templates.yml).
extra_args: "--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple"
become: True
become_user: "{{ galaxy_user_name }}"
- name: Deploy galaxyctl wrapper script
copy:
content: |
#!/usr/bin/env sh
export GRAVITY_CONFIG_FILE={{ gravity_config_file }}
export GRAVITY_STATE_DIR={{ gravity_state_dir }}
exec sudo -E -H -u $GALAXY_USER {{ galaxy_venv_dir }}/bin/galaxyctl "$@"
dest: "/usr/local/bin/galaxyctl"
mode: "0755"
become: True
become_user: root
- name: "Install Gravity conf"
template: src=gravity.yml.j2 dest={{ gravity_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
================================================
FILE: galaxy/ansible/group_vars/all.yml
================================================
use_pbkdf2: true
postgresql_version: 15
galaxy_apt_package_state: present
# The storage backend to use for docker-in-docker.
# overlay2 on parent docker cannot be combined with overlay2 in child docker
docker_storage_backend: overlay2
docker_legacy: false
galaxy_nginx: true
galaxy_postgres: true
galaxy_proftpd: true
galaxy_slurm: true
galaxy_condor: true
galaxy_pbs: false
galaxy_k8s_jobs: false
galaxy_supervisor: true
galaxy_job_metrics: true
galaxy_file_source_templates: true
galaxy_object_store_templates: true
galaxy_vault_config: true
galaxy_scripts: true
galaxy_domain: "localhost" # This is used by letsencrypt and Interactive Tools, set it to the domain name under which galaxy can be reached
galaxy_startup: true
galaxy_rabbitmq: true
galaxy_redis: true
galaxy_flower: true
galaxy_tusd: true
galaxy_cvmfs_client: true
galaxy_job_conf: true
galaxy_gravity: true
galaxy_docker: true
galaxy_db_port: "5432"
galaxy_database_connection: "postgres://{{ galaxy_user_name }}@localhost:{{ galaxy_db_port }}/galaxy"
# Default destination for Galaxy jobs in generated job_conf.xml - can
# tweak this to allow for a different default for Docker-enabled tools.
galaxy_destination_default: slurm_cluster
galaxy_destination_docker_default: "{{ galaxy_destination_default }}"
galaxy_destination_singularity_default: "{{ galaxy_destination_default }}"
# set the FQDN for the pbs server, only used when galaxy_pbs: true
pbs_server_name: pbsqueue
# Only used when galaxy_slurm: true, sets slurm ntask in job_conf.xml.
# Will be overwritten if NATIVE_SPEC environmental variable is set.
# In the default setting controls the value of GALAXY_SLOTS.
# Use ansible_processor_cores: "{{ ansible_processor_vcpus }}" to set this to the number of
# threads per core * processor count * cores per processor
galaxy_slurm_ntask: 1
galaxy_gcc_available: false
# Follow job_conf attributes set if galaxy_k8s_jobs is true.
galaxy_k8s_jobs_use_service_account: true
galaxy_k8s_jobs_persistent_volume_claims: galaxy-web-claim0:/export
galaxy_k8s_jobs_namespace: default
galaxy_k8s_jobs_supplemental_group_id: 0
galaxy_k8s_jobs_fs_group_id: 0
galaxy_k8s_jobs_pull_policy: IfNotPresent
# Point at the existing Galaxy configuration.
galaxy_server_dir: "/galaxy"
galaxy_config_dir: "{{ galaxy_server_dir }}/config"
galaxy_job_conf_path: "{{ galaxy_config_dir }}/job_conf.xml"
galaxy_container_resolvers_conf_path: "{{ galaxy_config_dir }}/container_resolvers_conf.yml"
galaxy_job_metrics_conf_path: "{{ galaxy_config_dir }}/job_metrics_conf.yml"
galaxy_file_source_templates_config_file: "{{ galaxy_config_dir }}/file_source_templates.yml"
galaxy_object_store_templates_config_file: "{{ galaxy_config_dir }}/object_store_templates.yml"
galaxy_vault_config_file: "{{ galaxy_config_dir }}/vault_conf.yml"
galaxy_user_name: "galaxy"
galaxy_home_dir: "/home/{{ galaxy_user_name }}"
galaxy_source_shellrc: false
galaxy_user_shellrc: "{{ galaxy_home_dir }}/.bashrc"
galaxy_logs_dir: "{{ galaxy_home_dir }}/"
galaxy_venv_dir: "/galaxy_venv"
galaxy_config_file: "{{ galaxy_config_dir }}/galaxy.yml"
galaxy_toolshed_config_file: "{{ galaxy_config_dir }}/tool_shed.yml"
galaxy_tool_data_table_config_file: "{{ galaxy_config_dir }}/tool_data_table_conf.xml"
galaxy_toolshed_port: "9009"
# Docker defaults
galaxy_docker_enabled: false
galaxy_docker_sudo: false
galaxy_docker_default_image: 'busybox:ubuntu-14.04'
galaxy_docker_volumes_from: ""
# (no space before the colon — keeps yamllint happy and matches the rest of the file)
galaxy_docker_volumes: "$defaults"
galaxy_docker_net: "bridge"
galaxy_docker_auto_rm: true
galaxy_docker_set_user: ""
# Singularity defaults
galaxy_singularity_enabled: false
galaxy_singularity_sudo: false
# ToDo create default image
galaxy_singularity_default_image: ''
galaxy_singularity_volumes_from: ""
# rw directories are not considered if the parent is ro
galaxy_singularity_volumes: "$defaults{{ ',/cvmfs:/cvmfs' if galaxy_cvmfs_client | bool else '' }}"
galaxy_container_resolution: true
container_resolution_explicit: true
container_resolution_mulled: true
container_resolution_cached_mulled: "{{ container_resolution_mulled }}"
container_resolution_build_mulled: "{{ container_resolution_mulled }}"
container_resolution_mulled_namespace: biocontainers
# Gravity configuration.
gravity_config_file: "{{ galaxy_config_dir }}/gravity.yml"
gravity_state_dir: "{{ galaxy_server_dir }}/database/gravity"
gravity_process_manager: "supervisor"
gravity_manage_celery: true
gravity_manage_tusd: true
gravity_manage_gx_it_proxy: true
# Gunicorn configuration.
galaxy_gunicorn: true
gunicorn_port: "4001"
gunicorn_workers: 2
# Handler configuration.
galaxy_dynamic_handlers: true
galaxy_handler_processes: 2
# Celery configuration.
galaxy_celery: true
galaxy_celery_beat: true
celery_workers: 2
# gx_it_proxy configuration.
galaxy_gx_it_proxy: true
gx_it_proxy_port: "4002"
gx_it_proxy_version: '>=0.0.6'
gx_it_proxy_sessions_path: "{{ galaxy_server_dir }}/database/interactivetools_map.sqlite"
# Tusd configuration.
tusd_port: "1080"
tusd_path: "/usr/local/sbin/tusd"
tus_upload_store_path: "/tmp/tus_upload_store"
tusd_base_path: "{{ nginx_tusd_location }}"
galaxy_job_metrics_core: true
galaxy_job_metrics_env: false
galaxy_job_metrics_cpuinfo: true
galaxy_job_metrics_meminfo: true
galaxy_job_metrics_uname: true
galaxy_job_metrics_hostname: false
galaxy_job_metrics_cgroup: false
# TODO: configure collectl, individual env files
# TODO: alternative to configure metrics all at once using yml datastructure.
galaxy_it_fetch_jupyter: false
galaxy_it_jupyter_image: quay.io/bgruening/docker-jupyter-notebook:2021-03-05
galaxy_it_fetch_rstudio: false
galaxy_it_rstudio_image: quay.io/galaxy/docker-rstudio-notebook:23.1
galaxy_it_fetch_ethercalc: false
galaxy_it_ethercalc_image: shiltemann/ethercalc-galaxy-ie:17.05
galaxy_it_fetch_phinch: false
galaxy_it_phinch_image: shiltemann/docker-phinch-galaxy:16.04
galaxy_it_fetch_neo: false
galaxy_it_neo_image: quay.io/sanbi-sa/neo_ie:3.1.9
# Nginx configuration.
nginx_conf_dir: /etc/nginx
nginx_conf_file: "{{ nginx_conf_dir }}/nginx.conf"
# Use nginx_*_location variables to control serving apps at subdirectories.
# If galaxy should be served at subdirectory (e.g. example.com/galaxy) set nginx_galaxy_location: /galaxy
# If all apps should be served on a common subdirectory, use nginx_prefix_location: /your_common_dir
nginx_prefix_location: ""
nginx_galaxy_location: "{{ nginx_prefix_location }}"
nginx_rabbitmq_management_location: "{{ nginx_prefix_location }}/rabbitmq"
nginx_flower_location: "{{ nginx_prefix_location }}/flower"
nginx_tusd_location: "{{ nginx_prefix_location }}/api/upload/resumable_upload"
nginx_planemo_web_location: "{{ nginx_prefix_location }}/planemo"
nginx_ide_location: "{{ nginx_prefix_location }}/ide"
nginx_welcome_location: "{{ nginx_prefix_location }}/etc/galaxy/web"
nginx_welcome_path: "/etc/galaxy/web"
# Synchronize error handling with ansible-galaxy role.
galaxy_errordocs_dir: "/var/www/galaxy_errordocs"
#web security
nginx_use_passwords: false
nginx_htpasswds:
- "admin:WiBKbsJTSQ8dc"
nginx_use_remote_header: true
# Additional configurations to be appended to nginx config
nginx_additional_config: []
nginx_proxy_gunicorn: true
nginx_proxy_rabbitmq_management: true
nginx_proxy_flower: true
nginx_proxy_interactive_tools: true
# Certbot Configuration.
certbot_auto_renew_hour: "{{ 23 |random(seed=inventory_hostname) }}"
certbot_auto_renew_minute: "{{ 59 |random(seed=inventory_hostname) }}"
certbot_auth_method: --webroot
certbot_install_method: virtualenv
certbot_auto_renew: yes
certbot_auto_renew_user: root
certbot_environment: production
certbot_well_known_root: "{{ nginx_conf_dir }}/_well-known_root"
certbot_share_key_users:
- "{{ galaxy_user_name }}"
certbot_post_renewal: |
supervisorctl restart nginx || true
certbot_agree_tos: --agree-tos
## Proftp Configuration.
proftpd_conf_path: /etc/proftpd/proftpd.conf
proftpd_sql_db: galaxy@galaxy
proftpd_sql_user: galaxy
proftpd_sql_password: galaxy
proftpd_welcome: "Public Galaxy FTP"
galaxy_ftp_upload_dir: /export/ftp
proftpd_ftp_port: 21
proftpd_passive_port_low: 30000
proftpd_passive_port_high: 40000
proftpd_sftp_port: 22
# Set masquerade to true if the host is NAT'ed.
proftpd_nat_masquerade: false
# proftpd_masquerade_address refers to the ip that clients use to establish an ftp connection.
# Can be a command that returns an IP or an IP address and applies only if proftpd_nat_masquerade is true.
# ec2metadata --public-ipv4 returns the public ip for amazon's ec2 service.
proftpd_masquerade_address: "`ec2metadata --public-ipv4`"
## RabbitMQ Configuration.
rabbitmq_port: "5672"
rabbitmq_management_port: "15672"
rabbitmq_admin_username: admin
rabbitmq_admin_password: admin
rabbitmq_galaxy_vhost: galaxy
rabbitmq_galaxy_username: galaxy
rabbitmq_galaxy_password: galaxy
rabbitmq_flower_username: flower
rabbitmq_flower_password: flower
galaxy_amqp_internal_connection: "pyamqp://{{ rabbitmq_galaxy_username }}:{{ rabbitmq_galaxy_password }}@localhost:{{ rabbitmq_port }}/{{ rabbitmq_galaxy_vhost }}"
## Flower Configuration.
flower_conf_path: /etc/flower/flowerconfig.py
flower_bind_address: 0.0.0.0
flower_port: "5555"
flower_broker_api: "http://{{ rabbitmq_flower_username }}:{{ rabbitmq_flower_password }}@localhost:{{ rabbitmq_management_port }}/api/" # URL of broker (RabbitMQ Management) API
flower_broker_url: "amqp://{{ rabbitmq_flower_username }}:{{ rabbitmq_flower_password }}@localhost:{{ rabbitmq_port }}/{{ rabbitmq_galaxy_vhost }}" # AMQP URL for Flower to connect to broker (RabbitMQ)
flower_persistent: true
flower_db_file: "{{ galaxy_server_dir }}/database/flower.db"
flower_app_name: galaxy.celery
flower_log: "{{ galaxy_logs_dir }}/flower.log"
flower_url_prefix: "{{ nginx_flower_location }}"
flower_venv_dir: "{{ galaxy_venv_dir }}"
flower_user: "{{ galaxy_user_name }}"
flower_group: "{{ galaxy_user_name }}"
flower_venv_user: "{{ galaxy_user_name }}"
flower_venv_group: "{{ galaxy_user_name }}"
## Supervisor Configuration.
supervisor_conf_path: "/etc/supervisor/conf.d/galaxy.conf"
supervisor_webserver: true
supervisor_webserver_port: "0.0.0.0:9002"
supervisor_webserver_username: null
supervisor_webserver_password: changeme
supervisor_manage_cron: true
supervisor_manage_autofs: true
supervisor_manage_slurm: false
supervisor_manage_condor: true
supervisor_manage_postgres: true
supervisor_manage_proftp: true
supervisor_manage_nginx: true
supervisor_manage_toolshed: false
supervisor_manage_docker: true
supervisor_manage_rabbitmq: true
supervisor_manage_redis: true
supervisor_manage_flower: true
supervisor_cron_autostart: false
supervisor_autofs_autostart: true
supervisor_slurm_autostart: true
supervisor_condor_autostart: false
supervisor_postgres_autostart: false
supervisor_proftpd_autostart: false
supervisor_docker_autostart: false
supervisor_docker_autorestart: true
supervisor_rabbitmq_autostart: false
supervisor_redis_autostart: false
supervisor_flower_autostart: false
supervisor_slurm_config_dir: "/home/galaxy"
supervisor_postgres_config_path: "/etc/postgresql/{{ postgresql_version }}/main/postgresql.conf"
supervisor_postgres_database_path: "/export/postgresql/{{ postgresql_version }}/main"
supervisor_postgres_options: "-D {{ supervisor_postgres_database_path }} -c \"config_file={{ supervisor_postgres_config_path }}\""
supervisor_galaxy_startsecs: 20
# had to increase retries to ensure the postgres database is available,
# wasn't needed in the past.
supervisor_galaxy_startretries: 15
## CVMFS Configuration.
cvmfs_config_repo:
domain: galaxyproject.org
key:
path: /etc/cvmfs/keys/galaxyproject.org/cvmfs-config.galaxyproject.org.pub
key: |
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuJZTWTY3/dBfspFKifv8
TWuuT2Zzoo1cAskKpKu5gsUAyDFbZfYBEy91qbLPC3TuUm2zdPNsjCQbbq1Liufk
uNPZJ8Ubn5PR6kndwrdD13NVHZpXVml1+ooTSF5CL3x/KUkYiyRz94sAr9trVoSx
THW2buV7ADUYivX7ofCvBu5T6YngbPZNIxDB4mh7cEal/UDtxV683A/5RL4wIYvt
S5SVemmu6Yb8GkGwLGmMVLYXutuaHdMFyKzWm+qFlG5JRz4okUWERvtJ2QAJPOzL
mAG1ceyBFowj/r3iJTa+Jcif2uAmZxg+cHkZG5KzATykF82UH1ojUzREMMDcPJi2
dQIDAQAB
-----END PUBLIC KEY-----
urls:
- http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@
- http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@
- http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@
- http://cvmfs1-ufr0.galaxyproject.eu/cvmfs/@fqrn@
- http://cvmfs1-mel0.gvl.org.au/cvmfs/@fqrn@
repository:
repository: cvmfs-config.galaxyproject.org
stratum0: cvmfs0-psu0.galaxyproject.org
owner: "root"
server_options: []
client_options: []
cvmfs_keys:
- path: /etc/cvmfs/keys/galaxyproject.org/cvmfs-config.galaxyproject.org.pub
key: |
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuJZTWTY3/dBfspFKifv8
TWuuT2Zzoo1cAskKpKu5gsUAyDFbZfYBEy91qbLPC3TuUm2zdPNsjCQbbq1Liufk
uNPZJ8Ubn5PR6kndwrdD13NVHZpXVml1+ooTSF5CL3x/KUkYiyRz94sAr9trVoSx
THW2buV7ADUYivX7ofCvBu5T6YngbPZNIxDB4mh7cEal/UDtxV683A/5RL4wIYvt
S5SVemmu6Yb8GkGwLGmMVLYXutuaHdMFyKzWm+qFlG5JRz4okUWERvtJ2QAJPOzL
mAG1ceyBFowj/r3iJTa+Jcif2uAmZxg+cHkZG5KzATykF82UH1ojUzREMMDcPJi2
dQIDAQAB
-----END PUBLIC KEY-----
- path: /etc/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub
key: |
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE
3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj
GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH
maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs
NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT
owIDAQAB
-----END PUBLIC KEY-----
================================================
FILE: galaxy/ansible/k8s.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: "Install secure urllib3 for galaxy - better SSL verification with pykube"
# See also https://github.com/kelproject/pykube/issues/29
pip: name=urllib3[secure] virtualenv={{ galaxy_venv_dir }} virtualenv_command="{{ pip_virtualenv_command | default( 'virtualenv' ) }}"
when: galaxy_gcc_available | bool
become: True
become_user: "{{ galaxy_user_name }}"
- name: "Install ipaddress for galaxy - better SSL verification with pykube"
# See also https://github.com/kelproject/pykube/issues/29
pip: name=ipaddress virtualenv={{ galaxy_venv_dir }} virtualenv_command="{{ pip_virtualenv_command | default( 'virtualenv' ) }}"
become: True
become_user: "{{ galaxy_user_name }}"
- name: "Install pykube for galaxy"
pip: name=pykube-ng version="21.3.0" virtualenv={{ galaxy_venv_dir }} virtualenv_command="{{ pip_virtualenv_command | default( 'virtualenv' ) }}"
become: True
become_user: "{{ galaxy_user_name }}"
================================================
FILE: galaxy/ansible/nginx.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
# Default container config: avoid DH param generation and OCSP stapling
# errors in ephemeral/self-signed setups. Override in production.
nginx_conf_ssl_protocols:
- TLSv1.2
- TLSv1.3
nginx_conf_ssl_dhparam: false
nginx_conf_ssl_stapling: "off"
nginx_conf_ssl_stapling_verify: "off"
nginx_conf_http:
client_max_body_size: 50g
proxy_buffers: 8 16k
proxy_buffer_size: 16k
underscores_in_headers: "on"
keepalive_timeout: 600
proxy_read_timeout: 300
server_names_hash_bucket_size: 128
# gzip: "on" # This is enabled by default in Ubuntu, and the duplicate directive will cause a crash.
gzip_proxied: "any"
gzip_static: "on"
gzip_vary: "on"
gzip_min_length: 128
gzip_comp_level: 6
gzip_types: |
text/plain
text/css
text/xml
text/javascript
application/javascript
application/x-javascript
application/json
application/xml
application/xml+rss
application/xhtml+xml
application/x-font-ttf
application/x-font-opentype
image/png
image/svg+xml
image/x-icon
nginx_extra_configs:
- galaxy_common.conf
- interactive_tools_common.conf
- flower_auth.conf
- delegated_uploads.conf
nginx_servers:
- galaxy_http
- interactive_tools_http
galaxy_errordocs:
- { code: '502', src: '502.html' }
- { code: '413', src: '413.html' }
- { code: '500', src: '500.html' }
# this is required as we re-run this playbook during startup (while setting up ssl or proxy prefix)
pre_tasks:
- name: Ensure 'daemon off' is absent from nginx.conf
lineinfile:
path: '{{ nginx_conf_file }}'
regexp: '^(\s*daemon\s+off\s*;)'
state: absent
- name: Ensure nginx sites-available and sites-enabled directories are empty and recreated
file:
state: "{{ item.state }}"
path: "{{ nginx_conf_dir }}/{{ item.dir }}"
owner: root
group: root
mode: '0755'
with_items:
- { dir: 'sites-available', state: 'absent' }
- { dir: 'sites-available', state: 'directory' }
- { dir: 'sites-enabled', state: 'absent' }
- { dir: 'sites-enabled', state: 'directory' }
roles:
- role: galaxyproject.nginx
tasks:
- name: Ensure nginx is run by galaxy user
lineinfile:
path: '{{ nginx_conf_file }}'
regexp: '^(\s*user\s+.*)'
line: 'user {{ galaxy_user_name }};'
state: present
- name: Place htpasswd file
template:
src: "nginx/htpasswd.j2"
dest: "{{ nginx_conf_dir }}/htpasswd"
owner: "{{ galaxy_user_name }}"
group: "{{ galaxy_user_name }}"
mode: "0600"
- name: Create directories for error docs
file:
path: "{{ galaxy_errordocs_dir }}/{{ item.code }}"
state: directory
loop: "{{ galaxy_errordocs }}"
- name: Copy error docs into their respective directories
copy:
src: "{{ item.src }}"
dest: "{{ galaxy_errordocs_dir }}/{{ item.code }}/index.html"
mode: '0755'
loop: "{{ galaxy_errordocs }}"
post_tasks:
- name: Ensure 'daemon off' in nginx.conf as we use supervisor to manage nginx
lineinfile:
path: '{{ nginx_conf_file }}'
regexp: '^(\s*daemon\s+.*)'
line: 'daemon off;'
state: present
- name: Stop and disable nginx.
service: name=nginx state=stopped enabled=no
================================================
FILE: galaxy/ansible/pbs.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: Install PBS/torque system packages
apt:
state: "{{ galaxy_apt_package_state }}"
name: "{{ packages }}"
vars:
packages:
- torque-client
- pbs-drmaa-dev
# If job_conf.xml is installed before running galaxyprojectdotorg.galaxy, this would already be installed.
- name: Fetch DRMAA wheel for Galaxy
pip:
name: "drmaa"
extra_args: "--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple"
virtualenv: "{{ galaxy_venv_dir }}"
environment:
PYTHOPATH: null
VIRTUAL_ENV: "{{ galaxy_venv_dir }}"
become_user: "{{ galaxy_user_name }}"
- name: "Set PBS/torque server name"
lineinfile: dest=/etc/torque/server_name line={{ pbs_server_name }} state=present create=yes
================================================
FILE: galaxy/ansible/postgresql.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
postgresql_backup_local_dir: /export/postgresql_backup/
postgresql_version: 15
postgresql_flavor: pgdg
postgresql_conf:
listen_addresses: "'*'"
hba_file: "'/etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf'"
ident_file: "'/etc/postgresql/{{ postgresql_version }}/main/pg_ident.conf'"
postgresql_pg_hba_conf:
- host all all 0.0.0.0/0 md5
roles:
- role: galaxyproject.postgresql
================================================
FILE: galaxy/ansible/proftpd.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
proftpd_galaxy_auth: yes
galaxy_user:
name: "{{ galaxy_user_name }}"
proftpd_galaxy_modules:
- mod_sql.c
- mod_sql_passwd.c
- mod_sql_postgres.c
- mod_sftp.c
- mod_sftp_pam.c
- mod_sftp_sql.c
proftpd_create_ftp_upload_dir: yes
proftpd_options:
- User: "{{ galaxy_user_name }}"
- Group: "{{ galaxy_user_name }}"
proftpd_global_options:
- PassivePorts: "{{ proftpd_passive_port_low }} {{ proftpd_passive_port_high }}"
proftpd_display_connect: "{{ proftpd_welcome }}"
base_ssh_host_keys_dir: /etc/proftpd/ssh_host_keys
proftpd_virtualhosts:
- id: sftp
address: 0.0.0.0
options:
- Port: "{{ proftpd_sftp_port}}"
- SFTPEngine: on
- SFTPPAMEngine: off
- CreateHome: on dirmode 700
- SFTPHostKey: "{{ base_ssh_host_keys_dir }}/rsa"
- SFTPHostKey: "{{ base_ssh_host_keys_dir }}/dsa"
- SFTPCompression: delayed
- SQLEngine: on
- SQLPasswordEngine: on
- SQLLogFile: /var/log/proftpd/sql.log
- SQLBackend: postgres
- SQLAuthenticate: users
- SQLConnectInfo: "{{ proftpd_sql_db }} {{ proftpd_sql_user }} {{ proftpd_sql_password }}"
- SQLAuthTypes: PBKDF2 SHA1
- SQLPasswordPBKDF2: sql:/GetPBKDF2Params
- SQLPasswordEncoding: base64
- SQLUserInfo: custom:/LookupGalaxyUser
- SQLPasswordUserSalt: sql:/GetUserSalt
- SQLNamedQuery: GetPBKDF2Params SELECT "(CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN UPPER(split_part(password, '$', 2)) ELSE 'SHA256' END), (CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 3) ELSE '10000' END), 24 FROM galaxy_user WHERE email='%U'"
- SQLNamedQuery: GetUserSalt SELECT "(CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 4) END) FROM galaxy_user WHERE email='%U'"
- SQLNamedQuery: LookupGalaxyUser SELECT "email, (CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 5) ELSE encode(decode(password, 'hex'), 'base64') END),'{{ galaxy_user_name }}','{{ galaxy_user_name }}','{{ galaxy_ftp_upload_dir }}/%U','/bin/bash' FROM galaxy_user WHERE email='%U'"
# Required for sftp server
pre_tasks:
- name: Install ProFTPD module packages
apt:
name:
- proftpd-mod-crypto
- proftpd-mod-pgsql
state: present
update_cache: true
- name: Install OpenSSH client package
apt: pkg=openssh-client
- name: Create ssh host keys directory
file: path="{{ base_ssh_host_keys_dir }}" state=directory
- name: Generate new SSH keys (rsa)
shell: ssh-keygen -b 2048 -t rsa -f "{{ base_ssh_host_keys_dir }}/rsa" -N ""
args:
creates: "{{ base_ssh_host_keys_dir }}/rsa"
- name: Generate new SSH keys (dsa)
shell: ssh-keygen -b 1024 -t dsa -f "{{ base_ssh_host_keys_dir }}/dsa" -N ""
args:
creates: "{{ base_ssh_host_keys_dir }}/dsa"
roles:
- role: galaxyproject.proftpd
================================================
FILE: galaxy/ansible/provision.yml
================================================
---
- import_playbook: gravity.yml
when: galaxy_gravity | bool
tags: galaxy_gravity
- import_playbook: postgresql.yml
when: galaxy_postgres | bool
tags: galaxy_postgres
- import_playbook: nginx.yml
when: galaxy_nginx | bool
tags: galaxy_nginx
- import_playbook: proftpd.yml
when: galaxy_proftpd | bool
tags: galaxy_proftpd
- import_playbook: slurm.yml
when: galaxy_slurm | bool
tags: galaxy_slurm
- import_playbook: condor.yml
when: galaxy_condor | bool
tags: galaxy_condor
- import_playbook: pbs.yml
when: galaxy_pbs | bool
tags: galaxy_pbs
- import_playbook: k8s.yml
when: galaxy_k8s_jobs | bool
tags: galaxy_k8s_jobs
- import_playbook: cvmfs_client.yml
when: galaxy_cvmfs_client | bool
tags: galaxy_cvmfs_client
- import_playbook: rabbitmq.yml
when: galaxy_rabbitmq | bool
tags: galaxy_rabbitmq
- import_playbook: redis.yml
when: galaxy_redis | bool
tags: galaxy_redis
# - import_playbook: flower.yml
# when: galaxy_flower | bool
# tags: galaxy_flower
- import_playbook: tusd.yml
when: galaxy_tusd | bool
tags: galaxy_tusd
- import_playbook: docker.yml
when: galaxy_docker | bool
tags: galaxy_docker
- import_playbook: supervisor.yml
when: galaxy_supervisor | bool
tags: galaxy_supervisor
- import_playbook: galaxy_scripts.yml
when: galaxy_scripts | bool
tags: galaxy_scripts
- import_playbook: galaxy_job_conf.yml
when: galaxy_job_conf | bool
tags: galaxy_job_conf
- import_playbook: galaxy_job_metrics.yml
when: galaxy_job_metrics | bool
tags: galaxy_job_metrics
- import_playbook: galaxy_file_source_templates.yml
when: galaxy_file_source_templates | bool
tags: galaxy_file_source_templates
- import_playbook: galaxy_object_store_templates.yml
when: galaxy_object_store_templates | bool
tags: galaxy_object_store_templates
- import_playbook: galaxy_vault_config.yml
when: galaxy_vault_config | bool
tags: galaxy_vault_config
================================================
FILE: galaxy/ansible/rabbitmq.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
rabbitmq_keyring_path: /usr/share/keyrings/com.rabbitmq.team.gpg
rabbitmq_repo_list_path: /etc/apt/sources.list.d/rabbitmq.list
rabbitmq_version: 4.2.2-1
rabbitmq_erlang_packages:
- erlang-base
- erlang-asn1
- erlang-crypto
- erlang-eldap
- erlang-ftp
- erlang-inets
- erlang-mnesia
- erlang-os-mon
- erlang-parsetools
- erlang-public-key
- erlang-runtime-tools
- erlang-snmp
- erlang-ssl
- erlang-syntax-tools
- erlang-tftp
- erlang-tools
- erlang-xmerl
tasks:
- name: Install RabbitMQ repository prerequisites
apt:
name:
- curl
- gnupg
- apt-transport-https
state: present
update_cache: true
- name: Add RabbitMQ signing key
shell: |
curl -1sLf "https://keys.openpgp.org/vks/v1/by-fingerprint/0A9AF2115F4687BD29803A206B73A36E6026DFCA" | gpg --dearmor -o {{ rabbitmq_keyring_path }}
args:
creates: "{{ rabbitmq_keyring_path }}"
- name: Configure RabbitMQ apt repositories
copy:
dest: "{{ rabbitmq_repo_list_path }}"
content: |
## Modern Erlang/OTP releases
deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb1.rabbitmq.com/rabbitmq-erlang/ubuntu/noble noble main
deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb2.rabbitmq.com/rabbitmq-erlang/ubuntu/noble noble main
## Latest RabbitMQ releases
deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb1.rabbitmq.com/rabbitmq-server/ubuntu/noble noble main
deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb2.rabbitmq.com/rabbitmq-server/ubuntu/noble noble main
- name: Install Erlang packages
apt:
name: "{{ rabbitmq_erlang_packages }}"
state: present
update_cache: true
- name: Install RabbitMQ server
apt:
name: "rabbitmq-server={{ rabbitmq_version }}"
state: present
update_cache: true
- name: Enable rabbitmq management plugin
rabbitmq_plugin:
name: rabbitmq_management
broker_state: offline
state: enabled
- name: Copy startup script for rabbitmq
template: src=rabbitmq.sh.j2 dest=/usr/local/bin/rabbitmq.sh
- name: Install rabbitmq users configuration script
template: src=configure_rabbitmq_users.yml.j2 dest=/usr/local/bin/configure_rabbitmq_users.yml
- name: Purge systemd and perform cleanup
shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean
================================================
FILE: galaxy/ansible/redis.yml
================================================
- hosts: localhost
connection: local
remote_user: root
roles:
- role: geerlingguy.redis
tasks:
- name: Set daemonize as no in redis config
lineinfile:
path: /etc/redis/redis.conf
regexp: '^daemonize'
line: 'daemonize no'
state: "{{ galaxy_apt_package_state }}"
- name: Install redis python package for galaxy
pip:
name: "redis"
virtualenv: "{{ redis_venv_dir }}"
virtualenv_command: "{{ pip_virtualenv_command | default( 'virtualenv' ) }}"
extra_args: --index-url https://wheels.galaxyproject.org/simple --extra-index-url https://pypi.python.org/simple
become: True
become_user: "{{ redis_venv_user }}"
================================================
FILE: galaxy/ansible/requirements.yml
================================================
---
roles:
- name: galaxyproject.postgresql
version: 1.1.8
- name: geerlingguy.docker
version: 7.9.0
- name: usegalaxy_eu.flower
version: 2.1.1
- name: grycap.htcondor
src: https://github.com/usegalaxy-eu/ansible-htcondor-grycap
version: fe15ce1569e93a9d1030350c42d1af79e8c3e905
- name: galaxyproject.proftpd
version: 0.3.3
- name: geerlingguy.rabbitmq
src: https://github.com/geerlingguy/ansible-role-rabbitmq
version: 3.0.0
- name: geerlingguy.redis
version: 1.9.1
- name: galaxyproject.repos
version: 0.0.3
- name: galaxyproject.slurm
version: 1.0.5
- name: galaxyproject.tusd
src: https://github.com/galaxyproject/ansible-role-tusd
version: e009b498a7989d8002c6a5d104176295d63e9fae
- name: galaxyproject.nginx
version: 1.0.0
- name: usegalaxy_eu.certbot
version: 0.1.13
- name: galaxyproject.self_signed_certs
version: 0.0.4
================================================
FILE: galaxy/ansible/slurm.yml
================================================
- hosts: localhost
connection: local
remote_user: root
vars:
slurm_roles: ['controller', 'exec']
slurm_config:
SlurmctldHost: localhost
SlurmUser: '{{ galaxy_user_name }}'
SelectType: select/cons_tres
SelectTypeParameters: CR_Core_Memory
StateSaveLocation: /tmp/slurm
ReturnToService: 1
roles:
- role: galaxyproject.slurm
# - role: galaxyproject.repos
tasks:
- name: Ensure slurm-drmaa library path exists
file:
path: /usr/lib/slurm-drmaa/lib
state: directory
- name: Ensure slurm-drmaa symlink exists
file:
src: /usr/lib/slurm-drmaa/lib/libdrmaa.so.1
dest: /usr/lib/slurm-drmaa/lib/libdrmaa.so
state: link
force: true
- name: Setup tmp area for slurm
file: path=/tmp/slurm state=directory owner={{ galaxy_user_name }} group={{ galaxy_user_name }}
- name: Add script to update slurm configuration file
template: src=configure_slurm.py.j2 dest=/usr/sbin/configure_slurm.py mode=0755
- name: Setup Munge permissions and folder
file: path={{ item }} state=directory owner=root group=root recurse=yes
with_items:
- /var/run/munge
- /var/lib/munge
- /var/log/munge
- /var/run/munge
- /etc/munge
# If job_conf.xml is installed before running galaxyprojectdotorg.galaxy, this would already be installed.
- name: Fetch DRMAA wheel for Galaxy
pip:
name: "drmaa"
extra_args: "--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple"
virtualenv: "{{ galaxy_venv_dir }}"
environment:
PYTHOPATH: null
VIRTUAL_ENV: "{{ galaxy_venv_dir }}"
become_user: "{{ galaxy_user_name }}"
================================================
FILE: galaxy/ansible/supervisor.yml
================================================
- hosts: localhost
connection: local
remote_user: root
tasks:
- name: Install supervisor package
apt:
state: "{{ galaxy_apt_package_state }}"
name: supervisor
- name: Install cron
apt:
state: "{{ galaxy_apt_package_state }}"
name: cron
when: supervisor_manage_cron | bool
- name: Create Galaxy configuration file
template: src=supervisor.conf.j2 dest={{ supervisor_conf_path }}
- name: Stop supervisor
service: name=supervisor state=stopped
- name: Stop and remove munge.
service: name={{ item }} state=stopped enabled=no
with_items:
- munge
when: supervisor_manage_slurm | bool
- name: Stop and remove slurm.
service: name={{ item }} state=stopped enabled=no
with_items:
- slurmd
- slurmctld
when: supervisor_manage_slurm | bool
- name: Stop and remove postgresql.
service: name={{ item }} state=stopped enabled=no
with_items:
- postgresql
when: supervisor_manage_postgres | bool
- name: Stop and remove proftpd.
service: name={{ item }} state=stopped enabled=no
with_items:
- proftpd
when: supervisor_manage_proftp | bool
- name: Stop and remove nginx.
service: name={{ item }} state=stopped enabled=no
with_items:
- nginx
when: supervisor_manage_nginx | bool
- name: Stop and remove rabbitmq.
service: name={{ item }} state=stopped enabled=no
with_items:
- rabbitmq-server
when: supervisor_manage_rabbitmq | bool
- name: Stop and remove redis.
service: name={{ item }} state=stopped enabled=no
with_items:
- redis-server
when: supervisor_manage_redis | bool
- name: Purge systemd and perform cleanup
shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean
- name: Start supervisor
service: name=supervisor state=started
================================================
FILE: galaxy/ansible/templates/add_tool_shed.py.j2
================================================
#!/usr/bin/env python
import os
import argparse
import xml.etree.ElementTree as ET
TOOL_SHEDS_XML = os.path.join(os.environ['GALAXY_ROOT_DIR'], "config/tool_sheds_conf.xml")
TOOL_SHEDS_XML_SAMPLE = TOOL_SHEDS_XML + '.sample'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add new Tool Shed to Galaxy.')
parser.add_argument('-n', '--name', help='Tool Shed name that is displayed in the admin menue')
parser.add_argument('-u', '--url', help='Tool Shed URL')
args = parser.parse_args()
ts = ET.Element('tool_shed')
ts.set('name', args.name)
ts.set('url', args.url)
if os.path.exists( TOOL_SHEDS_XML ):
tree = ET.parse( TOOL_SHEDS_XML )
else:
tree = ET.parse( TOOL_SHEDS_XML_SAMPLE )
root = tree.getroot()
root.append( ts )
tree.write( TOOL_SHEDS_XML )
================================================
FILE: galaxy/ansible/templates/cgroupfs_mount.sh.j2
================================================
#!/bin/sh
set -e
# Get the latest version of this script from https://github.com/moby/moby/blob/65cfcc28ab37cb75e1560e4b4738719c07c6618e/hack/dind
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni
# See the blog post: https://www.docker.com/blog/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
#
# Set the container env-var, so that AppArmor is enabled in the daemon and
# containerd when running docker-in-docker.
#
# see: https://github.com/containerd/containerd/blob/787943dc1027a67f3b52631e084db0d4a6be2ccc/pkg/apparmor/apparmor_linux.go#L29-L45
# see: https://github.com/moby/moby/commit/de191e86321f7d3136ff42ff75826b8107399497
export container=docker
# Allow AppArmor to work inside the container;
#
# aa-status
# apparmor filesystem is not mounted.
# apparmor module is loaded.
#
# mount -t securityfs none /sys/kernel/security
#
# aa-status
# apparmor module is loaded.
# 30 profiles are loaded.
# 30 profiles are in enforce mode.
# /snap/snapd/18357/usr/lib/snapd/snap-confine
# ...
#
# Note: https://0xn3va.gitbook.io/cheat-sheets/container/escaping/sensitive-mounts#sys-kernel-security
#
# ## /sys/kernel/security
#
# In /sys/kernel/security mounted the securityfs interface, which allows
# configuration of Linux Security Modules. This allows configuration of
# AppArmor policies, and so access to this may allow a container to disable
# its MAC system.
#
# Given that we're running privileged already, this should not be an issue.
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the processes from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
# An error during moving non-existent process (i.e., "cat") is ignored.
mkdir -p /sys/fs/cgroup/init
xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
# Change mount propagation to shared to make the environment more similar to a
# modern Linux system, e.g. with SystemD as PID 1.
mount --make-rshared /
if [ $# -gt 0 ]; then
exec "$@"
fi
echo >&2 'ERROR: No command specified.'
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
================================================
FILE: galaxy/ansible/templates/check_database.py.j2
================================================
#!/usr/bin/env python
# This script checks if the database is connected by querying an user
import sys
sys.path.insert(1,'{{ galaxy_server_dir }}')
sys.path.insert(1,'{{ galaxy_server_dir }}/lib')
from galaxy.model import User
from galaxy.model.mapping import init
from galaxy.model.orm.scripts import get_config
import argparse
__author__ = "Lukas Voegtle"
__email__ = "voegtlel@tf.uni-freiburg.de"
if __name__ == "__main__":
db_url = get_config(sys.argv)['db_url']
mapping = init('/tmp/', db_url)
sa_session = mapping.context
security_agent = mapping.security_agent
# Just query something
query = sa_session.query(User).filter_by(email="admin@example.org")
query.count()
================================================
FILE: galaxy/ansible/templates/configure_rabbitmq_users.yml.j2
================================================
---
- hosts: localhost
connection: local
become: yes
tasks:
- name: Delete 'guest' user
rabbitmq_user:
name: guest
state: absent
- name: Add 'admin' user
rabbitmq_user:
user: {{ rabbitmq_admin_username }}
password: {{ rabbitmq_admin_password }}
vhost: /
configure_priv: .*
read_priv: .*
write_priv: .*
tags: administrator
state: present
- name: Add vhost for galaxy
rabbitmq_vhost:
vhost: {{ rabbitmq_galaxy_vhost }}
state: present
- name: Add 'galaxy' user
rabbitmq_user:
user: {{ rabbitmq_galaxy_username }}
password: {{ rabbitmq_galaxy_password }}
vhost: {{ rabbitmq_galaxy_vhost }}
configure_priv: .*
read_priv: .*
write_priv: .*
state: present
- name: Add 'flower' user
rabbitmq_user:
user: {{ rabbitmq_flower_username }}
password: {{ rabbitmq_flower_password }}
vhost: {{ rabbitmq_galaxy_vhost }}
configure_priv: .*
read_priv: .*
write_priv: .*
tags: administrator
state: present
================================================
FILE: galaxy/ansible/templates/configure_slurm.py.j2
================================================
from socket import gethostname
from os import environ
import subprocess
import json
CONFIG_FILE_PATH = "/etc/slurm/slurm.conf"
ENV_MAP = {
"CPUs": "SLURM_CPUS",
"RealMemory": "SLURM_MEMORY",
"Boards": "SLURM_BOARDS",
"SocketsPerBoard": "SLURM_SOCKETS_PER_BOARD",
"CoresPerSocket": "SLURM_CORES_PER_SOCKET",
"ThreadsPerCore": "SLURM_THREADS_PER_CORE",
}
FORCED_KV = {
"ProctrackType": "proctrack/pgid",
"TaskPlugin": "task/none",
"JobAcctGatherType": "jobacct_gather/none",
"MpiDefault": "none",
}
def _as_int(value):
try:
return int(str(value).split()[0])
except (TypeError, ValueError):
return None
def _slurmd_status():
try:
output = subprocess.check_output(["slurmd", "-C"], stderr=subprocess.DEVNULL).decode("utf-8")
except Exception:
return {}
info = {}
for chunk in output.split():
if "=" in chunk:
key, value = chunk.split("=", 1)
info[key] = value
return info
def _lscpu_status():
try:
output = subprocess.check_output(["lscpu", "-J"], stderr=subprocess.DEVNULL).decode("utf-8")
data = json.loads(output)
except Exception:
return {}
fields = {}
for entry in data.get("lscpu", []):
field = entry.get("field", "").strip().strip(":")
fields[field] = entry.get("data")
cpus = _as_int(fields.get("CPU(s)"))
sockets = _as_int(fields.get("Socket(s)"))
cores = _as_int(fields.get("Core(s) per socket"))
threads = _as_int(fields.get("Thread(s) per core"))
info = {}
if cpus is not None:
info["CPUs"] = str(cpus)
if sockets is not None:
info["SocketsPerBoard"] = str(sockets)
if cores is not None:
info["CoresPerSocket"] = str(cores)
if threads is not None:
info["ThreadsPerCore"] = str(threads)
info.setdefault("Boards", "1")
return info
def _real_memory_mb():
try:
with open("/proc/meminfo", "r") as handle:
for line in handle:
if line.startswith("MemTotal:"):
parts = line.split()
if len(parts) >= 2:
return int(int(parts[1]) / 1024)
except Exception:
return None
return None
def main():
dict_status = _slurmd_status()
for key, value in _lscpu_status().items():
dict_status.setdefault(key, value)
if "RealMemory" not in dict_status:
real_memory = _real_memory_mb()
if real_memory is not None:
dict_status["RealMemory"] = str(real_memory)
cpus = dict_status.get('CPUs')
memory = dict_status.get('RealMemory')
mem_per_cpu = None
if cpus and memory:
mem_per_cpu = int(int(memory) / int(cpus))
# Define variables based on environment or default values
hostname = gethostname()
template_params = {
"SlurmctldHost": environ.get('SLURMCTLD_HOST', hostname),
"ClusterName": environ.get('SLURM_CLUSTER_NAME', 'cluster'),
"SlurmUser": environ.get('SLURM_USER_NAME', '{{ galaxy_user_name }}'),
}
# Construct NodeName and PartitionName lines
node_parts = [f"NodeName={hostname}", "State=UNKNOWN"]
for key in ("CPUs", "Boards", "SocketsPerBoard", "CoresPerSocket", "ThreadsPerCore", "RealMemory"):
env_key = ENV_MAP.get(key)
value = environ.get(env_key) if env_key else None
if value is None:
value = dict_status.get(key)
if value is not None:
node_parts.append(f"{key}={value}")
node_line = " ".join(node_parts)
partition_line = f"PartitionName={environ.get('SLURM_PARTITION_NAME', 'debug')} Default=YES Nodes={hostname} " \
f"MaxTime=INFINITE State=UP Shared=YES"
if mem_per_cpu is not None:
partition_line += f" DefMemPerCPU={environ.get('SLURM_MEMORY_PER_CPU', mem_per_cpu)}"
with open(CONFIG_FILE_PATH, 'r') as file:
lines = file.readlines()
# Updated lines with replacements
updated_lines = []
found_keys = set()
for line in lines:
stripped_line = line.strip()
# Update lines based on key-value matching
if stripped_line.startswith("NodeName="):
updated_lines.append(node_line + "\n")
elif stripped_line.startswith("PartitionName="):
updated_lines.append(partition_line + "\n")
else:
# Update specific key-values based on template_params
updated = False
for key, value in template_params.items():
if stripped_line.startswith(f"{key}="):
updated_lines.append(f"{key}={value}\n")
found_keys.add(key)
updated = True
break
if not updated:
for key, value in FORCED_KV.items():
if stripped_line.startswith(f"{key}="):
updated_lines.append(f"{key}={value}\n")
found_keys.add(key)
updated = True
break
if not updated:
# Keep the line as-is if no match
updated_lines.append(line)
for key, value in template_params.items():
if key not in found_keys:
updated_lines.append(f"{key}={value}\n")
for key, value in FORCED_KV.items():
if key not in found_keys:
updated_lines.append(f"{key}={value}\n")
with open(CONFIG_FILE_PATH, 'w') as file:
file.writelines(updated_lines)
# Slurm 24.11 supports disabling cgroups, avoiding systemd/cgroup requirements in containers.
with open("/etc/slurm/cgroup.conf", "w") as file:
file.write("CgroupPlugin=disabled\n")
if __name__ == "__main__":
main()
================================================
FILE: galaxy/ansible/templates/container_resolvers_conf.yml.j2
================================================
{% if container_resolution_explicit %}
- type: explicit
{% endif %}
{% if container_resolution_cached_mulled %}
- type: cached_mulled
- type: cached_mulled_singularity
cache_directory: "/cvmfs/singularity.galaxyproject.org/all"
- type: cached_mulled_singularity
cache_directory: "/export/container_cache/singularity/mulled"
{% endif %}
{% if container_resolution_mulled %}
- type: mulled
namespace: "{{ container_resolution_mulled_namespace }}"
{% endif %}
{% if container_resolution_build_mulled %}
- type: build_mulled
namespace: local
{% endif %}
================================================
FILE: galaxy/ansible/templates/create_galaxy_user.py.j2
================================================
#!/usr/bin/env python
import sys
sys.path.insert(1,'{{ galaxy_server_dir }}')
sys.path.insert(1,'{{ galaxy_server_dir }}/lib')
from galaxy.model import User, APIKeys
from galaxy.model.mapping import init
from galaxy.model.orm.scripts import get_config
import argparse
def add_user(sa_session, security_agent, email, password, key=None, username="admin"):
"""
Add Galaxy User.
From John https://gist.github.com/jmchilton/4475646
"""
query = sa_session.query( User ).filter_by( email=email )
user = None
User.use_pbkdf2 = {{ use_pbkdf2 }}
if query.count() > 0:
user = query.first()
user.username = username
user.set_password_cleartext(password)
sa_session.add(user)
sa_session.flush()
else:
user = User(email)
user.username = username
user.set_password_cleartext(password)
sa_session.add(user)
sa_session.flush()
security_agent.create_private_user_role( user )
if not user.default_permissions:
security_agent.user_set_default_permissions( user, history=True, dataset=True )
if key is not None:
query = sa_session.query( APIKeys ).filter_by( user_id=user.id ).delete()
sa_session.flush()
api_key = APIKeys()
api_key.user_id = user.id
api_key.key = key
sa_session.add(api_key)
sa_session.flush()
sa_session.commit()
return user
if __name__ == "__main__":
db_url = get_config(sys.argv, use_argparse=False)['db_url']
parser = argparse.ArgumentParser(description='Create Galaxy Admin User.')
parser.add_argument("--user", required=True,
help="Username, it should be an email address.")
parser.add_argument("--password", required=True,
help="Password.")
parser.add_argument("--key", help="API-Key.")
parser.add_argument("--username", default="admin",
help="The public username. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the '-' character.")
parser.add_argument('args', nargs=argparse.REMAINDER)
options = parser.parse_args()
mapping = init('/tmp/', db_url)
sa_session = mapping.context
security_agent = mapping.security_agent
add_user(sa_session, security_agent, options.user, options.password, key=options.key, username=options.username)
================================================
FILE: galaxy/ansible/templates/export_user_files.py.j2
================================================
#!/usr/bin/env python
import fnmatch
import glob
import sys
import os
import re
import hashlib
import shutil
import subprocess
# PostgreSQL major version baked into the image; overridable for derived images.
PG_VERSION = os.environ.get('PG_VERSION', '15')
# Numeric uid/gid of the in-container Galaxy user; required (no default — a
# missing variable raises KeyError at import time, which is intentional).
GALAXY_UID = int(os.environ['GALAXY_UID'])
GALAXY_GID = int(os.environ['GALAXY_GID'])
GALAXY_ROOT_DIR = os.environ.get('GALAXY_ROOT_DIR', '/galaxy/')
# Marker file recording which image version last populated /export.
GALAXY_EXPORT_MARKER_PATH = '/export/.galaxy_export_marker'
# A single optional CLI argument overrides the in-image Postgres data directory.
if len( sys.argv ) == 2:
    PG_DATA_DIR_DEFAULT = sys.argv[1]
else:
    PG_DATA_DIR_DEFAULT = f"/var/lib/postgresql/{PG_VERSION}/main"
# Where the Postgres data directory lives on the persistent /export volume.
PG_DATA_DIR_HOST = os.environ.get("PG_DATA_DIR_HOST", f"/export/postgresql/{PG_VERSION}/main/")
def change_path( src ):
    """
    src will be copied to /export/`src` and a symlink will be placed in src pointing to /export/
    """
    # Paths absent from the image are silently skipped.
    if os.path.exists( src ):
        dest = os.path.join( '/export/', src.strip('/') )
        # if destination is empty move all files into /export/ and symlink back to source
        if not os.path.exists( dest ):
            dest_dir = os.path.dirname(dest)
            if not os.path.exists( dest_dir ):
                os.makedirs(dest_dir)
            shutil.move( src, dest )
            os.symlink( dest, src.rstrip('/') )
            # chown the new symlink itself; os.chown is not recursive, so the
            # exported tree is chowned via the shell below.
            os.chown( src, GALAXY_UID, GALAXY_GID )
            subprocess.call( f'chown -R {GALAXY_UID}:{GALAXY_GID} {dest}', shell=True )
        # if destination exists (e.g. continuing a previous session), remove source and symlink
        else:
            if not os.path.realpath( src ) == os.path.realpath( dest ):
                stripped_src = src.rstrip('/')
                # Only a real file/dir is removed and replaced by a link;
                # NOTE(review): if src is already a symlink pointing somewhere
                # else it is left untouched — confirm that is intended.
                if not os.path.islink( stripped_src ):
                    if os.path.isdir( stripped_src ):
                        shutil.rmtree( stripped_src )
                    else:
                        os.unlink( stripped_src )
                    os.symlink( dest, src.rstrip('/') )
def copy_samples(src, dest):
    """Copy Galaxy *.sample config files from ``src`` into ``dest``.

    Files ending in ``ml.sample``, ``ml.sample_advanced`` or
    ``ml.sample_basic`` are copied and chowned to the Galaxy user. A no-op
    when ``src`` and ``dest`` resolve to the same directory (no /export mount).
    """
    if os.path.realpath(src) == os.path.realpath(dest):
        return
    sample_suffixes = ('ml.sample', 'ml.sample_advanced', 'ml.sample_basic')
    for entry in os.listdir(src):
        if entry.endswith(sample_suffixes):
            target = os.path.join(dest, entry)
            shutil.copy(os.path.join(src, entry), target)
            os.chown(target, GALAXY_UID, GALAXY_GID)
def _makedir(path):
if not os.path.exists( path ):
os.makedirs( path )
os.chown(path, GALAXY_UID, GALAXY_GID)
def _ignore_static(dir, *patterns):
def __ignore_static(path, names):
ignored_names = []
if dir in path:
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return __ignore_static
def _read_image_marker():
marker = os.environ.get('GALAXY_EXPORT_MARKER')
if marker:
return marker.strip()
version_py = os.path.join(GALAXY_ROOT_DIR, 'lib', 'galaxy', 'version.py')
if os.path.exists(version_py):
try:
with open(version_py, 'r', encoding='utf-8', errors='ignore') as handle:
text = handle.read()
# Extract __version__ without importing Galaxy modules during startup.
match = re.search(r'__version__\\s*=\\s*[\\\'"]([^\\\'"]+)[\\\'"]', text)
if match:
return f"version:{match.group(1)}"
digest = hashlib.sha256(text.encode('utf-8')).hexdigest()
return f"version_py_sha256:{digest}"
except Exception:
pass
version_file = os.path.join(GALAXY_ROOT_DIR, 'VERSION')
if os.path.exists(version_file):
try:
with open(version_file, 'rb') as handle:
digest = hashlib.sha256(handle.read()).hexdigest()
return f"version_file_sha256:{digest}"
except Exception:
pass
return None
def _should_copy_distribution(marker):
if not os.path.exists('/export/galaxy'):
return True
if not marker:
return True
try:
# Explicit UTF-8 decoding avoids locale-dependent behavior for marker files.
with open(GALAXY_EXPORT_MARKER_PATH, 'r', encoding='utf-8') as handle:
return handle.read().strip() != marker
except OSError:
return True
def _write_marker(marker):
if not marker:
return
tmp_path = f"{GALAXY_EXPORT_MARKER_PATH}.tmp"
with open(tmp_path, 'w', encoding='utf-8') as handle:
handle.write(marker + '\n')
os.replace(tmp_path, GALAXY_EXPORT_MARKER_PATH)
if __name__ == "__main__":
    """
    If the '/export/' folder exist, meaning docker was started with '-v /home/foo/bar:/export',
    we will link every file that needs to persist to the host system. A marker file at
    /export/.galaxy_export_marker is written to indicate the export contents match the image version.
    If the user re-starts (with docker start) the container and the marker matches, the linking
    is skipped.
    """
    marker = _read_image_marker()
    # Refresh the exported distribution only when the marker is missing or stale.
    if _should_copy_distribution(marker):
        galaxy_distrib_paths = {os.path.join(GALAXY_ROOT_DIR, 'config'): '/export/.distribution_config',
                                os.path.join(GALAXY_ROOT_DIR, 'lib'): '/export/galaxy/lib',
                                os.path.join(GALAXY_ROOT_DIR, 'tools'): '/export/galaxy/tools'}
        for image_path, export_path in galaxy_distrib_paths.items():
            if os.path.exists(export_path):
                shutil.rmtree(export_path)
            # Ignore 2 dead symlinks in galaxy code: see https://github.com/galaxyproject/galaxy/issues/9847
            # BUGFIX: os.path.join() discards every component before an absolute
            # argument, so joining GALAXY_ROOT_DIR with '/lib/...' produced
            # '/lib/...' and the ignore filter never matched; the leading slash
            # is removed so the filter actually targets the style directory.
            shutil.copytree( image_path, export_path, ignore=_ignore_static(os.path.join(GALAXY_ROOT_DIR, 'lib/galaxy/web/framework/static/style'), 'question-octagon-frame.png', 'ok_small.png') )
        shutil.copy(os.path.join(GALAXY_ROOT_DIR, 'requirements.txt'), '/export/galaxy/requirements.txt')
        _write_marker(marker)
    # The steps below run on every start: symlinks into /export live in the
    # container layer and must be re-established after `docker run`.
    _makedir('/export/galaxy/')
    _makedir('/export/ftp/')
    change_path( os.path.join(GALAXY_ROOT_DIR, 'config') )
    # Copy all sample config files to config dir
    # TODO find a way to update plugins/ without breaking user customizations
    config_src = os.path.join(GALAXY_ROOT_DIR, 'config')
    # NOTE(review): with an absolute GALAXY_ROOT_DIR, os.path.join('/export/', ...)
    # discards '/export/', so src and dest resolve to the same path and
    # copy_samples() becomes a no-op — confirm the intended destination.
    config_dest = os.path.join('/export/', GALAXY_ROOT_DIR, 'config')
    copy_samples(config_src, config_dest)
    # Copy all sample files to tool-data dir
    # TODO find a way to update shared/ without breaking user customizations
    tool_data_src = os.path.join(GALAXY_ROOT_DIR, 'tool-data')
    tool_data_dest = os.path.join('/export/', GALAXY_ROOT_DIR, 'tool-data')
    copy_samples(tool_data_src, tool_data_dest)
    # TODO find a way to update /export/galaxy/display_applications/ without breaking user customizations
    # Copy all files starting with "welcome"
    # This enables a flexible start page design.
    for filename in os.listdir('/export/'):
        if filename.startswith('welcome'):
            export_file = os.path.join( '/export/', filename)
            image_file = os.path.join('/etc/galaxy/web/', filename)
            shutil.copy(export_file, image_file)
            os.chown( image_file, GALAXY_UID, GALAXY_GID )
    # copy image defaults to config/.docker_sample to base derivatives on,
    # and if there is a realized version of these files in the export directory
    # replace Galaxy's copy with these. Use symbolic link instead of copying so
    # deployer can update and reload Galaxy and changes will be reflected.
    for config in ['galaxy.yml', 'gravity.yml', 'job_conf.xml']:
        image_config = os.path.join('/etc/galaxy/', config)
        export_config = os.path.join('/export/galaxy/config', config)
        export_sample = export_config + ".docker_sample"
        shutil.copy(image_config, export_sample)
        if os.path.exists(export_config):
            subprocess.call('ln -s -f %s %s' % (export_config, image_config), shell=True)
    # Update Conda version if needed
    # BUGFIX: os.environ.get() returns a string; comparing it to the int 0 was
    # always True, so the Conda refresh ran even when the feature was disabled.
    if os.environ.get('GALAXY_AUTO_UPDATE_CONDA', '0') != '0':
        src_conda = '/tool_deps/_conda/'
        dest_conda = '/export/tool_deps/_conda/'
        if os.path.exists(dest_conda) and os.path.realpath(src_conda) != os.path.realpath(dest_conda):
            for subdir in ['bin', 'compiler_compat', 'conda-meta', 'etc', 'include', 'lib', 'share', 'ssl', 'x86_64-conda_cos6-linux-gnu']:
                if os.path.exists(os.path.join(dest_conda, subdir)):
                    shutil.rmtree(os.path.join(dest_conda, subdir))
                subprocess.call('cp -p --preserve -R %s %s' % (os.path.join(src_conda, subdir), os.path.join(dest_conda, subdir)), shell=True)
    change_path( os.path.join(GALAXY_ROOT_DIR, 'tools.yaml') )
    change_path( os.path.join(GALAXY_ROOT_DIR, 'integrated_tool_panel.xml') )
    change_path( os.path.join(GALAXY_ROOT_DIR, 'display_applications') )
    change_path( os.path.join('/tool_deps') )
    change_path( os.path.join(GALAXY_ROOT_DIR, 'tool-data') )
    change_path( os.path.join(GALAXY_ROOT_DIR, 'database') )
    if os.path.exists('/export/common_htpasswd'):
        shutil.copy('/export/common_htpasswd', '/etc/nginx/htpasswd')
    try:
        change_path('/var/lib/docker/')
    except (OSError, shutil.Error):
        # In case of unprivileged access this will result in a "Device or resource busy." error.
        # (Narrowed from a bare `except:` so programming errors are not swallowed.)
        pass
    # Seed the exported Postgres data directory on first use (PG_VERSION file
    # missing means the directory was never initialized there).
    if not os.path.exists( PG_DATA_DIR_HOST ) or 'PG_VERSION' not in os.listdir( PG_DATA_DIR_HOST ):
        dest_dir = os.path.dirname( PG_DATA_DIR_HOST )
        if not os.path.exists( dest_dir ):
            os.makedirs(dest_dir)
        # User given dbpath, usually a directory from the host machine
        # copy the postgresql data folder to the new location
        subprocess.call('cp -R %s/* %s' % (PG_DATA_DIR_DEFAULT, PG_DATA_DIR_HOST), shell=True)
        os.symlink( os.path.join(os.environ.get('PG_CONF_DIR_DEFAULT'), 'conf.d'), os.path.join(PG_DATA_DIR_HOST, 'conf.d') )
        # copytree needs an non-existing dst dir, how annoying :(
        # shutil.copytree(PG_DATA_DIR_DEFAULT, PG_DATA_DIR_HOST)
    # Ownership/permissions are (re)applied on every start.
    subprocess.call('chown -R postgres:postgres /export/postgresql/', shell=True)
    subprocess.call('chmod -R 0755 /export/', shell=True)
    subprocess.call('chmod -R 0700 %s' % PG_DATA_DIR_HOST, shell=True)
================================================
FILE: galaxy/ansible/templates/file_source_templates.yml.j2
================================================
# Catalog of user-creatable file source templates bundled with the image.
# (production_azure.yml was previously listed twice; the duplicate is removed.)
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_azure.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_ftp.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_s3fs.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_aws_private_bucket.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_aws_public_bucket.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_dropbox.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_google_drive.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_webdav.yml"
- include: "{{ galaxy_config_dir }}/production_b2drop.yml"
================================================
FILE: galaxy/ansible/templates/gravity.yml.j2
================================================
# Configuration for Gravity process manager.
gravity:
# Process manager to use.
# ``supervisor`` is the default process manager when Gravity is invoked as a non-root user.
# ``systemd`` is the default when Gravity is invoked as root.
# Valid options are: supervisor, systemd
process_manager: {{ gravity_process_manager }}
# What command to write to the process manager configs
# `gravity` (`galaxyctl exec `) is the default
# `direct` (each service's actual command) is also supported.
# Valid options are: gravity, direct
# service_command_style: gravity
# Use the process manager's *service instance* functionality for services that can run multiple instances.
# Presently this includes services like gunicorn and Galaxy dynamic job handlers. Service instances are only supported if
# ``service_command_style`` is ``gravity``, and so this option is automatically set to ``false`` if
# ``service_command_style`` is set to ``direct``.
# use_service_instances: true
# umask under which services should be executed. Setting ``umask`` on an individual service overrides this value.
# umask: '022'
# Memory limit (in GB), processes exceeding the limit will be killed. Default is no limit. If set, this is default value
# for all services. Setting ``memory_limit`` on an individual service overrides this value. Ignored if ``process_manager``
# is ``supervisor``.
# memory_limit:
# Specify Galaxy config file (galaxy.yml), if the Gravity config is separate from the Galaxy config. Assumed to be the
# same file as the Gravity config if a ``galaxy`` key exists at the root level, otherwise, this option is required.
galaxy_config_file: {{ galaxy_config_file }}
# Specify Galaxy's root directory.
# Gravity will attempt to find the root directory, but you can set the directory explicitly with this option.
galaxy_root: {{ galaxy_server_dir }}
# User to run Galaxy as, required when using the systemd process manager as root.
# Ignored if ``process_manager`` is ``supervisor`` or user-mode (non-root) ``systemd``.
galaxy_user: {{ galaxy_user_name }}
# Group to run Galaxy as, optional when using the systemd process manager as root.
# Ignored if ``process_manager`` is ``supervisor`` or user-mode (non-root) ``systemd``.
# galaxy_group:
# Set to a directory that should contain log files for the processes controlled by Gravity.
# If not specified defaults to ``/gravity/log``.
log_dir: {{ galaxy_logs_dir }}
# Set to Galaxy's virtualenv directory.
# If not specified, Gravity assumes all processes are on PATH. This option is required in most circumstances when using
# the ``systemd`` process manager.
virtualenv: {{ galaxy_venv_dir }}
# Select the application server.
# ``gunicorn`` is the default application server.
# ``unicornherder`` is a production-oriented manager for (G)unicorn servers that automates zero-downtime Galaxy server restarts,
# similar to uWSGI Zerg Mode used in the past.
# Valid options are: gunicorn, unicornherder
# app_server: gunicorn
# Override the default instance name.
# this is hidden from you when running a single instance.
# instance_name: _default_
# Configuration for Gunicorn. Can be a list to run multiple gunicorns for rolling restarts.
gunicorn:
# Enable Galaxy gunicorn server.
enable: {{ galaxy_gunicorn }}
# The socket to bind. A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``, ``fd://FD``. An IP is a valid HOST.
bind: "127.0.0.1:{{ gunicorn_port }}"
# Controls the number of Galaxy application processes Gunicorn will spawn.
# Increased web performance can be attained by increasing this value.
# If Gunicorn is the only application on the server, a good starting value is the number of CPUs * 2 + 1.
# 4-12 workers should be able to handle hundreds if not thousands of requests per second.
workers: {{ gunicorn_workers }}
# Gunicorn workers silent for more than this many seconds are killed and restarted.
# Value is a positive number or 0. Setting it to 0 has the effect of infinite timeouts by disabling timeouts for all workers entirely.
# If you disable the ``preload`` option workers need to have finished booting within the timeout.
# timeout: 300
# Extra arguments to pass to Gunicorn command line.
# extra_args:
# Use Gunicorn's --preload option to fork workers after loading the Galaxy Application.
# Consumes less memory when multiple processes are configured. Default is ``false`` if using unicornherder, else ``true``.
# preload:
# umask under which service should be executed
# umask:
# Value of supervisor startsecs, systemd TimeoutStartSec
# start_timeout: 15
# Value of supervisor stopwaitsecs, systemd TimeoutStopSec
# stop_timeout: 65
# Amount of time to wait for a server to become alive when performing rolling restarts.
# restart_timeout: 300
# Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the
# ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is
# ``supervisor``.
# memory_limit:
# Extra environment variables and their values to set when running the service. A dictionary where keys are the variable
# names.
# environment: {}
# Configuration for Celery Processes.
celery:
# Enable Celery distributed task queue.
enable: {{ galaxy_celery }}
# Enable Celery Beat periodic task runner.
enable_beat: {{ galaxy_celery_beat }}
# Number of Celery Workers to start.
concurrency: {{ celery_workers }}
# Log Level to use for Celery Worker.
# Valid options are: DEBUG, INFO, WARNING, ERROR
# loglevel: DEBUG
# Queues to join
# queues: celery,galaxy.internal,galaxy.external
# Pool implementation
# Valid options are: prefork, eventlet, gevent, solo, processes, threads
# pool: threads
# Extra arguments to pass to Celery command line.
# extra_args:
# umask under which service should be executed
# umask:
# Value of supervisor startsecs, systemd TimeoutStartSec
# start_timeout: 10
# Value of supervisor stopwaitsecs, systemd TimeoutStopSec
# stop_timeout: 10
# Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the
# ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is
# ``supervisor``.
# memory_limit:
# Extra environment variables and their values to set when running the service. A dictionary where keys are the variable
# names.
# environment: {}
# Configuration for gx-it-proxy.
gx_it_proxy:
# Set to true to start gx-it-proxy
enable: {{ galaxy_gx_it_proxy }}
# gx-it-proxy version
version: '{{ gx_it_proxy_version }}'
# Public-facing IP of the proxy
ip: 127.0.0.1
# Public-facing port of the proxy
port: {{ gx_it_proxy_port }}
# Routes file to monitor.
# Should be set to the same path as ``interactivetools_map`` in the ``galaxy:`` section. This is ignored if
# ``interactivetools_map`` is set.
sessions: {{ gx_it_proxy_sessions_path }}
# Include verbose messages in gx-it-proxy
# verbose: true
# Forward all requests to IP.
# This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.
# forward_ip:
# Forward all requests to port.
# This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.
# forward_port:
# Rewrite location blocks with proxy port.
# This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.
# reverse_proxy: false
# umask under which service should be executed
# umask:
# Value of supervisor startsecs, systemd TimeoutStartSec
# start_timeout: 10
# Value of supervisor stopwaitsecs, systemd TimeoutStopSec
# stop_timeout: 10
# Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the
# ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is
# ``supervisor``.
# memory_limit:
# Extra environment variables and their values to set when running the service. A dictionary where keys are the variable
# names.
# environment: {}
# Configuration for tusd server (https://github.com/tus/tusd).
# The ``tusd`` binary must be installed manually and made available on PATH (e.g in galaxy's .venv/bin directory).
tusd:
# Enable tusd server.
# If enabled, you also need to set up your proxy as outlined in https://docs.galaxyproject.org/en/latest/admin/nginx.html#receiving-files-via-the-tus-protocol.
enable: {{ galaxy_tusd }}
# Path to tusd binary
tusd_path: {{ tusd_path }}
# Host to bind the tusd server to
host: 127.0.0.1
# Port to bind the tusd server to
port: {{ tusd_port }}
# Directory to store uploads in.
# Must match ``tus_upload_store`` setting in ``galaxy:`` section.
upload_dir: {{ tus_upload_store_path }}
# Value of tusd -hooks-httpd option
#
# the default is suitable for using tusd for Galaxy uploads and should not be changed unless you are using tusd for
# other purposes such as Pulsar staging.
#
# The value of galaxy_infrastructure_url is automatically prepended if the option starts with a `/`
# hooks_http: /api/upload/hooks
# Comma-separated string of enabled tusd hooks.
#
# Leave at the default value to require authorization at upload creation time.
# This means Galaxy's web process does not need to be running after creating the initial
# upload request.
#
# Set to empty string to disable all authorization. This means data can be uploaded (but not processed)
# without the Galaxy web process being available.
#
# You can find a list of available hooks at https://github.com/tus/tusd/blob/master/docs/hooks.md#list-of-available-hooks.
# hooks_enabled_events: pre-create
# Extra arguments to pass to tusd command line.
extra_args: -behind-proxy -base-path {{ tusd_base_path }}
# umask under which service should be executed
# umask:
# Value of supervisor startsecs, systemd TimeoutStartSec
# start_timeout: 10
# Value of supervisor stopwaitsecs, systemd TimeoutStopSec
# stop_timeout: 10
# Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the
# ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is
# ``supervisor``.
# memory_limit:
# Extra environment variables and their values to set when running the service. A dictionary where keys are the variable
# names.
# environment: {}
# Configure dynamic handlers in this section.
# See https://docs.galaxyproject.org/en/latest/admin/scaling.html#dynamically-defined-handlers for details.
{% if not galaxy_dynamic_handlers %}
handlers: {}
{% else %}
handlers:
handler:
processes: {{ galaxy_handler_processes }}
pools:
- job-handlers
- workflow-schedulers
{% endif %}
================================================
FILE: galaxy/ansible/templates/job_conf.xml.j2
================================================
{% import "macros.xml.j2" as macros with context %}
{% if galaxy_slurm %}
/usr/lib/slurm-drmaa/lib/libdrmaa.so
true
{% endif %}
{% if galaxy_condor %}
true
{% endif %}
{% if galaxy_pbs %}
/usr/lib/pbs-drmaa/lib/libdrmaa.so.1
true
{% endif %}
{% if galaxy_k8s_jobs %}
{{ galaxy_k8s_jobs_use_service_account }}
{{ galaxy_k8s_jobs_persistent_volume_claims }}
{{ galaxy_k8s_jobs_namespace }}
{{ galaxy_k8s_jobs_supplemental_group_id }}
{{ galaxy_k8s_jobs_fs_group_id }}
{{ galaxy_k8s_jobs_pull_policy }}
true
{% endif %}
{% if not galaxy_dynamic_handlers %}
{% if galaxy_handler_processes == 0 %}
{% else %}
{% for i in range(galaxy_handler_processes) %}
{% endfor %}
{% endif %}
{% else %}
{% endif %}
docker_dispatch
{{ galaxy_destination_docker_default }}
{{ galaxy_destination_default }}
{% call macros.destination("local_no_container", "local") %}{% endcall %}
{% call macros.destination("local_docker", "local", container_type="docker") %}{% endcall %}
{% call macros.destination("local_force_docker", "local", container_type="docker", force_container=True) %}{% endcall %}
{% if galaxy_pbs %}
{% call macros.destination("pbs_cluster", "pbs") %}{% endcall %}
{% call macros.destination("pbs_cluster_docker", "pbs", container_type="docker") %}{% endcall %}
{% call macros.destination("pbs_cluster_force_docker", "pbs", container_type="docker", force_container=True) %}{% endcall %}
{% endif %}
{% if galaxy_slurm %}
{% call macros.destination("slurm_cluster", "slurm") %}
--ntasks={{ galaxy_slurm_ntask | string }} --share
{% endcall %}
{% call macros.destination("slurm_cluster_docker", "slurm", container_type="docker") %}
--ntasks={{ galaxy_slurm_ntask | string }} --share
{% endcall %}
{% call macros.destination("slurm_cluster_force_docker", "slurm", container_type="docker", force_container=True) %}
--ntasks={{ galaxy_slurm_ntask | string }} --share
{% endcall %}
{% call macros.destination("slurm_cluster_singularity", "slurm", container_type="singularity") %}
--ntasks={{ galaxy_slurm_ntask | string }} --share
{% endcall %}
{% endif %}
{% if galaxy_condor %}
{% call macros.destination("condor_cluster", "condor") %}
vanilla
{% endcall %}
{% call macros.destination("condor_cluster_docker", "condor", container_type="docker") %}
vanilla
{% endcall %}
{% call macros.destination("condor_cluster_force_docker", "condor", container_type="docker", force_container=True) %}
vanilla
{% endcall %}
{% call macros.destination("condor_docker_universe", "condor", container_type="docker", force_container=True) %}
docker
{% endcall %}
{{ macros.docker_dispatch_destination("condor_docker_cluster_dispatch", "condor_cluster_docker", "condor_cluster")}}
{{ macros.docker_dispatch_destination("condor_docker_universe_dispatch", "condor_docker_universe", "condor_cluster")}}
{% endif %}
{% if galaxy_k8s_jobs %}
{% call macros.destination("k8s_default", "k8s", container_type="docker", force_container=True) %}{% endcall %}
{{ macros.docker_dispatch_destination("k8s_or_local_dispatch", "k8s_default", "local_no_container")}}
{{ macros.docker_dispatch_destination("k8s_or_slurm_dispatch", "k8s_default", "slurm_cluster")}}
{{ macros.docker_dispatch_destination("k8s_or_condor_dispatch", "k8s_default", "condor_cluster")}}
{% endif %}
================================================
FILE: galaxy/ansible/templates/job_metrics_conf.yml.j2
================================================
{# Emits the list of enabled Galaxy job-metrics plugins.
   BUGFIX: when galaxy_job_metrics_cpuinfo == "verbose" this template used to
   emit `verbose: false`, disabling exactly the detail that was requested;
   it now emits `verbose: true`. #}
{% if galaxy_job_metrics_core %}
- type: core
{% endif %}
{% if galaxy_job_metrics_cpuinfo and galaxy_job_metrics_cpuinfo == "verbose" %}
- type: cpuinfo
  verbose: true
{% elif galaxy_job_metrics_cpuinfo %}
- type: cpuinfo
{% endif %}
{% if galaxy_job_metrics_meminfo %}
- type: meminfo
{% endif %}
{% if galaxy_job_metrics_uname %}
- type: uname
{% endif %}
{% if galaxy_job_metrics_env %}
- type: env
{% endif %}
{% if galaxy_job_metrics_hostname %}
- type: hostname
{% endif %}
{% if galaxy_job_metrics_cgroup %}
- type: cgroup
{% endif %}
================================================
FILE: galaxy/ansible/templates/macros.xml.j2
================================================
{% macro destination(id, runner, container_type=None, force_container=False) -%}
{% if galaxy_source_shellrc %}
{% endif %}
true
{% if container_type == 'docker' %}
true
{{ galaxy_docker_sudo | string }}
{{ galaxy_docker_volumes_from }}
{{ galaxy_docker_volumes }}
{{ galaxy_docker_net }}
{{ galaxy_docker_auto_rm | string }}
{{ galaxy_docker_set_user }}
{% if force_container %}
{{ galaxy_docker_default_image }}
{% endif %}
{% endif %}
{% if container_type == 'singularity' %}
true
{{ galaxy_singularity_sudo | string }}
{{ galaxy_singularity_volumes_from }}
{{ galaxy_singularity_volumes }}
{% if force_container %}
{{ galaxy_singularity_default_image }}
{% endif %}
{% endif %}
{{ caller() }}
{%- endmacro %}
{% macro docker_dispatch_destination(id, default_destination, docker_destination) -%}
docker_dispatch
{{ docker_destination }}
{{ default_destination }}
{%- endmacro %}
{% macro singularity_dispatch_destination(id, default_destination, singularity_destination) -%}
singularity_dispatch
{{ singularity_destination }}
{{ default_destination }}
{%- endmacro %}
================================================
FILE: galaxy/ansible/templates/nginx/delegated_uploads.conf.j2
================================================
# delegated uploads
location {{ nginx_tusd_location }} {
# Disable request and response buffering
proxy_request_buffering off;
proxy_buffering off;
proxy_http_version 1.1;
# Add X-Forwarded-* headers
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
client_max_body_size 0;
proxy_pass http://127.0.0.1:{{ tusd_port }};
}
================================================
FILE: galaxy/ansible/templates/nginx/flower_auth.conf.j2
================================================
# Authenticating with htpasswd file
set $auth "Flower is restricted. Please contact your administrator.";
auth_basic $auth;
auth_basic_user_file htpasswd;
================================================
FILE: galaxy/ansible/templates/nginx/galaxy_common.conf.j2
================================================
{% if nginx_use_passwords %}
auth_basic "devbox";
auth_basic_user_file /etc/nginx/htpasswd;
{% endif %}
{% if nginx_proxy_flower %}
# enable flower under :80/flower/
location {{ nginx_flower_location }}/ {
# include authentification settings if enabled
include {{ nginx_conf_dir }}/flower_auth.conf;
proxy_pass http://127.0.0.1:{{ flower_port }};
proxy_set_header Host $host;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
{% endif %}
{% if nginx_proxy_rabbitmq_management %}
# enable rabbitmq management under :80/rabbitmq/
location ~* {{ nginx_rabbitmq_management_location }}/(.*) {
rewrite ^{{ nginx_rabbitmq_management_location }}/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:{{ rabbitmq_management_port }};
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
{% endif %}
{% if nginx_proxy_gunicorn %}
# pass to gunicorn by default
location {{ nginx_galaxy_location }}/ {
proxy_pass http://127.0.0.1:{{ gunicorn_port }};
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
{% if galaxy_admin_user is defined and galaxy_admin_user %}
# hard-code a fixed user to pass to Galaxy to auto-login
proxy_set_header HTTP_REMOTE_USER '{{ galaxy_admin_user }}';
{% endif %}
{% if nginx_use_remote_header %}
# forward the remote_user header in case it is set by a previous proxy
proxy_set_header HTTP_REMOTE_USER $remote_user;
{% endif %}
}
{% endif %}
{% for a in nginx_additional_config %}
include {{ nginx_conf_dir }}/{{ a }}.conf;
{% endfor %}
# serve static content
location {{ nginx_galaxy_location }}/static {
alias {{ galaxy_server_dir }}/static;
gzip on;
gzip_types text/plain text/xml text/javascript text/css application/x-javascript;
expires 24h;
}
location {{ nginx_galaxy_location }}/static/style {
alias {{ galaxy_server_dir }}/static/style;
gzip on;
gzip_types text/plain text/xml text/javascript text/css application/x-javascript;
expires 24h;
}
location {{ nginx_galaxy_location }}/static/dist {
alias {{ galaxy_server_dir }}/static/dist;
gzip on;
gzip_types text/plain text/xml text/javascript text/css application/x-javascript;
expires 24h;
}
location /favicon.ico {
alias {{ galaxy_server_dir }}/static/favicon.ico;
}
# delegated downloads
location /_x_accel_redirect/ {
internal;
alias /;
# Add upstream response headers that would otherwise be omitted
add_header Access-Control-Allow-Origin $upstream_http_access_control_allow_origin;
add_header Access-Control-Allow-Methods $upstream_http_access_control_allow_methods;
}
# this is needed if 'welcome_url' is set to /etc/galaxy/web
location {{ nginx_welcome_location }} {
alias {{ nginx_welcome_path }};
gzip on;
gzip_types text/plain text/xml text/javascript text/css application/x-javascript;
expires 24h;
}
{% if nginx_proxy_interactive_tools %}
# Route all path-based interactive tool requests to the InteractiveTool proxy application
location ~* ^{{ nginx_galaxy_location }}/(interactivetool/.+)$ {
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://127.0.0.1:{{ gx_it_proxy_port }};
}
{% endif %}
location ~ ^{{ nginx_galaxy_location }}/plugins/(?<plug_type>[^/]+?)/((?<vis_d>[^/_]*)_?)?(?<vis_name>[^/]*?)/static/(?<static_file>.*?)$ {
alias {{ galaxy_server_dir }}/config/plugins/$plug_type/;
try_files $vis_d/${vis_d}_${vis_name}/static/$static_file
$vis_d/static/$static_file =404;
}
# include delegated uploads settings if enabled
include {{ nginx_conf_dir }}/delegated_uploads.conf;
# error docs
error_page 502 503 504 {{ nginx_prefix_location }}/error/502/index.html;
error_page 413 {{ nginx_prefix_location }}/error/413/index.html;
error_page 500 {{ nginx_prefix_location }}/error/500/index.html;
location {{ nginx_prefix_location }}/error {
internal;
alias {{ galaxy_errordocs_dir }};
}
================================================
FILE: galaxy/ansible/templates/nginx/galaxy_http.j2
================================================
server {
listen 80 default_server;
listen [::]:80 default_server;
include {{ nginx_conf_dir }}/galaxy_common.conf;
}
================================================
FILE: galaxy/ansible/templates/nginx/galaxy_https.j2
================================================
server {
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
include {{ nginx_conf_dir }}/galaxy_common.conf;
}
================================================
FILE: galaxy/ansible/templates/nginx/galaxy_redirect_ssl.j2
================================================
server {
listen 80 default_server;
listen [::]:80 default_server;
location /.well-known/ {
root {{ certbot_well_known_root }};
}
rewrite ^ https://$host$request_uri permanent;
}
================================================
FILE: galaxy/ansible/templates/nginx/htpasswd.j2
================================================
{% for p in nginx_htpasswds %}
{{ p }}
{% endfor %}
================================================
FILE: galaxy/ansible/templates/nginx/interactive_tools_common.conf.j2
================================================
# Match all requests for the interactive tools subdomain
server_name *.interactivetool.{{ galaxy_domain }};
# Log files will go here.
error_log /var/log/nginx/interactive_tools_error.log;
access_log /var/log/nginx/interactive_tools_access.log;
# Proxy all requests to the Gx IT Proxy application
location / {
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://127.0.0.1:{{ gx_it_proxy_port }};
}
================================================
FILE: galaxy/ansible/templates/nginx/interactive_tools_http.j2
================================================
server {
listen 80;
listen [::]:80;
include {{ nginx_conf_dir }}/interactive_tools_common.conf;
}
================================================
FILE: galaxy/ansible/templates/nginx/interactive_tools_https.j2
================================================
server {
listen 443 ssl;
listen [::]:443 ssl;
include {{ nginx_conf_dir }}/interactive_tools_common.conf;
}
================================================
FILE: galaxy/ansible/templates/nginx/interactive_tools_redirect_ssl.j2
================================================
server {
listen 80;
listen [::]:80;
rewrite ^ https://$host$request_uri permanent;
}
================================================
FILE: galaxy/ansible/templates/object_store_templates.yml.j2
================================================
# This is a catalog file for all the user object store templates
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_azure_blob.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_aws_s3.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_generic_s3.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_gcp_s3.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/cloudflare.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/onedata.yml"
- include: "{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/minio_just_buckets.yml"
================================================
FILE: galaxy/ansible/templates/rabbitmq.sh.j2
================================================
#!/bin/sh
# Run rabbitmq-server in the foreground and shut it down cleanly on SIGTERM.
# call "rabbitmqctl stop" when exiting
# taken from https://gist.github.com/caioariede/342a583f75467509ad42
# The runtime directory must exist and belong to the rabbitmq user before start.
mkdir -p /var/run/rabbitmq && chown rabbitmq:rabbitmq /var/run/rabbitmq && chmod 755 /var/run/rabbitmq
RABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env
# NOTE(review): rabbitmq-env appears to rely on RABBITMQ_SCRIPTS_DIR being set
# to its own directory before it is sourced — confirm against the installed
# rabbitmq-env script before removing this line.
RABBITMQ_SCRIPTS_DIR=$(dirname "$RABBITMQ_ENV")
. /usr/lib/rabbitmq/bin/rabbitmq-env
# On SIGTERM, stop the broker gracefully and exit successfully.
trap "{ echo Stopping rabbitmq; rabbitmqctl stop; exit 0; }" TERM
echo Starting rabbitmq
rabbitmq-server &
# from docs: When Bash receives a signal for which a
# trap has been set while waiting for a command to
# complete, the trap will not be executed until the
# command completes.
#
# This is why we use & and wait here. Idea taken from:
# http://veithen.github.io/2014/11/16/sigterm-propagation.html
PID=$!
wait $PID
================================================
FILE: galaxy/ansible/templates/startup_lite.sh.j2
================================================
#!/bin/bash
# Start a minimal ("lite") Galaxy without the full nginx/supervisor stack,
# e.g. to populate data libraries. Pass -j to keep the configured job conf
# instead of the basic local-runner sample.
# Quote the path so a GALAXY_ROOT_DIR containing spaces works; abort if the
# directory is missing rather than running run.sh from the wrong place.
cd "$GALAXY_ROOT_DIR" || exit 1
export GALAXY_CONFIG_STATIC_ENABLED=True
export GALAXY_CONFIG_ALLOW_PATH_PASTE=True
unset GALAXY_CONFIG_TUS_UPLOAD_STORE
# The lite mode can be useful to populate data libraries.
# To make this work it is needed to unset the following variables
unset GALAXY_CONFIG_JOB_WORKING_DIRECTORY
unset GALAXY_CONFIG_FILE_PATH
unset GALAXY_CONFIG_NEW_FILE_PATH
unset GALAXY_CONFIG_TEMPLATE_CACHE_PATH
unset GALAXY_CONFIG_CITATION_CACHE_DATA_DIR
unset GALAXY_CONFIG_FTP_UPLOAD_DIR
unset GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG
# Default to the basic sample job conf (local runner only).
JOB_CONF=$GALAXY_ROOT_DIR/lib/galaxy/config/sample/job_conf.xml.sample_basic
while getopts "j" opt; do
    case $opt in
        j)
            # if they pass -j, don't override the job config file
            JOB_CONF=$GALAXY_CONFIG_JOB_CONFIG_FILE
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            ;;
    esac
done
export GALAXY_CONFIG_JOB_CONFIG_FILE=$JOB_CONF
service postgresql start
. {{ galaxy_venv_dir }}/bin/activate
echo "Checking if database is up and running"
# BUGFIX: '2>&1 >/dev/null' redirected stderr to the terminal before stdout
# was silenced; redirect stdout first so both streams are discarded.
until /usr/local/bin/check_database.py >/dev/null 2>&1; do sleep 1; echo "Waiting for database"; done
echo "Database connected"
./run.sh -d galaxy_startup_lite.log --pidfile galaxy_startup_lite.pid --http-timeout 3000
================================================
FILE: galaxy/ansible/templates/supervisor.conf.j2
================================================
# Supervisord configuration (Jinja2 template rendered by Ansible).
# Each optional service below is gated behind a supervisor_manage_* flag.
[supervisord]
nodaemon=false

{% if supervisor_webserver %}
# Optional supervisor web UI / XML-RPC endpoint.
[inet_http_server]
port={{ supervisor_webserver_port }}
{% if supervisor_webserver_username %}
username={{ supervisor_webserver_username }}
password={{ supervisor_webserver_password }}
{% endif %}
{% endif %}

{% if supervisor_manage_cron %}
[program:cron]
user = root
command = /usr/sbin/cron -f
autostart = {{ supervisor_cron_autostart }}
autorestart = true
{% endif %}

{% if supervisor_manage_autofs %}
[program:autofs]
user = root
command = /usr/sbin/automount -f
autostart = {{ supervisor_autofs_autostart }}
autorestart = true
redirect_stderr = true
stdout_logfile = /var/log/autofs.log
{% endif %}

{% if supervisor_manage_slurm %}
# SLURM stack: munge (authentication) must be up before slurmctld/slurmd,
# hence the increasing priority values (lower number starts first).
[program:munge]
user=root
# In VMs the chown seems to be needed, in containers the mkdir.
# Keep munge threads modest by default; increase via munge_num_threads if needed.
command=/bin/bash -c "mkdir -p /var/run/munge && chown -R root:root /var/run/munge && /usr/sbin/munged -f -F --num-threads={{ munge_num_threads | default(2) }}"
redirect_stderr = true
priority = 100
stopasgroup = true

[program:slurmctld]
user=root
command=/bin/bash -c "/usr/bin/python /usr/sbin/configure_slurm.py && /usr/sbin/slurmctld -D -L {{ supervisor_slurm_config_dir }}/slurmctld.log"
redirect_stderr=true
autostart = {{ supervisor_slurm_autostart }}
autorestart = true
priority = 200
stopasgroup = true

[program:slurmd]
user=root
command=/usr/sbin/slurmd -D -L {{ supervisor_slurm_config_dir }}/slurmd.log
autostart = {{ supervisor_slurm_autostart }}
redirect_stderr = true
autorestart = true
priority = 300
{% endif %}

{% if supervisor_manage_condor %}
[program:condor]
user=root
command=condor_master -f -t
redirect_stderr = true
autostart = {{ supervisor_condor_autostart }}
autorestart = true
priority = 100
{% endif %}

{% if supervisor_manage_postgres %}
{% if ansible_virtualization_type != "docker" %}
# Outside Docker, /var/run/postgresql may not exist yet; create it first.
[program:pre_postgresql]
user = root
startsecs = 0
command = /bin/bash -c "install -d -m 2775 -o postgres -g postgres /var/run/postgresql"
{% endif %}
[program:postgresql]
user = postgres
command = /usr/lib/postgresql/{{ postgresql_version }}/bin/postmaster {{ supervisor_postgres_options }}
process_name = %(program_name)s
stopsignal = INT
autostart = {{ supervisor_postgres_autostart }}
autorestart = true
redirect_stderr = true
priority = 100
{% endif %}

{% if supervisor_manage_proftp %}
[program:proftpd]
{% if proftpd_nat_masquerade %}
command = bash -c " export MASQUERADE_ADDRESS={{ proftpd_masquerade_address }} && /usr/sbin/proftpd -n -c {{ proftpd_conf_path }}"
{% else %}
command = /usr/sbin/proftpd -n -c {{ proftpd_conf_path }}
{% endif %}
autostart = {{ supervisor_proftpd_autostart }}
autorestart = true
stopasgroup = true
killasgroup = true
{% endif %}

{% if supervisor_manage_nginx %}
[program:nginx]
command = /usr/sbin/nginx
directory = /
umask = 022
autostart = true
autorestart = unexpected
startsecs = 5
exitcodes = 0
user = root
priority = 200
{% endif %}

{% if supervisor_manage_toolshed %}
[program:toolshed]
command = {{ galaxy_venv_dir }}/bin/gunicorn 'tool_shed.webapp.fast_factory:factory()' --config python:galaxy.web_stack.gunicorn_config --worker-class galaxy.webapps.galaxy.workers.Worker --preload --workers 1 --bind 127.0.0.1:{{ galaxy_toolshed_port }} --timeout 600 --log-file {{ galaxy_logs_dir }}/toolshed.log --pid {{ galaxy_logs_dir }}/toolshed.pid --pythonpath lib
directory = {{ galaxy_server_dir }}
process_name = toolshed
umask = 022
autostart = true
autorestart = true
environment = PATH={{ galaxy_venv_dir }}:{{ galaxy_venv_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin,PYTHONPATH=lib,TOOL_SHED_CONFIG_FILE={{ galaxy_toolshed_config_file }}
startsecs = {{ supervisor_galaxy_startsecs }}
user = {{ galaxy_user_name }}
startretries = {{ supervisor_galaxy_startretries }}
{% endif %}

{% if supervisor_manage_docker %}
[program:docker]
directory = /
{% if docker_legacy %}
# Pre-1.12 Docker used "docker daemon"; newer releases ship a separate dockerd.
command = /usr/bin/docker daemon --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 -s {{ docker_storage_backend }}
{% else %}
command = /usr/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 -s {{ docker_storage_backend }}
{% endif %}
autostart = {{ supervisor_docker_autostart }}
autorestart = {{ supervisor_docker_autorestart }}
user = root
startsecs = 5
redirect_stderr = true
{% endif %}

{% if supervisor_manage_rabbitmq %}
# The wrapper script traps TERM and calls "rabbitmqctl stop" for clean shutdown.
[program:rabbitmq]
command = /bin/sh /usr/local/bin/rabbitmq.sh
user = root
autostart = {{ supervisor_rabbitmq_autostart }}
autorestart = true
{% endif %}

{% if supervisor_manage_redis %}
[program:redis]
command = /usr/bin/redis-server /etc/redis/redis.conf
user = root
autostart = {{ supervisor_redis_autostart }}
autorestart = true
{% endif %}

{% if supervisor_manage_flower %}
# Flower: web monitor for the Celery workers.
[program:flower]
command = {{ galaxy_venv_dir }}/bin/celery --broker={{ flower_broker_url }} --app {{ flower_app_name }} flower --conf={{ flower_conf_path }} --log_file_prefix={{ flower_log }}
directory = {{ galaxy_server_dir }}
umask = 022
autostart = {{ supervisor_flower_autostart }}
autorestart = true
startsecs = 10
user = {{ galaxy_user_name }}
environment = PATH={{ galaxy_venv_dir }}:{{ galaxy_venv_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin,PYTHONPATH={{ galaxy_server_dir }}/lib,GALAXY_ROOT_DIR={{ galaxy_server_dir }},GALAXY_CONFIG_FILE={{ galaxy_config_file }}
{% endif %}
================================================
FILE: galaxy/ansible/templates/update_yaml_value.py.j2
================================================
import sys
import yaml
import argparse
def modify_yaml(file_path, key_path, new_value):
    """Update the value of an existing key in a YAML file.

    The key is addressed with a dot-separated path (e.g.
    ``gravity.gunicorn.workers``). Only keys that already exist are
    updated; missing paths print a message and leave the file untouched.

    :param file_path: path to the YAML file to modify in place
    :param key_path:  dot-separated path to the target key
    :param new_value: new value as a string; parsed with ``yaml.safe_load``
                      so numbers/booleans keep their native YAML type
    """
    # Load the YAML file
    with open(file_path, 'r') as file:
        data = yaml.safe_load(file)

    # Split the key_path by '.' to access nested keys
    keys = key_path.split('.')

    # Traverse the dictionary to reach the parent of the target key.
    # The isinstance() guard prevents a TypeError when an intermediate
    # key maps to a scalar/list instead of a mapping.
    temp = data
    for key in keys[:-1]:
        if isinstance(temp, dict) and key in temp:
            temp = temp[key]
        else:
            print(f"Key path '{key_path}' does not exist in {file_path}. No update made.")
            return

    # Check if the last key exists and update its value
    if isinstance(temp, dict) and keys[-1] in temp:
        temp[keys[-1]] = yaml.safe_load(new_value)
        # Write the updated data back to the YAML file
        with open(file_path, 'w') as file:
            yaml.dump(data, file, default_flow_style=False)
        print(f"Updated {key_path} to {new_value} in {file_path}")
    else:
        print(f"Key '{keys[-1]}' does not exist in {file_path}. No update made.")
def main():
    """CLI entry point: update one key in a YAML file."""
    parser = argparse.ArgumentParser(
        description="Update a YAML file by modifying an existing key's value.",
    )
    # Declare the three positional arguments data-driven to keep the spec compact.
    for arg_name, arg_help in (
        ("file_path", "The path to the YAML file"),
        ("key_path", "The dot-separated key path (e.g., 'gravity.gunicorn.workers')"),
        ("new_value", "The new value to set for the specified key"),
    ):
        parser.add_argument(arg_name, help=arg_help)
    cli_args = parser.parse_args()
    modify_yaml(cli_args.file_path, cli_args.key_path, cli_args.new_value)


if __name__ == "__main__":
    main()
================================================
FILE: galaxy/ansible/templates/vault_conf.yml.j2
================================================
type: database
path_prefix: /galaxy
# Encryption keys must be valid fernet keys
# To generate a valid key:
#
#   python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())'
#
# Use the ascii string value as a key
# For more details, see: https://cryptography.io/en/latest/fernet/#
encryption_keys:
{% for encryption_key in galaxy_vault_encryption_keys %}
  - {{ encryption_key }}
{% endfor %}
================================================
FILE: galaxy/ansible/tusd.yml
================================================
# Ansible playbook: install tusd (resumable upload daemon) via the
# galaxyproject.tusd role, run locally inside the image build.
- hosts: localhost
  connection: local
  remote_user: root
  vars:
    # Release tag of tusd to install.
    tusd_version: v2.5.0
    # No systemd inside the container, so skip unit installation.
    tusd_systemd: false
  roles:
    - role: galaxyproject.tusd
================================================
FILE: galaxy/bashrc
================================================
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# If not running interactively, don't do anything
[ -z "$PS1" ] && return

# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi

if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt

# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
    PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'
    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# some more ls aliases
# NOTE(review): --color=always keeps escape codes when ll's output is piped
# to a file or another command — presumably intentional here; confirm.
alias ll='ls -lF --color=always'
alias lt='ls -ltr'
alias la='ls -A'
alias l='ls -CF'

# Add an "alert" alias for long running commands. Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'

# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
    . /etc/bash_completion
fi
================================================
FILE: galaxy/cgroupfs_mount.sh
================================================
#!/bin/bash
set -e

# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).

# Usage: dind CMD [ARG...]

# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
export container=docker

# Mount securityfs (if present and not already mounted) so AppArmor
# detection works; failure is reported but not fatal.
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
    mount -t securityfs none /sys/kernel/security || {
        echo >&2 'Could not mount /sys/kernel/security.'
        echo >&2 'AppArmor detection and --privileged mode might break.'
    }
fi

# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
    mount -t tmpfs none /tmp
fi

# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid
================================================
FILE: galaxy/common_cleanup.sh
================================================
#!/bin/sh
# Image-build cleanup: remove caches, logs and temp files.
set -x
# This usually drastically reduced the container size
# at the cost of the startup time of your application
find / -name '*.pyc' -delete
find / -name '*.log' -delete
find / -path /root/.cache -prune -o -name '.cache' -type d -prune -exec rm -rf '{}' +
find / -path /root/.npm -prune -o -name '.npm' -type d -prune -exec rm -rf '{}' +
find / -name '.launchpadlib' -type d -prune -exec rm -rf '{}' +
rm -rf /var/lib/apt/lists/*
rm -rf /var/cache/*
rm -rf /tmp/*
rm -rf /var/tmp/*
# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles
truncate -s 0 /var/log/*log || true
# The previous '/var/log/**/*log' glob needed bash's globstar, which /bin/sh
# does not have ('**' degrades to '*', reaching only one directory level).
# Use find for a truly recursive truncate of nested log files.
find /var/log -mindepth 2 -type f -name '*log' -exec truncate -s 0 '{}' + || true
================================================
FILE: galaxy/docker-compose.yaml
================================================
# docker-compose wrapper for the single Galaxy container. This is useful for systems like EGI IM.
# Start via `IMAGE_TAG=dev GALAXY_CONFIG_BRAND=foo docker-compose up`
services:
  galaxy-server:
    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy:${IMAGE_TAG:-latest}
    build: ./
    environment:
      # Bootstrap admin account created on first startup.
      - GALAXY_DEFAULT_ADMIN_USER=admin
      - GALAXY_DEFAULT_ADMIN_EMAIL=admin@example.org
      - GALAXY_DEFAULT_ADMIN_PASSWORD=password
      - GALAXY_DEFAULT_ADMIN_KEY=fakekey
      - GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker
      - GALAXY_CONFIG_BRAND=${GALAXY_CONFIG_BRAND:-My own Galaxy flavour}
      - GALAXY_AUTO_UPDATE_DB=True
    hostname: galaxy-server
    # Privileged mode is needed for Docker-in-Docker job execution.
    privileged: True
    ports:
      - "8080:80"
      - "9002:9002"
      - "4002:4002"
      - "8021:21"
      - "8022:22"
    volumes:
      # This is the directory where all your files from Galaxy will be stored
      # on your host system
      - ${EXPORT_DIR:-./export}/:/export/:delegated
      - ${EXPORT_DIR:-./export}/tus_upload_store:/tus_upload_store:delegated
      - /var/run/docker.sock:/var/run/docker.sock
      # Optional CVMFS mount (shared with the sidecar when enabled).
      - type: bind
        source: ${CVMFS_MOUNT_DIR:-/cvmfs}
        target: /cvmfs
        bind:
          # Propagate CVMFS mounts from the sidecar into this container.
          propagation: rshared
  # Optional CVMFS sidecar; only started with `--profile cvmfs`.
  cvmfs:
    profiles:
      - cvmfs
    build: ../cvmfs
    image: galaxy-cvmfs:latest
    privileged: true
    environment:
      - CVMFS_REPOSITORIES=data.galaxyproject.org,singularity.galaxyproject.org
      - CVMFS_CACHE_BASE=/var/lib/cvmfs
    volumes:
      - type: bind
        source: ${CVMFS_MOUNT_DIR:-/cvmfs}
        target: /cvmfs
        bind:
          # Allow mounts created here to propagate to the host and Galaxy container.
          propagation: rshared
      - ${EXPORT_DIR:-./export}/cvmfs-cache:/var/lib/cvmfs:delegated
================================================
FILE: galaxy/install_tools_wrapper.sh
================================================
#!/bin/bash
# install_tools_wrapper.sh <tool_list.yml>
#
# Install a list of Tool Shed tools (via ephemeris' shed-tools) into this
# container's Galaxy. If Galaxy is already running under supervisord, the
# running instance on port 80 is used; otherwise a throw-away PostgreSQL +
# gunicorn Galaxy is started on port 8080 and torn down again afterwards.
set -euo pipefail

# Basic defaults so set -u does not choke when running outside the normal entrypoint.
GALAXY_HOME=${GALAXY_HOME:-/galaxy}
GALAXY_ROOT_DIR=${GALAXY_ROOT_DIR:-$GALAXY_HOME}
export GALAXY_VIRTUAL_ENV=${GALAXY_VIRTUAL_ENV:-/galaxy_venv}
export PATH="${GALAXY_VIRTUAL_ENV}/bin:${PATH}"
export GALAXY_SKIP_REQUIREMENTS_INSTALL=1
export GALAXY_SKIP_COMMON_STARTUP=1
export GALAXY_SKIP_CLIENT_BUILD=1
export GALAXY_CONFIG_FILE=${GALAXY_CONFIG_FILE:-/etc/galaxy/galaxy.yml}
# Never create conda envs during tool install; rely on cached containers only.
export GALAXY_CONFIG_CONDA_AUTO_INSTALL=False
export GALAXY_CONFIG_CONDA_AUTO_INIT=False
# Keep managed configs inside the image, not /export.
export GALAXY_CONFIG_MANAGED_CONFIG_DIR=/galaxy/database/config
export GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG=/galaxy/integrated_tool_panel.xml
export GALAXY_CONFIG_FILE_PATH=/galaxy/database/files
export GALAXY_CONFIG_NEW_FILE_PATH=/galaxy/database/tmp
export GALAXY_CONFIG_TEMPLATE_CACHE_PATH=/galaxy/database/compiled_templates
export GALAXY_CONFIG_CITATION_CACHE_DATA_DIR=/galaxy/database/citations/data
export GALAXY_CONFIG_JOB_WORKING_DIRECTORY=/galaxy/database/job_working_directory

# Create all working directories and hand them to the galaxy user.
mkdir -p "${GALAXY_CONFIG_MANAGED_CONFIG_DIR}"
mkdir -p "${GALAXY_CONFIG_FILE_PATH}" "${GALAXY_CONFIG_NEW_FILE_PATH}" \
    "${GALAXY_CONFIG_TEMPLATE_CACHE_PATH}" "${GALAXY_CONFIG_CITATION_CACHE_DATA_DIR}" \
    "${GALAXY_CONFIG_JOB_WORKING_DIRECTORY}"
chown -R galaxy:galaxy "${GALAXY_CONFIG_MANAGED_CONFIG_DIR}" "${GALAXY_CONFIG_FILE_PATH}" \
    "${GALAXY_CONFIG_NEW_FILE_PATH}" "${GALAXY_CONFIG_TEMPLATE_CACHE_PATH}" \
    "${GALAXY_CONFIG_CITATION_CACHE_DATA_DIR}" "${GALAXY_CONFIG_JOB_WORKING_DIRECTORY}"

# Seed the integrated tool panel from the sample if not present yet.
if [ ! -f "${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}" ]; then
    cp -f /galaxy/config/integrated_tool_panel.xml.sample "${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}" 2>/dev/null || touch "${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}"
fi

# Enable Test Tool Shed for flavour installs.
export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE="${GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE:-$GALAXY_HOME/tool_sheds_conf.xml}"

# Ensure shed-tools is available.
. /tool_deps/_conda/etc/profile.d/conda.sh
conda activate base

cd "${GALAXY_ROOT_DIR}"

# Verbosity handling: quiet by default, chatty when INSTALL_TOOLS_VERBOSE is truthy.
INSTALL_TOOLS_VERBOSE="${INSTALL_TOOLS_VERBOSE:-false}"
wait_args=("-v")
access_log="-"
startup_log="/tmp/install_tools_startup.log"
startup_redirect=""
if ! [[ "${INSTALL_TOOLS_VERBOSE}" =~ ^([Tt][Rr][Uu][Ee]|1|[Yy][Ee][Ss])$ ]]; then
    wait_args=()
    access_log="/dev/null"
    startup_redirect=">> ${startup_log} 2>&1"
fi

# If supervisord is already running we assume Galaxy is up (normal runtime).
if pgrep "supervisord" >/dev/null; then
    echo "System is up and running. Installing tools against the running Galaxy (port 80)."
    PORT=80
    started_locally=false
else
    # No running Galaxy: bring up a temporary stack just for the install.
    PORT=8080
    started_locally=true
    install_log='galaxy_install.log'
    echo "Starting PostgreSQL for tool installation"
    PG_VER="${PG_VERSION:-15}"
    PG_DATA="${PG_DATA_DIR_DEFAULT:-/var/lib/postgresql/${PG_VER}/main/}"
    sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -D "$PG_DATA" -l /tmp/pg_install.log -o "-k /var/run/postgresql" start
    until pg_isready -h /var/run/postgresql -U galaxy >/dev/null 2>&1; do
        echo "Waiting for PostgreSQL..."
        sleep 1
    done
    # Ensure supervisord is running so gravity-managed services can start.
    if ! pgrep "supervisord" >/dev/null; then
        supervisord -c /etc/supervisor/supervisord.conf
        sleep 2
    fi
    echo "Starting Galaxy for tool installation"
    export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL="http://localhost:${PORT}"
    export GRAVITY_MANAGE_TUSD=False
    export GALAXY_CONFIG_TUS_UPLOAD_ENABLED=False
    export GRAVITY_MANAGE_GX_IT_PROXY=False
    # Prefer env overrides instead of mutating config files.
    export GALAXY_CONFIG_OVERRIDE__galaxy_infrastructure_url="http://localhost:${PORT}"
    # Keep container resolvers simple (no CVMFS) for the install run; do not overwrite runtime config.
    container_conf_target="$(mktemp /tmp/container_resolvers_conf.install.XXXX.yml)"
    cat > "${container_conf_target}" <<'EOF'
- type: explicit
- type: cached_mulled_singularity
  cache_directory: "/export/container_cache/singularity/mulled"
- type: mulled
  namespace: "biocontainers"
- type: build_mulled
  namespace: local
EOF
    chown galaxy:galaxy "${container_conf_target}" || true
    chmod 644 "${container_conf_target}" || true
    export GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE="${container_conf_target}"
    # Start a single-worker gunicorn Galaxy as the galaxy user and remember
    # the wrapper PID for teardown below.
    sudo -E -H -u galaxy -- bash -c "
        unset SUDO_UID SUDO_GID SUDO_COMMAND SUDO_USER
        . /galaxy_venv/bin/activate
        GALAXY_SKIP_REQUIREMENTS_INSTALL=1 GALAXY_SKIP_COMMON_STARTUP=1 GALAXY_SKIP_CLIENT_BUILD=1 GALAXY_NO_VENV=1 \
        GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost:${PORT} \
        PYTHONPATH=lib GALAXY_CONFIG_FILE=/etc/galaxy/galaxy.yml \
        gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()' \
            --timeout 300 --pythonpath lib -k galaxy.webapps.galaxy.workers.Worker \
            -b 127.0.0.1:${PORT} --workers=1 --config python:galaxy.web_stack.gunicorn_config --preload \
            --pid galaxy_install.pid --error-logfile ${install_log} --access-logfile ${access_log} ${startup_redirect} &
        echo \$! > /tmp/galaxy_install_wrapper.pid
    "
    galaxy-wait -g "http://localhost:${PORT}" "${wait_args[@]}" --timeout 900
fi

# Ensure admin user exists (needed for shed-tools with fakekey).
if [[ -n "${GALAXY_DEFAULT_ADMIN_USER:-}" ]]; then
    echo "Creating admin user ${GALAXY_DEFAULT_ADMIN_USER} (if missing)"
    . "${GALAXY_VIRTUAL_ENV}/bin/activate"
    python /usr/local/bin/create_galaxy_user.py \
        --user "${GALAXY_DEFAULT_ADMIN_EMAIL}" \
        --password "${GALAXY_DEFAULT_ADMIN_PASSWORD}" \
        -c "${GALAXY_CONFIG_FILE}" \
        --username "${GALAXY_DEFAULT_ADMIN_USER}" \
        --key "${GALAXY_DEFAULT_ADMIN_KEY}"
    deactivate
fi

echo "Installing tools from $1"
INSTALL_TOOL_DEPS="${INSTALL_TOOL_DEPENDENCIES:-false}"
if [[ "${INSTALL_TOOL_DEPS}" =~ ^([Tt][Rr][Uu][Ee]|1|[Yy][Ee][Ss])$ ]]; then
    echo "Installing tool dependencies as well (INSTALL_TOOL_DEPENDENCIES=${INSTALL_TOOL_DEPS})"
    shed-tools install -g "http://localhost:${PORT}" -a fakekey -t "$1" --install-tool-dependencies
else
    echo "Skipping tool and resolver dependencies (INSTALL_TOOL_DEPENDENCIES=${INSTALL_TOOL_DEPS})"
    shed-tools install -g "http://localhost:${PORT}" -a fakekey -t "$1" \
        --skip-install-resolver-dependencies \
        --skip-install-repository-dependencies
fi

# Tear down the temporary services if we started them above.
if $started_locally; then
    echo "Shutting down temporary Galaxy/PostgreSQL used for tool install"
    if [ -f /tmp/galaxy_install_wrapper.pid ]; then
        kill "$(cat /tmp/galaxy_install_wrapper.pid)" 2>/dev/null || true
        rm -f /tmp/galaxy_install_wrapper.pid
    fi
    sudo -E -H -u galaxy kill "$(cat galaxy_install.pid)" 2>/dev/null || true
    rm -f galaxy_install.pid "$install_log"
    PG_VER="${PG_VERSION:-15}"
    PG_DATA="${PG_DATA_DIR_DEFAULT:-/var/lib/postgresql/${PG_VER}/main/}"
    sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -D "$PG_DATA" stop
fi
================================================
FILE: galaxy/run.sh
================================================
#!/bin/sh
# Usage: ./run.sh
#
#
# Description: This script can be used to start or stop the galaxy
#              web application.
cd "$(dirname "$0")"

. ./scripts/common_startup_functions.sh

# If there is a file that defines a shell environment specific to this
# instance of Galaxy, source the file.
if [ -z "$GALAXY_LOCAL_ENV_FILE" ];
then
    GALAXY_LOCAL_ENV_FILE='./config/local_env.sh'
fi

if [ -f "$GALAXY_LOCAL_ENV_FILE" ];
then
    . "$GALAXY_LOCAL_ENV_FILE"
fi

GALAXY_PID=${GALAXY_PID:-galaxy.pid}
GALAXY_LOG=${GALAXY_LOG:-galaxy.log}
PID_FILE=$GALAXY_PID
LOG_FILE=$GALAXY_LOG

# "$@" must be quoted: the previous unquoted $@ re-split any argument
# containing whitespace before handing it to parse_common_args.
parse_common_args "$@"

run_common_start_up
setup_python

# Optional test-tool mode: point Galaxy at the functional-test tool set.
if [ -n "$GALAXY_RUN_WITH_TEST_TOOLS" ];
then
    export GALAXY_CONFIG_OVERRIDE_TOOL_CONFIG_FILE="$(pwd)/test/functional/tools/sample_tool_conf.xml"
    export GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES="true"
    export GALAXY_CONFIG_OVERRIDE_ENABLE_BETA_TOOL_FORMATS="true"
    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE="true"
    export GALAXY_CONFIG_OVERRIDE_WEBHOOKS_DIR="test/functional/webhooks"
    export GALAXY_CONFIG_OVERRIDE_PANEL_VIEWS_DIR="$(pwd)/test/integration/panel_views_1/"
fi

set_galaxy_config_file_var

if [ "$INITIALIZE_TOOL_DEPENDENCIES" -eq 1 ]; then
    # Install Conda environment if needed.
    python ./scripts/manage_tool_dependencies.py init_if_needed
fi

find_server "${GALAXY_CONFIG_FILE:-none}" galaxy

echo "Executing: $run_server $server_args"
# args are properly quoted so use eval
eval GALAXY_ROOT_DIR="." $run_server $server_args
================================================
FILE: galaxy/sample_tool_list.yaml
================================================
# This is just a sample file. For a fully documented version of this file, see
# https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample

# Global install options applied to every tool below.
install_repository_dependencies: false
install_resolver_dependencies: false
install_tool_dependencies: false
tools:
- name: 'column_maker'
  owner: 'devteam'
  tool_panel_section_label: 'Columnmaker section'
- name: 'tabular_to_fasta'
  owner: 'devteam'
  tool_panel_section_label: 'New Converters'
  # Pin a specific Tool Shed changeset revision; omit to install the latest.
  revisions:
  - '0b4e36026794' # v1.1.0
================================================
FILE: galaxy/setup_postgresql.py
================================================
import os
import shutil
import argparse
import subprocess
def pg_ctl(database_path, database_version, mod='start'):
    """
    Start/Stop PostgreSQL with variable data_directory.

    Rewrites ``data_directory`` in the cluster's postgresql.conf to point at
    ``database_path``, then invokes ``service postgresql <mod>``.

    mod = [start, stop, restart, reload]
    """
    pg_conf = f'/etc/postgresql/{database_version}/main/postgresql.conf'
    # postgresql.conf requires the path to be single-quoted.
    new_data_directory = f"'{database_path}'"
    # NOTE(review): shell=True with an interpolated path — only safe because
    # callers pass trusted, admin-supplied paths; confirm before reusing.
    cmd = f'sed -i "s|data_directory = .*|data_directory = {new_data_directory}|g" {pg_conf}'
    subprocess.call(cmd, shell=True)
    subprocess.call(f'service postgresql {mod}', shell=True)
def set_pg_permission(database_path):
    """
    Set the correct permissions for a newly created PostgreSQL data_directory.

    Recursively chowns the path to postgres:postgres and chmods it to 0700.
    Uses argument-list subprocess calls (instead of shell=True string
    interpolation) so paths containing spaces or shell metacharacters are
    passed through safely.

    :param database_path: file or directory to re-own and restrict
    """
    subprocess.call(['chown', '-R', 'postgres:postgres', database_path])
    subprocess.call(['chmod', '-R', '0700', database_path])
def create_pg_db(user, password, database, database_path, database_version):
    """
    Initialize PostgreSQL Database, add database user and create the Galaxy Database.

    Steps (order matters): create and re-own the data directory, run initdb
    as the postgres system user, install the snakeoil SSL cert/key into the
    cluster, point the service at the new data directory and start it, create
    the superuser and database, then stop the service again.

    :param user: database superuser to create (referenced from galaxy.yml)
    :param password: password for that superuser
    :param database: name of the Galaxy database to create
    :param database_path: target data_directory (must not exist yet; os.makedirs raises otherwise)
    :param database_version: PostgreSQL major version, used to locate the binaries
    """
    pg_bin = f"/usr/lib/postgresql/{database_version}/bin/"
    os.makedirs(database_path)
    set_pg_permission(database_path)
    # initialize a new postgres database
    subprocess.call(
        f"su - postgres -c '{os.path.join(pg_bin, 'initdb')} "
        f"--auth=trust --encoding UTF8 --pgdata={database_path}'",
        shell=True
    )
    # Reuse the distro's self-signed ("snakeoil") certificate for SSL support.
    shutil.copy('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(database_path, 'server.crt'))
    shutil.copy('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(database_path, 'server.key'))
    set_pg_permission(os.path.join(database_path, 'server.crt'))
    set_pg_permission(os.path.join(database_path, 'server.key'))
    # change data_directory in postgresql.conf and start the service with the new location
    pg_ctl(database_path, database_version, 'start')
    subprocess.call(f"""su - postgres -c "psql --command \\"CREATE USER {user} WITH SUPERUSER PASSWORD '{password}'\\";"
""", shell=True)
    subprocess.call(f"su - postgres -c 'createdb -O {user} {database}'", shell=True)
    subprocess.call('service postgresql stop', shell=True)
if __name__ == "__main__":
    # Initialize the Galaxy Database + add an Admin user.
    # This database is the default one, created by the Dockerfile.
    # The user can set a volume (-v /path/:/export/) to get a persistent database.
    # (This text previously sat in a no-op triple-quoted string expression
    # after parse_args(); it is now a real comment.)
    parser = argparse.ArgumentParser(description='Initializing a complete Galaxy Database with Tool Shed Tools.')
    parser.add_argument("--dbuser", required=True,
                        help="Username of the Galaxy Database Administrator. That name will be specified in the "
                             "galaxy.yml file.")
    parser.add_argument("--dbpassword", required=True,
                        help="Password of the Galaxy Database Administrator. That name will be specified in the "
                             "galaxy.yml file.")
    parser.add_argument("--db-name", dest='db_name', required=True,
                        help="Galaxy Database name. That name will be specified in the galaxy.yml file.")
    parser.add_argument("--dbpath",
                        help="Galaxy Database path.")
    parser.add_argument("--dbversion", default='15',
                        help="Postgresql server major version.")
    options = parser.parse_args()
    create_pg_db(options.dbuser, options.dbpassword, options.db_name, options.dbpath, options.dbversion)
================================================
FILE: galaxy/startup.sh
================================================
#!/usr/bin/env bash
# This is needed for Docker compose to have a unified alias for the main container.
# Modifying /etc/hosts can only happen during runtime not during build-time
echo "127.0.0.1 galaxy" >> /etc/hosts
# If the Galaxy config file is not in the expected place, copy from the sample
# and hope for the best (that the admin has done all the setup through env vars.)
if [ ! -f $GALAXY_CONFIG_FILE ]
then
# this should succesfully copy either .yml or .ini sample file to the expected location
cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE
fi
# Set number of Gunicorn workers via GUNICORN_WORKERS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gunicorn.workers" "${GUNICORN_WORKERS:-2}" &> /dev/null
# Set number of Celery workers via CELERY_WORKERS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.concurrency" "${CELERY_WORKERS:-2}" &> /dev/null
# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.handlers.handler.processes" "${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null
# Initialize variables for optional ansible parameters
ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=""
# Configure proxy prefix filtering
if [[ ! -z $PROXY_PREFIX ]]
then
echo "Configuring with proxy prefix: $PROXY_PREFIX"
export GALAXY_CONFIG_GALAXY_URL_PREFIX="$PROXY_PREFIX"
# Uncomment-and-set interactivetools_base_path in galaxy.yml via an in-place edit.
# TODO: Set this using GALAXY_CONFIG_INTERACTIVETOOLS_BASE_PATH after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #interactivetools_base_path:.*' replace=' interactivetools_base_path: ${PROXY_PREFIX}'" &> /dev/null
# Make tusd aware it sits behind the proxy and serve uploads under the prefix
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.extra_args" "-behind-proxy -base-path $PROXY_PREFIX/api/upload/resumable_upload" &> /dev/null
# Serve flower under $PROXY_PREFIX/flower
ansible localhost -m replace -a "path=/etc/flower/flowerconfig.py regexp='^url_prefix.*' replace='url_prefix = \"$PROXY_PREFIX/flower\"'" &> /dev/null
# Fix path to html assets
ansible localhost -m replace -a "dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\"|\')[/\\w]*(/static)' replace='\\1${PROXY_PREFIX}\\2'" &> /dev/null
# Set some other vars based on that prefix
if [[ -z "$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" ]]
then
export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX="$PROXY_PREFIX/gie_proxy"
fi
if [[ ! -z $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL ]]
then
export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL="${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}${PROXY_PREFIX}"
fi
# NOTE(review): this is true unless BOTH flags are exactly the string "False".
# With any HTTPS mode active the prefix is handed to the HTTPS playbook runs
# further below; otherwise it is applied to nginx right away.
if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]]
then
ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX="--extra-vars nginx_prefix_location=$PROXY_PREFIX"
else
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars nginx_prefix_location="$PROXY_PREFIX"
fi
fi
# Obtain a certificate via certbot and enable HTTPS, unless the admin
# explicitly disabled it (USE_HTTPS_LETSENCRYPT must be the string "False").
if [ "$USE_HTTPS_LETSENCRYPT" != "False" ]
then
echo "Setting up letsencrypt"
# Prepend the conda bin dir so the certbot role finds its tooling for this run only.
PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_role=usegalaxy_eu.certbot \
--extra-vars "{\"certbot_domains\": [\"$GALAXY_DOMAIN\"]}" \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/user/privkey-$GALAXY_USER.pem \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/fullchain.pem \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
fi
# Configure HTTPS unless explicitly disabled (USE_HTTPS must be "False" to skip).
if [ "$USE_HTTPS" != "False" ]
then
# Prefer an admin-supplied key/cert pair mounted under /export.
if [ -f /export/server.key -a -f /export/server.crt ]
then
echo "Copying SSL keys"
# Encode the key's newlines as literal "\n" so the key can be embedded
# in the JSON --extra-vars payload below.
ssl_key_content=$(cat /export/server.key | sed 's/$/\\n/' | tr -d '\n')
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_src_dir=/export \
--extra-vars "{\"sslkeys\": {\"server.key\": \"$ssl_key_content\"}}" \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/server.key \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/server.crt \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
else
# No key material supplied: generate a self-signed certificate for $GALAXY_DOMAIN.
echo "Setting up self-signed SSL keys"
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_role=galaxyproject.self_signed_certs \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/$GALAXY_DOMAIN.pem \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/$GALAXY_DOMAIN.crt \
--extra-vars "{\"openssl_domains\": [\"$GALAXY_DOMAIN\"]}" \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
fi
fi
# When any HTTPS mode is active, make sure the advertised infrastructure URL
# uses the https scheme as well.
if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]]
then
# Check if GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL has http but not https
if [[ $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL == "http:"* ]]
then
GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL/http:/https:}
export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL
fi
fi
# Disable authentication of flower
if [[ ! -z $DISABLE_FLOWER_AUTH ]]; then
# disable authentication
echo "Disabling flower authentication"
# Keep the original config around so it can be restored manually.
cp /etc/nginx/flower_auth.conf /etc/nginx/flower_auth.conf.source
echo "# No authentication defined" > /etc/nginx/flower_auth.conf
fi
# Try to guess if we are running under --privileged mode
if [[ -n $HOST_DOCKER_LEGACY ]]; then
# Legacy heuristic: if /proc/kcore shows up in the mount table, assume the
# container is NOT privileged.
if mount | grep "/proc/kcore"; then
PRIVILEGED=false
else
PRIVILEGED=true
fi
else
# Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode
# Creating a network interface only succeeds with elevated capabilities.
if ip link add dummy0 type dummy 2>/dev/null; then
PRIVILEGED=true
# clean the dummy0 link
ip link delete dummy0 2>/dev/null
else
PRIVILEGED=false
fi
fi
cd $GALAXY_ROOT_DIR
# Activate Galaxy's virtualenv for everything that follows.
. $GALAXY_VIRTUAL_ENV/bin/activate
# Decide container routing based on runtime capabilities; prefer Singularity when available.
# Docker counts as usable when the host socket is mounted or a docker CLI exists.
docker_ok=false
if [ -S /var/run/docker.sock ] || command -v docker >/dev/null 2>&1; then
docker_ok=true
fi
# Accept either the singularity or the apptainer binary.
singularity_cmd=""
if command -v singularity >/dev/null 2>&1; then
singularity_cmd="singularity"
elif command -v apptainer >/dev/null 2>&1; then
singularity_cmd="apptainer"
fi
# Singularity routing is only enabled when the container runs privileged.
singularity_ok=false
if $PRIVILEGED && [ -n "$singularity_cmd" ]; then
singularity_ok=true
fi
dest_default="${GALAXY_DESTINATIONS_DEFAULT:-}"
dest_docker="${GALAXY_DESTINATIONS_DOCKER_DEFAULT:-}"
# Pick a default destination when none is configured, and upgrade a plain
# "slurm_cluster" to the Singularity variant when Singularity is usable.
if [ -z "$dest_default" ] || { $singularity_ok && [ "$dest_default" = "slurm_cluster" ]; }; then
if $singularity_ok; then
dest_default="slurm_cluster_singularity"
elif $docker_ok; then
dest_default="slurm_cluster_docker"
else
dest_default="slurm_cluster"
fi
export GALAXY_DESTINATIONS_DEFAULT="$dest_default"
fi
# The Docker destination falls back to the default destination when Docker
# is not usable.
if [ -z "$dest_docker" ]; then
if $docker_ok; then
dest_docker="slurm_cluster_docker"
else
dest_docker="$dest_default"
fi
export GALAXY_DESTINATIONS_DOCKER_DEFAULT="$dest_docker"
else
dest_docker="$GALAXY_DESTINATIONS_DOCKER_DEFAULT"
fi
# Report the chosen routing; also pin the Singularity/Apptainer image cache
# under /export so pulled images survive container restarts.
if $singularity_ok; then
export SINGULARITY_CACHEDIR="${SINGULARITY_CACHEDIR:-/export/container_cache/singularity/mulled}"
export APPTAINER_CACHEDIR="${APPTAINER_CACHEDIR:-$SINGULARITY_CACHEDIR}"
echo "Container routing: default -> ${dest_default} (Singularity via ${singularity_cmd}); Docker -> ${dest_docker}"
elif $docker_ok; then
echo "Container routing: default -> ${dest_default} (Docker socket detected); Docker -> ${dest_docker}"
else
echo "Container routing: no Docker/Singularity detected; using ${dest_default}"
fi
# CVMFS repositories to mount; accepts a comma- or space-separated list.
cvmfs_repos="${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}"
cvmfs_repos="${cvmfs_repos//,/ }"
if $PRIVILEGED; then
# NOTE(review): presumably detaches a stale /var/lib/docker mount before the
# dind setup below remounts it — TODO confirm; a failed umount only prints an error.
umount /var/lib/docker
if command -v mount.cvmfs >/dev/null 2>&1; then
# FUSE device must be accessible for the CVMFS client.
chmod 666 /dev/fuse || true
for repo in $cvmfs_repos; do
repo_dir="/cvmfs/$repo"
mkdir -p "$repo_dir"
if ! mountpoint -q "$repo_dir"; then
echo "Mounting CVMFS repo $repo"
mount -t cvmfs "$repo" "$repo_dir" || echo "Warning: failed to mount $repo"
fi
done
else
echo "Info: CVMFS client not available; install CVMFS or use the sidecar via docker-compose --profile cvmfs."
fi
else
echo "Info: CVMFS mounts disabled (not running privileged). Use --privileged or the CVMFS sidecar in docker-compose."
fi
# When nothing is mounted on /cvmfs, create the expected directory skeleton so
# config paths referencing /cvmfs still resolve, and hand it to the Galaxy user.
if ! mountpoint -q /cvmfs 2>/dev/null; then
for repo in $cvmfs_repos; do
repo_dir="/cvmfs/$repo"
mkdir -p "$repo_dir"
if [ "$repo" = "singularity.galaxyproject.org" ]; then
mkdir -p "$repo_dir/all"
fi
done
chown -R "$GALAXY_USER:$GALAXY_USER" /cvmfs
fi
if [[ ! -z $STARTUP_EXPORT_USER_FILES ]]; then
# If /export/ is mounted, export_user_files file moving all data to /export/
# symlinks will point from the original location to the new path under /export/
# If /export/ is not given, nothing will happen in that step
echo "Checking /export..."
python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT
# Pre-create the container image cache and only re-chown it when ownership
# differs (a recursive chown over a large cache is slow).
mkdir -p /export/container_cache/singularity/mulled
export_cache_owner="$(stat -c '%u:%g' /export/container_cache 2>/dev/null || echo '')"
if [[ "$export_cache_owner" != "${GALAXY_UID}:${GALAXY_GID}" ]]; then
chown -R "$GALAXY_USER:$GALAXY_USER" /export/container_cache
fi
fi
# Enable loading of dependencies on startup. Such as LDAP.
# Adapted from galaxyproject/galaxy/scripts/common_startup.sh
if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]
then
echo "Installing optional dependencies in galaxy virtual environment..."
sudo -E -H -u $GALAXY_USER bash -c '
: ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
: ${PYPI_INDEX_URL:="https://pypi.python.org/simple"}
GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib "$GALAXY_VIRTUAL_ENV/bin/python" -c "import galaxy.dependencies; print(\"\\n\".join(galaxy.dependencies.optional(\"$GALAXY_CONFIG_FILE\")))")
if [ -n "$GALAXY_CONDITIONAL_DEPENDENCIES" ]; then
deps_file="$(mktemp)"
printf "%s\n" "$GALAXY_CONDITIONAL_DEPENDENCIES" > "$deps_file"
/usr/local/bin/uv pip install \
--python "$GALAXY_VIRTUAL_ENV/bin/python" \
-r "$deps_file" \
--index-url "${GALAXY_WHEELS_INDEX_URL}" \
--extra-index-url "${PYPI_INDEX_URL}"
rm -f "$deps_file"
fi
'
fi
if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]
then
echo "Installing development requirements in galaxy virtual environment..."
sudo -E -H -u $GALAXY_USER bash -c '
: ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
: ${PYPI_INDEX_URL:="https://pypi.python.org/simple"}
dev_requirements="./lib/galaxy/dependencies/dev-requirements.txt"
if [ -f "$dev_requirements" ]; then
/usr/local/bin/uv pip install \
--python "$GALAXY_VIRTUAL_ENV/bin/python" \
-r "$dev_requirements" \
--index-url "${GALAXY_WHEELS_INDEX_URL}" \
--extra-index-url "${PYPI_INDEX_URL}"
fi
'
fi
# Enable Test Tool Shed
if [[ ! -z $ENABLE_TTS_INSTALL ]]
then
echo "Enable installation from the Test Tool Shed."
export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml
fi
# Remove all default tools from Galaxy by default
if [[ ! -z $BARE ]]
then
echo "Remove all tools from the tool_conf.xml file."
export GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_ROOT_DIR/test/functional/tools/upload_tool_conf.xml
fi
# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool
if [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]
then
if [ ! -d "/tool_deps/_conda/envs/__bcftools@1.5" ]; then
su $GALAXY_USER -c "/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5"
su $GALAXY_USER -c "/tool_deps/_conda/bin/conda clean --tarballs --yes"
fi
fi
# Only relevant when this container manages postgres itself.
if [[ $NONUSE != *"postgres"* ]]
then
# Backward compatibility for exported postgresql directories before version 15.08.
# In previous versions postgres has the UID/GID of 102/106. We changed this in
# https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and
# GALAXY_POSTGRES_GID=1550
if [ -e /export/postgresql/ ];
then
# Quote the command substitution so an empty result (e.g. stat failure)
# cannot break the test, and use the portable "=" string comparison.
if [ "$(stat -c %g /export/postgresql/)" = "106" ];
then
chown -R postgres:postgres /export/postgresql/
fi
fi
fi
# HTCondor: either generate a minimal config pointing at an external scheduler
# or link in a locally supplied config from /export.
if [[ ! -z $ENABLE_CONDOR ]]
then
if [[ ! -z $CONDOR_HOST ]]
then
echo "Enabling Condor with external scheduler at $CONDOR_HOST"
echo "# Config generated by startup.sh
CONDOR_HOST = $CONDOR_HOST
ALLOW_ADMINISTRATOR = *
ALLOW_OWNER = *
ALLOW_READ = *
ALLOW_WRITE = *
ALLOW_CLIENT = *
ALLOW_NEGOTIATOR = *
DAEMON_LIST = MASTER, SCHEDD
UID_DOMAIN = galaxy
DISCARD_SESSION_KEYRING_ON_STARTUP = False
TRUST_UID_DOMAIN = true" > /etc/condor/condor_config.local
fi
if [[ -e /export/condor_config ]]
then
echo "Replacing Condor config by locally supplied config from /export/condor_config"
rm -f /etc/condor/condor_config
ln -s /export/condor_config /etc/condor/condor_config
fi
fi
# Copy or link the slurm/munge config files
if [ -e /export/slurm.conf ]
then
rm -f /etc/slurm/slurm.conf
ln -s /export/slurm.conf /etc/slurm/slurm.conf
else
# Configure SLURM with runtime hostname.
# Use absolute path to python so virtualenv is not used.
mkdir -p /etc/slurm
/usr/bin/python /usr/sbin/configure_slurm.py
fi
# Working/log/state directories for the SLURM daemons, owned by the Galaxy user.
mkdir -p /tmp/slurm /var/log/slurm /var/lib/slurm/slurmctld
chown -R $GALAXY_USER:$GALAXY_USER /tmp/slurm /var/log/slurm /var/lib/slurm
# munge requires its key to be readable by the owner only (mode 400).
if [ -e /export/munge.key ]
then
rm -f /etc/munge/munge.key
ln -s /export/munge.key /etc/munge/munge.key
chmod 400 /export/munge.key
fi
# link the gridengine config file
if [ -e /export/act_qmaster ]
then
rm -f /var/lib/gridengine/default/common/act_qmaster
ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster
fi
# Waits until postgres is ready
# Block until the Galaxy database accepts connections, polling every 5s.
function wait_for_postgres {
echo "Checking if database is up and running"
# Silence the checker entirely: '>/dev/null 2>&1' discards both streams,
# whereas the previous '2>&1 >/dev/null' order leaked stderr to the console.
until /usr/local/bin/check_database.py >/dev/null 2>&1; do sleep 5; echo "Waiting for database"; done
echo "Database connected"
}
# Waits until rabbitmq is ready
# Block until the RabbitMQ broker responds to rabbitmqctl, polling every 5s.
function wait_for_rabbitmq {
echo "Checking if RabbitMQ is up and running"
# '>/dev/null 2>&1' silences both streams; the former '2>&1 >/dev/null'
# order still printed stderr.
until rabbitmqctl status >/dev/null 2>&1; do sleep 5; echo "Waiting for RabbitMQ"; done
echo "RabbitMQ is ready"
}
# Waits until docker daemon is ready
# Block until the Docker daemon answers API requests, polling every 5s.
function wait_for_docker {
echo "Checking if docker daemon is up and running"
# '>/dev/null 2>&1' silences both streams; the former '2>&1 >/dev/null'
# order still printed stderr.
until docker version >/dev/null 2>&1; do sleep 5; echo "Waiting for docker daemon"; done
echo "Docker daemon is ready"
}
# Wait (up to ~20s) until the munge daemon can issue credentials.
# Returns 1 if munge never becomes ready; callers treat that as non-fatal.
function wait_for_munge {
local attempts_left=20
echo "Checking if munge is up and running"
while ! munge -n >/dev/null 2>&1; do
if [[ $attempts_left -le 0 ]]; then
echo "Munge did not become ready"
return 1
fi
attempts_left=$((attempts_left - 1))
sleep 1
done
echo "Munge is ready"
}
# $NONUSE can be set to include postgres, cron, proftp, nodejs, condor, slurmd, slurmctld,
# celery, rabbitmq, redis, flower or tusd
# if included we will _not_ start these services.
# Launch supervisord and then start each managed service unless it is listed
# in $NONUSE or its SUPERVISOR_MANAGE_* flag is unset.
function start_supervisor {
supervisord -c /etc/supervisor/supervisord.conf
# Give supervisord a moment to come up before issuing supervisorctl commands.
sleep 5
if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
if [[ $NONUSE != *"postgres"* ]]
then
echo "Starting postgres"
supervisorctl start postgresql
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then
if [[ $NONUSE != *"cron"* ]]
then
echo "Starting cron"
supervisorctl start cron
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then
if [[ $NONUSE != *"proftp"* ]]
then
echo "Starting ProFTP"
supervisorctl start proftpd
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then
if [[ $NONUSE != *"condor"* ]]
then
echo "Starting condor"
supervisorctl start condor
fi
fi
# SLURM needs munge running first; failure to become ready is non-fatal.
if [[ ! -z $SUPERVISOR_MANAGE_SLURM ]]; then
echo "Starting munge"
supervisorctl start munge
wait_for_munge || true
if [[ $NONUSE != *"slurmctld"* ]]
then
echo "Starting slurmctld"
supervisorctl start slurmctld
fi
if [[ $NONUSE != *"slurmd"* ]]
then
echo "Starting slurmd"
supervisorctl start slurmd
fi
else
# Supervisor does not manage SLURM here: launch munged and the SLURM
# daemons directly instead.
echo "Starting munge"
mkdir -p /var/run/munge && chown -R root:root /var/run/munge
/usr/sbin/munged -f -F --num-threads="${MUNGE_NUM_THREADS:-2}" &
wait_for_munge || true
if [[ $NONUSE != *"slurmctld"* ]]
then
echo "Starting slurmctld"
/usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log
fi
if [[ $NONUSE != *"slurmd"* ]]
then
echo "Starting slurmd"
/usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log
fi
fi
# RabbitMQ is restarted once after its users are provisioned so the new
# credentials take effect.
if [[ ! -z $SUPERVISOR_MANAGE_RABBITMQ ]]; then
if [[ $NONUSE != *"rabbitmq"* ]]
then
echo "Starting rabbitmq"
supervisorctl start rabbitmq
wait_for_rabbitmq
echo "Configuring rabbitmq users"
ansible-playbook -c local /usr/local/bin/configure_rabbitmq_users.yml &> /dev/null
echo "Restarting rabbitmq"
supervisorctl restart rabbitmq
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_REDIS ]]; then
if [[ $NONUSE != *"redis"* ]]
then
echo "Starting redis"
supervisorctl start redis
fi
fi
# Flower only makes sense when celery and rabbitmq are in use as well.
if [[ ! -z $SUPERVISOR_MANAGE_FLOWER ]]; then
if [[ $NONUSE != *"flower"* && $NONUSE != *"celery"* && $NONUSE != *"rabbitmq"* ]]
then
echo "Starting flower"
supervisorctl start flower
fi
fi
}
# Adjust the gravity/galaxy configuration according to $NONUSE and then start
# the Galaxy processes via galaxyctl.
function start_gravity {
if [[ ! -z $GRAVITY_MANAGE_CELERY ]]; then
if [[ $NONUSE == *"celery"* ]]
then
echo "Disabling Galaxy celery app"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable" "false" &> /dev/null
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable_beat" "false" &> /dev/null
else
export GALAXY_CONFIG_ENABLE_CELERY_TASKS='true'
if [[ $NONUSE != *"redis"* ]]
then
# Configure Galaxy to use Redis as the result backend for Celery tasks
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #celery_conf:' replace=' celery_conf:'" &> /dev/null
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ # result_backend:.*' replace=' result_backend: redis://127.0.0.1:6379/0'" &> /dev/null
fi
fi
fi
if [[ ! -z $GRAVITY_MANAGE_GX_IT_PROXY ]]; then
if [[ $NONUSE == *"nodejs"* ]]
then
echo "Disabling nodejs"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gx_it_proxy.enable" "false" &> /dev/null
else
# TODO: Remove this after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #interactivetools_enable:.*' replace=' interactivetools_enable: true'" &> /dev/null
fi
fi
if [[ ! -z $GRAVITY_MANAGE_TUSD ]]; then
if [[ $NONUSE == *"tusd"* ]]
then
echo "Disabling Galaxy tusd app"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.enable" "false" &> /dev/null
# Keep a copy of the nginx snippet before neutralizing it.
cp /etc/nginx/delegated_uploads.conf /etc/nginx/delegated_uploads.conf.source
echo "# No delegated uploads" > /etc/nginx/delegated_uploads.conf
else
# TODO: Remove this after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #galaxy_infrastructure_url:.*' replace=' galaxy_infrastructure_url: ${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}'" &> /dev/null
fi
fi
if [[ $NONUSE != *"rabbitmq"* ]]
then
# Set AMQP internal connection for Galaxy
export GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION="pyamqp://galaxy:galaxy@localhost:5672/galaxy"
fi
# Set the SUPERVISORD_SOCKET to overwrite gravity's default.
# The default will put the socket into the export dir, into gravity's state directory. And this caused some problems to start supervisord.
export SUPERVISORD_SOCKET=${SUPERVISORD_SOCKET:-/tmp/galaxy_supervisord.sock}
# Start galaxy services using gravity
/usr/local/bin/galaxyctl -d start
}
# Point postgresql at the (possibly exported) data directory before it starts.
if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
if [[ $NONUSE != *"postgres"* ]]
then
# Change the data_directory of postgresql in the main config file
ansible localhost -m lineinfile -a "line='data_directory = \'$PG_DATA_DIR_HOST\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'" &> /dev/null
fi
fi
# Main start sequence: privileged containers get CVMFS tool data, Interactive
# Tools and (optionally) docker-in-docker; unprivileged ones start plain.
if $PRIVILEGED; then
# In privileged mode autofs and CVMFS may be available, so only append existing files.
export GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH="${GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH},/cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml"
echo "Enable Galaxy Interactive Tools."
export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=True
export GALAXY_CONFIG_TOOL_CONFIG_FILE="$GALAXY_CONFIG_TOOL_CONFIG_FILE,$GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE"
# Update domain-based interactive tools nginx configuration with the galaxy domain if provided
if [[ ! -z $GALAXY_DOMAIN ]]; then
sed -i "s/\(\.interactivetool\.\)[^;]*/\1$GALAXY_DOMAIN/g" /etc/nginx/interactive_tools_common.conf
fi
if [[ -z $DOCKER_PARENT ]]; then
#build the docker in docker environment
# Ensure cgroup mounts are set up without triggering dind "no command" warnings.
bash /root/cgroupfs_mount.sh true
start_supervisor
start_gravity
supervisorctl start docker
wait_for_docker
else
#inheriting /var/run/docker.sock from parent, assume that you need to
#run docker with sudo to validate
echo "$GALAXY_USER ALL = NOPASSWD : ALL" >> /etc/sudoers
start_supervisor
start_gravity
fi
# Optionally pre-pull the Interactive Tool images selected via
# GALAXY_IT_FETCH_* flags; image names come from GALAXY_IT_<NAME>_IMAGE.
if [[ ! -z $PULL_IT_IMAGES ]]; then
echo "About to pull IT images. Depending on the size, this may take a while!"
for it in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do
enabled_var_name="GALAXY_IT_FETCH_${it}";
if [[ ${!enabled_var_name} ]]; then
# Store name in a var
image_var_name="GALAXY_IT_${it}_IMAGE"
# And then read from that var
docker pull "${!image_var_name}"
fi
done
fi
else
echo "Disable Galaxy Interactive Tools. Start with --privileged to enable ITs."
export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=False
start_supervisor
start_gravity
fi
wait_for_postgres
# Make sure the database is automatically updated
if [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]
then
echo "Updating Galaxy database"
sh manage_db.sh -c $GALAXY_CONFIG_FILE upgrade
fi
# In case the user wants the default admin to be created, do so.
if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]]
then
echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing"
python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
-c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
# If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.
# then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables
# visible here.
# The file needs to be executable (chmod a+x post-start-actions.sh)
if [ -x /export/post-start-actions.sh ]
then
# uses ephemeris, present in docker-galaxy-stable, to wait for the local instance
/tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log &&
/export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &
fi
fi
# Reinstall tools if the user wants to: GALAXY_AUTO_UPDATE_TOOLS is a
# comma-separated list of tool YAML files to feed to shed-tools.
if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]]
then
# Wait for the local instance and only install when Galaxy actually came up.
# (Previously the '&&' only guarded the OLDIFS assignment, so a failed wait
# still ran the loop and afterwards clobbered IFS from an unset OLDIFS.
# Also log to $GALAXY_LOGS_DIR instead of a hard-coded /home/galaxy/logs.)
if /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log
then
OLDIFS=$IFS
IFS=','
for TOOL_YML in $GALAXY_AUTO_UPDATE_TOOLS
do
echo "Installing tools from $TOOL_YML"
/tool_deps/_conda/bin/shed-tools install -g "http://127.0.0.1" -a "$GALAXY_DEFAULT_ADMIN_KEY" -t "$TOOL_YML"
/tool_deps/_conda/bin/conda clean --tarballs --yes
done
IFS=$OLDIFS
fi
fi
# migrate custom Visualisations (Galaxy plugins)
# this is needed by the new client build system
python3 ${GALAXY_ROOT_DIR}/scripts/plugin_staging.py
# Enable verbose output: GALAXY_LOGGING=full (case-insensitive) also tails
# the supervisor and nginx logs. The tr character classes must be quoted,
# otherwise the shell may glob-expand bare [:upper:]/[:lower:] against
# matching file names in the current directory.
if [ "$(echo "${GALAXY_LOGGING:-no}" | tr '[:upper:]' '[:lower:]')" = "full" ]
then
tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log
else
tail -f $GALAXY_LOGS_DIR/*.log
fi
================================================
FILE: galaxy/startup2.sh
================================================
#!/usr/bin/env bash
# startup2: startup script variant with structured, leveled logging.
# User-facing messages are written to $STARTUP_OUT_FD while command output is
# captured in $STARTUP_LOG.
STARTUP_LOG_DIR="${STARTUP_LOG_DIR:-${GALAXY_LOGS_DIR:-/home/galaxy/logs}}"
STARTUP_LOG="${STARTUP_LOG:-$STARTUP_LOG_DIR/startup2.log}"
STARTUP_LOG_LEVEL="${STARTUP_LOG_LEVEL:-info}"
STARTUP_LOG_TAIL="${STARTUP_LOG_TAIL:-200}"
STARTUP_PARALLEL="${STARTUP_PARALLEL:-true}"
STARTUP_VALIDATE="${STARTUP_VALIDATE:-false}"
STARTUP_WAIT_TIMEOUT="${STARTUP_WAIT_TIMEOUT:-600}"
STARTUP_GALAXY_URL="${STARTUP_GALAXY_URL:-http://127.0.0.1}"
STARTUP_OUT_FD=3
mkdir -p "$STARTUP_LOG_DIR"
# Keep the original stdout available on fd 3 for the log_* helpers.
exec 3>&1
if [ "$STARTUP_LOG_LEVEL" = "verbose" ]; then
# verbose: mirror everything to the console AND append it to the log file
exec > >(tee -a "$STARTUP_LOG") 2>&1
STARTUP_OUT_FD=1
else
# default: command output goes only to the log file
exec >>"$STARTUP_LOG" 2>&1
fi
# Color handling: "always", "never", or "auto" (color only when the output fd
# is a terminal).
STARTUP_COLOR="${STARTUP_COLOR:-auto}"
STARTUP_USE_COLOR=false
if [ "$STARTUP_COLOR" = "always" ]; then
STARTUP_USE_COLOR=true
elif [ "$STARTUP_COLOR" = "auto" ] && [ -t "${STARTUP_OUT_FD}" ]; then
STARTUP_USE_COLOR=true
fi
if $STARTUP_USE_COLOR; then
COLOR_RESET=$'\033[0m'
COLOR_INFO=$'\033[36m'
COLOR_WARN=$'\033[33m'
COLOR_ERROR=$'\033[31m'
COLOR_SUCCESS=$'\033[32m'
else
COLOR_RESET=""
COLOR_INFO=""
COLOR_WARN=""
COLOR_ERROR=""
COLOR_SUCCESS=""
fi
# Emit one line to the startup output fd, optionally wrapped in a color code.
# $1 is the color escape (may be empty); the remaining args form the message.
print_log() {
local color="$1"
shift
local text="$*"
if [ -z "$color" ]; then
printf '%s\n' "$text" >&${STARTUP_OUT_FD}
else
printf '%s%s%s\n' "$color" "$text" "$COLOR_RESET" >&${STARTUP_OUT_FD}
fi
}
# Informational message; suppressed when STARTUP_LOG_LEVEL=quiet.
log_info() {
[ "$STARTUP_LOG_LEVEL" = "quiet" ] || print_log "$COLOR_INFO" "$*"
}
# Success message; suppressed when STARTUP_LOG_LEVEL=quiet.
log_success() {
[ "$STARTUP_LOG_LEVEL" = "quiet" ] || print_log "$COLOR_SUCCESS" "$*"
}
# Warning message; always shown, prefixed with "Warning: ".
log_warn() {
local msg="Warning: $*"
print_log "$COLOR_WARN" "$msg"
}
# Error message; always shown, prefixed with "Error: ".
log_error() {
local msg="Error: $*"
print_log "$COLOR_ERROR" "$msg"
}
# Print a short overview of the effective runtime configuration to the
# user-facing output. Values that fall back to galaxy.yml defaults are
# labelled "default (galaxy.yml)".
show_runtime_summary() {
local gunicorn_workers="${GUNICORN_WORKERS:-2}"
local handler_processes="${GALAXY_HANDLER_NUMPROCS:-2}"
local celery_workers="${CELERY_WORKERS:-2}"
local destination_default="${GALAXY_DESTINATIONS_DEFAULT:-slurm_cluster}"
local slurm_enabled="${GALAXY_RUNNERS_ENABLE_SLURM:-default}"
local condor_enabled="${GALAXY_RUNNERS_ENABLE_CONDOR:-default}"
local docker_enabled="${GALAXY_DOCKER_ENABLED:-default}"
local mulled_enabled="${GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS:-default}"
local conda_auto="${GALAXY_CONFIG_CONDA_AUTO_INSTALL:-default}"
local conda_prefix="${GALAXY_CONDA_PREFIX:-/tool_deps/_conda}"
local docker_label="default (galaxy.yml)"
local mulled_label="default (galaxy.yml)"
# The ${VAR+x} checks distinguish "variable is set" (even if empty) from
# "variable is unset", so an explicit empty override is still reported.
if [ -n "${GALAXY_DOCKER_ENABLED+x}" ]; then
docker_label="$docker_enabled"
fi
if [ -n "${GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS+x}" ]; then
mulled_label="$mulled_enabled"
fi
log_info "Runtime summary:"
log_info " Web workers (gunicorn): ${gunicorn_workers}"
log_info " Job handlers: ${handler_processes}"
log_info " Celery workers: ${celery_workers}"
log_info " Default destination: ${destination_default}"
log_info " Runners: slurm=${slurm_enabled}, condor=${condor_enabled}"
log_info " Containers: docker=${docker_label}, mulled=${mulled_label}"
log_info " Conda: auto_install=${conda_auto}, prefix=${conda_prefix}"
log_info " Docs: https://github.com/bgruening/docker-galaxy"
}
# Print a variable's value, replaced by '***' when its name looks secret
# (contains KEY, SECRET, TOKEN, PASSWORD or PASSPHRASE). No trailing newline.
mask_sensitive_value() {
local var_name="$1"
local var_value="$2"
if [[ "$var_name" == *KEY* || "$var_name" == *SECRET* || "$var_name" == *TOKEN* || "$var_name" == *PASSWORD* || "$var_name" == *PASSPHRASE* ]]; then
printf '***'
else
printf '%s' "$var_value"
fi
}
# List all GALAXY_* environment overrides, masking secret-looking values and
# truncating anything longer than 200 characters.
show_galaxy_env_summary() {
local envs
# grep exits non-zero when nothing matches; '|| true' keeps that non-fatal.
envs="$(env | LC_ALL=C sort | grep '^GALAXY_')" || true
if [ -z "$envs" ]; then
log_info "Environment overrides (GALAXY_*): none"
return
fi
log_info "Environment overrides (GALAXY_*):"
# Split each line on the FIRST '='; 'value' keeps the remainder intact even
# if it contains further '=' characters.
while IFS='=' read -r name value; do
if [ -z "$name" ]; then
continue
fi
local display_value
display_value="$(mask_sensitive_value "$name" "$value")"
if [ "${#display_value}" -gt 200 ]; then
display_value="${display_value:0:200}..."
fi
log_info " ${name}=${display_value}"
done <<< "$envs"
}
# Dump the last $STARTUP_LOG_TAIL lines of the startup log to the user-facing
# fd; a missing or unreadable log is not an error.
show_startup_log_tail() {
if ! tail -n "$STARTUP_LOG_TAIL" "$STARTUP_LOG" >&${STARTUP_OUT_FD}; then
:
fi
}
# On startup failure: print the tail of the startup log plus the tail of every
# *.log file under $GALAXY_LOGS_DIR to help diagnose what went wrong.
show_failure_logs() {
log_error "Startup failed; showing recent logs"
show_startup_log_tail
if [ -d "${GALAXY_LOGS_DIR:-}" ]; then
for log in "$GALAXY_LOGS_DIR"/*.log; do
# The glob stays literal when no *.log exists, hence the -f guard.
if [ -f "$log" ]; then
printf '\n==> %s <==\n' "$log" >&${STARTUP_OUT_FD}
tail -n "$STARTUP_LOG_TAIL" "$log" >&${STARTUP_OUT_FD} || true
fi
done
fi
}
log_info "Starting Galaxy container (startup2). Logs: $STARTUP_LOG"
# This is needed for Docker compose to have a unified alias for the main container.
# Modifying /etc/hosts can only happen during runtime not during build-time
echo "127.0.0.1 galaxy" >> /etc/hosts
# If the Galaxy config file is not in the expected place, copy from the sample
# and hope for the best (that the admin has done all the setup through env vars.)
if [ ! -f $GALAXY_CONFIG_FILE ]
then
# this should succesfully copy either .yml or .ini sample file to the expected location
# (the ": -4" slice extracts the config file's extension).
cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE
fi
log_info "Configuring runtime settings"
# Set number of Gunicorn workers via GUNICORN_WORKERS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gunicorn.workers" "${GUNICORN_WORKERS:-2}" &> /dev/null
# Set number of Celery workers via CELERY_WORKERS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.concurrency" "${CELERY_WORKERS:-2}" &> /dev/null
# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.handlers.handler.processes" "${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null
# Initialize variables for optional ansible parameters
ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=""
# Configure proxy prefix filtering
if [[ ! -z $PROXY_PREFIX ]]
then
log_info "Configuring proxy prefix: $PROXY_PREFIX"
export GALAXY_CONFIG_GALAXY_URL_PREFIX="$PROXY_PREFIX"
# TODO: Set this using GALAXY_CONFIG_INTERACTIVETOOLS_BASE_PATH after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #interactivetools_base_path:.*' replace=' interactivetools_base_path: ${PROXY_PREFIX}'" &> /dev/null
# Make tusd aware it sits behind the proxy and serve uploads under the prefix
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.extra_args" "-behind-proxy -base-path $PROXY_PREFIX/api/upload/resumable_upload" &> /dev/null
# Serve flower under $PROXY_PREFIX/flower
ansible localhost -m replace -a "path=/etc/flower/flowerconfig.py regexp='^url_prefix.*' replace='url_prefix = \"$PROXY_PREFIX/flower\"'" &> /dev/null
# Fix path to html assets
ansible localhost -m replace -a "dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\"|\')[/\\w]*(/static)' replace='\\1${PROXY_PREFIX}\\2'" &> /dev/null
# Set some other vars based on that prefix
if [[ -z "$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" ]]
then
export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX="$PROXY_PREFIX/gie_proxy"
fi
if [[ ! -z $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL ]]
then
export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL="${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}${PROXY_PREFIX}"
fi
# NOTE(review): true unless BOTH flags are exactly "False"; with any HTTPS
# mode active the prefix is handed to the HTTPS playbook runs below.
if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]]
then
ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX="--extra-vars nginx_prefix_location=$PROXY_PREFIX"
else
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars nginx_prefix_location="$PROXY_PREFIX"
fi
fi
# Obtain a certificate via certbot unless explicitly disabled.
if [ "$USE_HTTPS_LETSENCRYPT" != "False" ]
then
log_info "Setting up LetsEncrypt"
PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_role=usegalaxy_eu.certbot \
--extra-vars "{\"certbot_domains\": [\"$GALAXY_DOMAIN\"]}" \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/user/privkey-$GALAXY_USER.pem \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/fullchain.pem \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
fi
# Configure HTTPS unless explicitly disabled (USE_HTTPS must be "False" to skip).
if [ "$USE_HTTPS" != "False" ]
then
# Prefer an admin-supplied key/cert pair mounted under /export.
if [ -f /export/server.key -a -f /export/server.crt ]
then
log_info "Using SSL keys from /export"
# Encode the key's newlines as literal "\n" for the JSON payload below.
ssl_key_content=$(cat /export/server.key | sed 's/$/\\n/' | tr -d '\n')
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_src_dir=/export \
--extra-vars "{\"sslkeys\": {\"server.key\": \"$ssl_key_content\"}}" \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/server.key \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/server.crt \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
else
log_info "Setting up self-signed SSL keys"
# No key material supplied: generate a self-signed cert for $GALAXY_DOMAIN.
ansible-playbook -c local /ansible/nginx.yml \
--extra-vars '{"nginx_servers": ["galaxy_redirect_ssl", "interactive_tools_redirect_ssl"]}' \
--extra-vars '{"nginx_ssl_servers": ["galaxy_https", "interactive_tools_https"]}' \
--extra-vars nginx_ssl_role=galaxyproject.self_signed_certs \
--extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/$GALAXY_DOMAIN.pem \
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/$GALAXY_DOMAIN.crt \
--extra-vars "{\"openssl_domains\": [\"$GALAXY_DOMAIN\"]}" \
$ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX
fi
fi
# When any HTTPS mode is active, ensure the advertised infrastructure URL uses
# the https scheme as well.
if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]]
then
# Check if GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL has http but not https
if [[ $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL == "http:"* ]]
then
GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL/http:/https:}
export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL
fi
fi
# Disable authentication of flower
if [[ ! -z $DISABLE_FLOWER_AUTH ]]; then
# disable authentification
log_info "Disabling flower authentication"
# Keep the original config around so it can be restored manually.
cp /etc/nginx/flower_auth.conf /etc/nginx/flower_auth.conf.source
echo "# No authentication defined" > /etc/nginx/flower_auth.conf
fi
# Try to guess if we are running under --privileged mode
if [[ ! -z $HOST_DOCKER_LEGACY ]]; then
# Legacy heuristic: /proc/kcore visible in the mount table means NOT privileged.
if mount | grep "/proc/kcore"; then
PRIVILEGED=false
else
PRIVILEGED=true
fi
else
# Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode
# Creating a network interface only succeeds with elevated capabilities.
ip link add dummy0 type dummy 2>/dev/null
if [[ $? -eq 0 ]]; then
PRIVILEGED=true
# clean the dummy0 link
ip link delete dummy0 2>/dev/null
else
PRIVILEGED=false
fi
fi
cd $GALAXY_ROOT_DIR
# Activate Galaxy's virtualenv for everything that follows.
. $GALAXY_VIRTUAL_ENV/bin/activate
# Decide container routing based on runtime capabilities; prefer Singularity when available.
# Docker counts as usable when the host socket is mounted or a docker CLI exists.
docker_ok=false
if [ -S /var/run/docker.sock ] || command -v docker >/dev/null 2>&1; then
docker_ok=true
fi
# Accept either the singularity or the apptainer binary.
singularity_cmd=""
if command -v singularity >/dev/null 2>&1; then
singularity_cmd="singularity"
elif command -v apptainer >/dev/null 2>&1; then
singularity_cmd="apptainer"
fi
# Singularity routing is only enabled when the container runs privileged.
singularity_ok=false
if $PRIVILEGED && [ -n "$singularity_cmd" ]; then
singularity_ok=true
fi
dest_default="${GALAXY_DESTINATIONS_DEFAULT:-}"
dest_docker="${GALAXY_DESTINATIONS_DOCKER_DEFAULT:-}"
# Pick a default destination when none is configured, and upgrade a plain
# "slurm_cluster" to the Singularity variant when Singularity is usable.
if [ -z "$dest_default" ] || { $singularity_ok && [ "$dest_default" = "slurm_cluster" ]; }; then
if $singularity_ok; then
dest_default="slurm_cluster_singularity"
elif $docker_ok; then
dest_default="slurm_cluster_docker"
else
dest_default="slurm_cluster"
fi
export GALAXY_DESTINATIONS_DEFAULT="$dest_default"
fi
# The Docker destination falls back to the default when Docker is unusable.
if [ -z "$dest_docker" ]; then
if $docker_ok; then
dest_docker="slurm_cluster_docker"
else
dest_docker="$dest_default"
fi
export GALAXY_DESTINATIONS_DOCKER_DEFAULT="$dest_docker"
else
dest_docker="$GALAXY_DESTINATIONS_DOCKER_DEFAULT"
fi
# Report the chosen routing; pin the image cache under /export so pulled
# images survive container restarts.
if $singularity_ok; then
export SINGULARITY_CACHEDIR="${SINGULARITY_CACHEDIR:-/export/container_cache/singularity/mulled}"
export APPTAINER_CACHEDIR="${APPTAINER_CACHEDIR:-$SINGULARITY_CACHEDIR}"
log_info "Container routing: default -> ${dest_default} (Singularity via ${singularity_cmd}); Docker -> ${dest_docker}"
elif $docker_ok; then
log_info "Container routing: default -> ${dest_default} (Docker socket detected); Docker -> ${dest_docker}"
else
log_warn "Container routing: no Docker/Singularity detected; using ${dest_default}"
fi
# Configure CVMFS repositories. CVMFS_REPOSITORIES may be comma- or
# space-separated; normalize to spaces for the `for` loops below.
cvmfs_repos="${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}"
cvmfs_repos="${cvmfs_repos//,/ }"
# When autofs is configured for CVMFS, repositories mount lazily on first
# access, so manual mounting is unnecessary (and would only produce warnings).
cvmfs_autofs_configured=false
if [ -f /etc/auto.cvmfs ] || [ -f /etc/auto.master.d/cvmfs.autofs ]; then
    cvmfs_autofs_configured=true
fi
if $PRIVILEGED; then
    log_info "Configuring CVMFS mounts (privileged)"
    # Detach /var/lib/docker before touching CVMFS. Ignore the error when it
    # is not actually mounted — the previous bare `umount` printed a spurious
    # "not mounted" failure on every clean start.
    umount /var/lib/docker 2>/dev/null || true
    if command -v mount.cvmfs >/dev/null 2>&1; then
        # The FUSE device must be writable for the user-space CVMFS client.
        chmod 666 /dev/fuse || true
        if $cvmfs_autofs_configured; then
            log_info "CVMFS autofs configured; mounts will appear on first access after services start."
        else
            for repo in $cvmfs_repos; do
                repo_dir="/cvmfs/$repo"
                mkdir -p "$repo_dir"
                if ! mountpoint -q "$repo_dir"; then
                    log_info "Mounting CVMFS repo $repo"
                    # One retry after a short pause; the first probe against a
                    # stratum server can be slow.
                    if ! mount -t cvmfs "$repo" "$repo_dir"; then
                        sleep 2
                        mount -t cvmfs "$repo" "$repo_dir" || log_warn "Failed to mount CVMFS repo $repo"
                    fi
                fi
            done
        fi
    else
        log_info "CVMFS client not available; install CVMFS or use the sidecar via docker-compose --profile cvmfs."
    fi
else
    log_info "CVMFS mounts disabled (not running privileged). Use --privileged or the CVMFS sidecar in docker-compose."
fi
# If /cvmfs is not itself a mountpoint (e.g. the sidecar is not attached yet),
# pre-create the repo directories so mount propagation can land there later
# and the Galaxy user can write into the singularity cache path.
if ! mountpoint -q /cvmfs 2>/dev/null; then
    for repo in $cvmfs_repos; do
        repo_dir="/cvmfs/$repo"
        mkdir -p "$repo_dir"
        if [ "$repo" = "singularity.galaxyproject.org" ]; then
            mkdir -p "$repo_dir/all"
        fi
    done
    chown -R "$GALAXY_USER:$GALAXY_USER" /cvmfs
fi
# Print the runtime and GALAXY_* environment summaries (defined earlier in
# this script; secrets in the env summary are masked).
show_runtime_summary
show_galaxy_env_summary
if [[ ! -z $STARTUP_EXPORT_USER_FILES ]]; then
# If /export/ is mounted, export_user_files file moving all data to /export/
# symlinks will point from the original location to the new path under /export/
# If /export/ is not given, nothing will happen in that step
log_info "Checking /export..."
python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT
# Ensure the shared singularity image cache exists and belongs to the Galaxy
# user; skip the (potentially slow) recursive chown when ownership matches.
mkdir -p /export/container_cache/singularity/mulled
export_cache_owner="$(stat -c '%u:%g' /export/container_cache 2>/dev/null || echo '')"
if [[ "$export_cache_owner" != "${GALAXY_UID}:${GALAXY_GID}" ]]; then
chown -R "$GALAXY_USER:$GALAXY_USER" /export/container_cache
fi
fi
# Delete compiled templates in case they are out of date
if [[ ! -z $GALAXY_CONFIG_TEMPLATE_CACHE_PATH ]]; then
rm -rf $GALAXY_CONFIG_TEMPLATE_CACHE_PATH/*
fi
# Enable loading of dependencies on startup. Such as LDAP.
# Adapted from galaxyproject/galaxy/scripts/common_startup.sh
# The install runs as the Galaxy user; galaxy.dependencies computes the
# optional requirement list from the active Galaxy config, and uv installs it
# from the Galaxy wheels index with PyPI as fallback.
if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]
then
log_info "Installing optional Galaxy dependencies"
sudo -E -H -u $GALAXY_USER bash -c '
: ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
: ${PYPI_INDEX_URL:="https://pypi.python.org/simple"}
GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib "$GALAXY_VIRTUAL_ENV/bin/python" -c "import galaxy.dependencies; print(\"\\n\".join(galaxy.dependencies.optional(\"$GALAXY_CONFIG_FILE\")))")
if [ -n "$GALAXY_CONDITIONAL_DEPENDENCIES" ]; then
deps_file="$(mktemp)"
printf "%s\n" "$GALAXY_CONDITIONAL_DEPENDENCIES" > "$deps_file"
/usr/local/bin/uv pip install \
--python "$GALAXY_VIRTUAL_ENV/bin/python" \
-r "$deps_file" \
--index-url "${GALAXY_WHEELS_INDEX_URL}" \
--extra-index-url "${PYPI_INDEX_URL}"
rm -f "$deps_file"
fi
'
fi
# Optionally layer Galaxy's development requirements on top; only meaningful
# together with LOAD_GALAXY_CONDITIONAL_DEPENDENCIES.
if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]
then
echo "Installing development requirements in galaxy virtual environment..."
sudo -E -H -u $GALAXY_USER bash -c '
: ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
: ${PYPI_INDEX_URL:="https://pypi.python.org/simple"}
dev_requirements="./lib/galaxy/dependencies/dev-requirements.txt"
if [ -f "$dev_requirements" ]; then
/usr/local/bin/uv pip install \
--python "$GALAXY_VIRTUAL_ENV/bin/python" \
-r "$dev_requirements" \
--index-url "${GALAXY_WHEELS_INDEX_URL}" \
--extra-index-url "${PYPI_INDEX_URL}"
fi
'
fi
# Enable Test Tool Shed
if [[ ! -z $ENABLE_TTS_INSTALL ]]
then
log_info "Enabling installation from the Test Tool Shed"
export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml
fi
# Remove all default tools from Galaxy by default
if [[ ! -z $BARE ]]
then
log_info "Removing default tools from tool_conf.xml"
export GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_ROOT_DIR/test/functional/tools/upload_tool_conf.xml
fi
# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool
if [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]
then
if [ ! -d "/tool_deps/_conda/envs/__bcftools@1.5" ]; then
su $GALAXY_USER -c "/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5"
su $GALAXY_USER -c "/tool_deps/_conda/bin/conda clean --tarballs --yes"
fi
fi
if [[ $NONUSE != *"postgres"* ]]
then
# Backward compatibility for exported postgresql directories before version 15.08.
# In previous versions postgres has the UID/GID of 102/106. We changed this in
# https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and
# GALAXY_POSTGRES_GID=1550
if [ -e /export/postgresql/ ];
then
# Old exports are detected by their legacy group id (106) and re-owned.
if [ `stat -c %g /export/postgresql/` == "106" ];
then
chown -R postgres:postgres /export/postgresql/
fi
fi
fi
if [[ ! -z $ENABLE_CONDOR ]]
then
# Point this container's Condor at an external scheduler when CONDOR_HOST
# is given; the generated local config is intentionally permissive.
if [[ ! -z $CONDOR_HOST ]]
then
log_info "Enabling Condor with external scheduler at $CONDOR_HOST"
echo "# Config generated by startup.sh
CONDOR_HOST = $CONDOR_HOST
ALLOW_ADMINISTRATOR = *
ALLOW_OWNER = *
ALLOW_READ = *
ALLOW_WRITE = *
ALLOW_CLIENT = *
ALLOW_NEGOTIATOR = *
DAEMON_LIST = MASTER, SCHEDD
UID_DOMAIN = galaxy
DISCARD_SESSION_KEYRING_ON_STARTUP = False
TRUST_UID_DOMAIN = true" > /etc/condor/condor_config.local
fi
# A user-supplied config in /export wins over the image's config.
if [[ -e /export/condor_config ]]
then
echo "Replacing Condor config by locally supplied config from /export/condor_config"
rm -f /etc/condor/condor_config
ln -s /export/condor_config /etc/condor/condor_config
fi
fi
# Copy or link the slurm/munge config files
if [ -e /export/slurm.conf ]
then
rm -f /etc/slurm/slurm.conf
ln -s /export/slurm.conf /etc/slurm/slurm.conf
else
# Configure SLURM with runtime hostname.
# Use absolute path to python so virtualenv is not used.
mkdir -p /etc/slurm
/usr/bin/python /usr/sbin/configure_slurm.py
fi
# Slurm state/log directories must exist and be writable by the Galaxy user.
mkdir -p /tmp/slurm /var/log/slurm /var/lib/slurm/slurmctld
chown -R $GALAXY_USER:$GALAXY_USER /tmp/slurm /var/log/slurm /var/lib/slurm
if [ -e /export/munge.key ]
then
rm -f /etc/munge/munge.key
ln -s /export/munge.key /etc/munge/munge.key
chmod 400 /export/munge.key
fi
# link the gridengine config file
if [ -e /export/act_qmaster ]
then
rm -f /var/lib/gridengine/default/common/act_qmaster
ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster
fi
# Waits until postgres is ready
function wait_for_postgres {
    # Poll the database via the helper script until it answers; give up after
    # STARTUP_POSTGRES_RETRIES (default 60) failed probes, 5 s apart.
    local attempts_left="${STARTUP_POSTGRES_RETRIES:-60}"
    log_info "Waiting for database..."
    while ! /usr/local/bin/check_database.py >/dev/null 2>&1; do
        attempts_left=$((attempts_left - 1))
        if (( attempts_left <= 0 )); then
            log_warn "Database did not become ready"
            return 1
        fi
        sleep 5
    done
    log_success "Database ready"
}
# Waits until rabbitmq is ready
function wait_for_rabbitmq {
    # Probe the broker with rabbitmqctl; give up after STARTUP_RABBITMQ_RETRIES
    # (default 60) failed probes, 5 s apart.
    local remaining="${STARTUP_RABBITMQ_RETRIES:-60}"
    log_info "Waiting for RabbitMQ..."
    while ! rabbitmqctl status >/dev/null 2>&1; do
        remaining=$((remaining - 1))
        if (( remaining <= 0 )); then
            log_warn "RabbitMQ did not become ready"
            return 1
        fi
        sleep 5
    done
    log_success "RabbitMQ ready"
}
# Waits until docker daemon is ready
function wait_for_docker {
    # Probe the daemon with `docker version`; give up after
    # STARTUP_DOCKER_RETRIES (default 60) failed probes, 5 s apart.
    local tries="${STARTUP_DOCKER_RETRIES:-60}"
    log_info "Waiting for docker daemon..."
    while ! docker version >/dev/null 2>&1; do
        tries=$((tries - 1))
        if (( tries <= 0 )); then
            log_warn "Docker daemon did not become ready"
            return 1
        fi
        sleep 5
    done
    log_success "Docker daemon ready"
}
# Waits until the munge daemon answers credential requests.
function wait_for_munge {
    # Probe with a self-test credential (`munge -n`); give up after
    # STARTUP_MUNGE_RETRIES (default 20) failed probes, 1 s apart.
    local retries="${STARTUP_MUNGE_RETRIES:-20}"
    log_info "Waiting for munge..."
    until munge -n >/dev/null 2>&1; do
        # Decrement before the exhaustion check, matching the accounting in
        # the other wait_for_* helpers (the old order allowed one extra probe).
        retries=$((retries - 1))
        if [[ $retries -le 0 ]]; then
            log_warn "Munge did not become ready"
            return 1
        fi
        sleep 1
    done
    log_success "Munge ready"
}
# $NONUSE can be set to include postgres, cron, proftp, nodejs, condor, slurmd, slurmctld,
# celery, rabbitmq, redis, flower or tusd
# if included we will _not_ start these services.
# Starts supervisord and then the individual managed services, optionally in
# parallel (STARTUP_PARALLEL=1|true|yes|on). Slurm and rabbitmq have ordering
# requirements (munge first, user configuration after broker start).
function start_supervisor {
supervisord -c /etc/supervisor/supervisord.conf
sleep 5
local parallel=false
case "$STARTUP_PARALLEL" in
1|true|yes|on) parallel=true ;;
esac
# Bookkeeping for services started in the background.
local pids=()
local names=()
# start_service NAME CMD...: run CMD; in parallel mode background it and
# record PID/name for wait_services, otherwise warn only when the command
# fails AND supervisord does not report the service RUNNING.
start_service() {
local name="$1"
shift
if $parallel; then
"$@" &
pids+=("$!")
names+=("$name")
else
if ! "$@"; then
if ! supervisorctl status "$name" 2>/dev/null | grep -q RUNNING; then
log_warn "Service start failed: $name"
fi
fi
fi
}
# wait_services: reap all backgrounded start commands, warn for any service
# not RUNNING afterwards, then reset the bookkeeping arrays.
wait_services() {
local i
for i in "${!pids[@]}"; do
if ! wait "${pids[$i]}"; then
if ! supervisorctl status "${names[$i]}" 2>/dev/null | grep -q RUNNING; then
log_warn "Service start failed: ${names[$i]}"
fi
fi
done
pids=()
names=()
}
# Each service starts only when its SUPERVISOR_MANAGE_* flag is set and the
# service name does not appear in $NONUSE.
if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
if [[ $NONUSE != *"postgres"* ]]
then
start_service "postgres" supervisorctl start postgresql
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then
if [[ $NONUSE != *"cron"* ]]
then
start_service "cron" supervisorctl start cron
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then
if [[ $NONUSE != *"proftp"* ]]
then
start_service "proftpd" supervisorctl start proftpd
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then
if [[ $NONUSE != *"condor"* ]]
then
start_service "condor" supervisorctl start condor
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_REDIS ]]; then
if [[ $NONUSE != *"redis"* ]]
then
start_service "redis" supervisorctl start redis
fi
fi
wait_services
# Slurm requires munge to be up first. When supervisor manages Slurm the
# daemons go through supervisorctl; otherwise munged/slurmctld/slurmd are
# launched directly.
if [[ ! -z $SUPERVISOR_MANAGE_SLURM ]]; then
log_info "Starting munge"
mkdir -p /tmp/slurm && chown -R "${GALAXY_USER:-galaxy}:${GALAXY_USER:-galaxy}" /tmp/slurm
supervisorctl start munge
wait_for_munge || true
if [[ $NONUSE != *"slurmctld"* ]]
then
log_info "Starting slurmctld"
supervisorctl start slurmctld
fi
if [[ $NONUSE != *"slurmd"* ]]
then
log_info "Starting slurmd"
supervisorctl start slurmd
fi
else
log_info "Starting munge"
mkdir -p /var/run/munge && chown -R root:root /var/run/munge
mkdir -p /tmp/slurm && chown -R "${GALAXY_USER:-galaxy}:${GALAXY_USER:-galaxy}" /tmp/slurm
/usr/sbin/munged -f -F --num-threads="${MUNGE_NUM_THREADS:-2}" &
wait_for_munge || true
if [[ $NONUSE != *"slurmctld"* ]]
then
log_info "Starting slurmctld"
/usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log
fi
if [[ $NONUSE != *"slurmd"* ]]
then
log_info "Starting slurmd"
/usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_RABBITMQ ]]; then
if [[ $NONUSE != *"rabbitmq"* ]]
then
log_info "Starting rabbitmq"
supervisorctl start rabbitmq
wait_for_rabbitmq
# Users/vhosts are configured via ansible once the broker answers, then
# the broker is restarted to pick the changes up.
log_info "Configuring rabbitmq users"
ansible-playbook -c local /usr/local/bin/configure_rabbitmq_users.yml &> /dev/null
log_info "Restarting rabbitmq"
supervisorctl restart rabbitmq
fi
fi
if [[ ! -z $SUPERVISOR_MANAGE_FLOWER ]]; then
# Flower only makes sense when celery and rabbitmq are both enabled.
if [[ $NONUSE != *"flower"* && $NONUSE != *"celery"* && $NONUSE != *"rabbitmq"* ]]
then
log_info "Starting flower"
supervisorctl start flower
fi
fi
}
# Adjusts gravity/Galaxy configuration according to $NONUSE and the
# GRAVITY_MANAGE_* flags, then starts the Galaxy services via galaxyctl.
function start_gravity {
if [[ ! -z $GRAVITY_MANAGE_CELERY ]]; then
if [[ $NONUSE == *"celery"* ]]
then
log_info "Disabling Galaxy celery app"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable" "false" &> /dev/null
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable_beat" "false" &> /dev/null
else
export GALAXY_CONFIG_ENABLE_CELERY_TASKS='true'
if [[ $NONUSE != *"redis"* ]]
then
# Configure Galaxy to use Redis as the result backend for Celery tasks
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #celery_conf:' replace=' celery_conf:'" &> /dev/null
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ # result_backend:.*' replace=' result_backend: redis://127.0.0.1:6379/0'" &> /dev/null
fi
fi
fi
if [[ ! -z $GRAVITY_MANAGE_GX_IT_PROXY ]]; then
if [[ $NONUSE == *"nodejs"* ]]
then
log_info "Disabling nodejs"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gx_it_proxy.enable" "false" &> /dev/null
else
# TODO: Remove this after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #interactivetools_enable:.*' replace=' interactivetools_enable: true'" &> /dev/null
fi
fi
if [[ ! -z $GRAVITY_MANAGE_TUSD ]]; then
if [[ $NONUSE == *"tusd"* ]]
then
log_info "Disabling Galaxy tusd app"
python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.enable" "false" &> /dev/null
# Keep a backup of the nginx upload config, then blank it so nginx stops
# delegating uploads to the (disabled) tusd.
cp /etc/nginx/delegated_uploads.conf /etc/nginx/delegated_uploads.conf.source
echo "# No delegated uploads" > /etc/nginx/delegated_uploads.conf
else
# TODO: Remove this after gravity config manager is updated to handle env vars properly
ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #galaxy_infrastructure_url:.*' replace=' galaxy_infrastructure_url: ${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}'" &> /dev/null
fi
fi
if [[ $NONUSE != *"rabbitmq"* ]]
then
# Set AMQP internal connection for Galaxy
export GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION="pyamqp://galaxy:galaxy@localhost:5672/galaxy"
fi
# Set the SUPERVISORD_SOCKET to overwrite gravity's default.
# The default will put the socket into the export dir, into gravity's state directory. And this caused some problems to start supervisord.
export SUPERVISORD_SOCKET=${SUPERVISORD_SOCKET:-/tmp/galaxy_supervisord.sock}
# Start galaxy services using gravity
/usr/local/bin/galaxyctl -d start
}
# Point postgres at the data directory on the export volume before start.
if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
if [[ $NONUSE != *"postgres"* ]]
then
# Change the data_directory of postgresql in the main config file
ansible localhost -m lineinfile -a "line='data_directory = \'$PG_DATA_DIR_HOST\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'" &> /dev/null
fi
fi
# Privileged mode unlocks CVMFS tool data tables, Interactive Tools, and
# (when no parent docker socket is inherited) docker-in-docker.
if $PRIVILEGED; then
# In privileged mode autofs and CVMFS may be available, so only append existing files.
export GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH="${GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH},/cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml"
log_info "Enabling Galaxy Interactive Tools"
export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=True
export GALAXY_CONFIG_TOOL_CONFIG_FILE="$GALAXY_CONFIG_TOOL_CONFIG_FILE,$GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE"
# Update domain-based interactive tools nginx configuration with the galaxy domain if provided
if [[ ! -z $GALAXY_DOMAIN ]]; then
sed -i "s/\(\.interactivetool\.\)[^;]*/\1$GALAXY_DOMAIN/g" /etc/nginx/interactive_tools_common.conf
fi
if [[ -z $DOCKER_PARENT ]]; then
#build the docker in docker environment
# Ensure cgroup mounts are set up without triggering dind "no command" warnings.
bash /root/cgroupfs_mount.sh true
log_info "Starting services (supervisord)"
start_supervisor
log_info "Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})"
start_gravity
supervisorctl start docker
wait_for_docker
else
#inheriting /var/run/docker.sock from parent, assume that you need to
#run docker with sudo to validate
echo "$GALAXY_USER ALL = NOPASSWD : ALL" >> /etc/sudoers
log_info "Starting services (supervisord)"
start_supervisor
log_info "Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})"
start_gravity
fi
# Optionally pre-pull interactive tool images whose GALAXY_IT_FETCH_* flag
# is set; the image name is read indirectly from GALAXY_IT_<NAME>_IMAGE.
if [[ ! -z $PULL_IT_IMAGES ]]; then
log_info "Pulling interactive tool images (this may take a while)"
for it in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do
enabled_var_name="GALAXY_IT_FETCH_${it}";
if [[ ${!enabled_var_name} ]]; then
# Store name in a var
image_var_name="GALAXY_IT_${it}_IMAGE"
# And then read from that var
docker pull "${!image_var_name}"
fi
done
fi
else
log_info "Interactive Tools disabled (start with --privileged to enable)"
export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=False
log_info "Starting services (supervisord)"
start_supervisor
log_info "Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})"
start_gravity
fi
# Block until the database answers before running migrations/user creation.
wait_for_postgres
# Optionally verify the whole stack is serving before declaring success.
if [[ "$STARTUP_VALIDATE" == "true" ]]; then
log_info "Validating Galaxy readiness..."
if ! /tool_deps/_conda/bin/galaxy-wait -g "$STARTUP_GALAXY_URL" -v --timeout "$STARTUP_WAIT_TIMEOUT"; then
show_failure_logs
exit 1
fi
log_success "Galaxy is ready"
fi
# Make sure the database is automatically updated
if [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]
then
log_info "Updating Galaxy database"
sh manage_db.sh -c $GALAXY_CONFIG_FILE upgrade
fi
# In case the user wants the default admin to be created, do so.
if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]]
then
log_info "Ensuring admin user $GALAXY_DEFAULT_ADMIN_USER exists"
python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
-c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
# If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.
# then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables
# visible here.
# The file needs to be executable (chmod a+x post-start-actions.sh)
if [ -x /export/post-start-actions.sh ]
then
# uses ephemeris, present in docker-galaxy-stable, to wait for the local instance
/tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log &&
/export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &
fi
fi
# Reinstall tools if the user wants to. GALAXY_AUTO_UPDATE_TOOLS is a
# comma-separated list of tool YAML files handed to ephemeris' shed-tools.
if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]]
then
    # Save IFS unconditionally and restore it unconditionally. The previous
    # `galaxy-wait ... && OLDIFS=$IFS` skipped the save when galaxy-wait
    # failed, yet still restored IFS later — clobbering it with an empty
    # string and corrupting word splitting for the rest of the script.
    OLDIFS=$IFS
    # Log to $GALAXY_LOGS_DIR like the post-start-actions block above.
    if /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > "$GALAXY_LOGS_DIR/post-start-actions.log"
    then
        IFS=','
        for TOOL_YML in $GALAXY_AUTO_UPDATE_TOOLS
        do
            log_info "Installing tools from $TOOL_YML"
            /tool_deps/_conda/bin/shed-tools install -g "http://127.0.0.1" -a "$GALAXY_DEFAULT_ADMIN_KEY" -t "$TOOL_YML"
            /tool_deps/_conda/bin/conda clean --tarballs --yes
        done
    fi
    IFS=$OLDIFS
fi
# migrate custom Visualisations (Galaxy plugins)
# this is needed by the new client build system
python3 ${GALAXY_ROOT_DIR}/scripts/plugin_staging.py
# Enable verbose output. Quote the tr character classes: unquoted
# [:upper:]/[:lower:] are glob patterns and the shell would expand them
# against matching single-character filenames in the current directory.
if [ "$(echo "${GALAXY_LOGGING:-no}" | tr '[:upper:]' '[:lower:]')" = "full" ]
then
log_success "Startup complete; streaming logs"
tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log >&${STARTUP_OUT_FD}
else
log_success "Startup complete; streaming logs"
tail -f $GALAXY_LOGS_DIR/*.log >&${STARTUP_OUT_FD}
fi
================================================
FILE: galaxy/tool_conf_interactive.xml.sample
================================================
================================================
FILE: galaxy/tool_sheds_conf.xml
================================================
================================================
FILE: galaxy/welcome.html
================================================
Galaxy is an open platform for supporting data intensive
research. Galaxy is developed by The Galaxy Team
with the support of many contributors.
The Galaxy Docker project is supported by the University of Freiburg, part of de.NBI.
================================================
FILE: skills/galaxy-docker/SKILL.md
================================================
---
name: galaxy-docker
description: "Maintain and upgrade the bgruening/docker-galaxy project: bump Galaxy/Ubuntu versions, update Ansible roles and scheduler support, adjust startup/CI/tests, and manage CVMFS."
---
# Galaxy Docker skill
Use this skill when working in the `bgruening/docker-galaxy` repo to upgrade Galaxy releases or refresh runtime, scheduler, CVMFS, and CI behavior.
## Quick start workflow
1. **Define targets**: Galaxy release, Ubuntu base, scheduler expectations (Slurm/HTCondor), and CI scope.
2. **Update build**: `galaxy/Dockerfile` (release ARGs, build stages, slurm-drmaa, uv usage, npm cleanup).
3. **Update Ansible**: `galaxy/ansible/requirements.yml` and playbooks (`rabbitmq.yml`, `condor.yml`, `slurm.yml`, `nginx.yml`, `proftpd.yml`).
4. **Update runtime**: `galaxy/startup.sh`, `galaxy/startup2.sh`, and `galaxy/ansible/templates/export_user_files.py.j2`.
5. **CVMFS changes**: `cvmfs/` sidecar + `galaxy/docker-compose.yaml` + resolver config.
6. **Tests/CI**: `test/` scripts and `.github/workflows/` (buildx caches, test orchestration).
7. **Run tests**: Use both `--privileged` and non-privileged runs where relevant.
## Repo map (files to touch)
- Build: `galaxy/Dockerfile`
- Startup: `galaxy/startup.sh`, `galaxy/startup2.sh`
- Galaxy config export: `galaxy/ansible/templates/export_user_files.py.j2`
- Ansible roles: `galaxy/ansible/requirements.yml`
- Services: `galaxy/ansible/rabbitmq.yml`, `galaxy/ansible/condor.yml`, `galaxy/ansible/slurm.yml`, `galaxy/ansible/nginx.yml`, `galaxy/ansible/proftpd.yml`
- Slurm config template: `galaxy/ansible/templates/configure_slurm.py.j2`
- Container resolvers: `galaxy/ansible/templates/container_resolvers_conf.yml.j2`
- CVMFS sidecar: `cvmfs/` and `galaxy/docker-compose.yaml`
- Tests: `test/bioblend/`, `test/slurm/`, `test/gridengine/`, `test/cvmfs/`, `test/container_resolvers_conf.ci.yml`
- CI: `.github/workflows/*.yml` and `.github/workflows/single.sh`
## Guardrails and expectations
- Keep Python installs on `uv` (build and runtime). Avoid `pip install` directly.
- Prefer buildx cache mounts in Dockerfiles and `cache-to/cache-from` in GitHub Actions.
- Use `--rm` for test containers and clean up by name to avoid conflicts.
- If `/tmp` fills up on CI, use `TMPDIR=/var/tmp` for heavy Docker tests.
- Use `startup2` for richer diagnostics; keep `startup.sh` minimal.
## CVMFS
- Privileged runs use full CVMFS client + autofs.
- Sidecar is optional via compose profile (`cvmfs/` image).
- Container resolver config should include cached mulled paths on both CVMFS and `/export`.
- See `references/upgrade-25.1.md` for the exact sidecar design and tests.
## Slurm
- Ensure Slurm works in containers without systemd/cgroup v2 requirements.
- `configure_slurm.py.j2` writes `cgroup.conf` with `CgroupPlugin=disabled`.
- Slurm-DRMAA is built from source when ABI mismatches exist (documented in references).
## Tests (typical order)
- `test/slurm/test.sh` (set `GALAXY_IMAGE=galaxy:test` if needed)
- `test/gridengine/test.sh` (uses ephemeris container for wait)
- `test/bioblend/test.sh`
- `test/cvmfs/test.sh` (sidecar + mount propagation)
- `startup2` sanity: `docker run --rm --privileged ... /usr/bin/startup2`
## References
- `references/upgrade-25.1.md` for 25.1 upgrade decisions, pins, and pitfalls.
================================================
FILE: skills/galaxy-docker/references/upgrade-25.1.md
================================================
# 25.1 upgrade reference (docker-galaxy)
This reference captures the key decisions, pins, and fixes applied during the 25.1 upgrade.
Use it as a **lessons-learned checklist** and re-validate each item for the next release.
## Base versions and build decisions
- **Ubuntu base**: `ubuntu:24.04` in `galaxy/Dockerfile` (`galaxy_cluster_base` stage).
- **Galaxy release**: set via `ARG GALAXY_RELEASE` in `galaxy/Dockerfile` (target `release_25.1`).
- **gx-it-proxy**: preinstalled via npm during build, then npm removed to save space.
- **Python installs**: migrate to `uv` for optional dependencies and tests.
- **jemalloc**: custom build kept for Grid Engine compatibility (see comment in Dockerfile).
## Slurm and slurm-drmaa (25.1-specific)
- **Slurm version**: for 25.1 on Ubuntu 24.04, Slurm 24.11 was required for ABI compatibility in this image. Re-check available packages and ABI compatibility each upgrade.
- **Slurm-DRMAA**: built from source in a dedicated build stage because the natefoo PPA binaries were built against Slurm 23.11 and broke at runtime with 24.11.
- Build stage in `galaxy/Dockerfile` has a large comment that explains this as temporary and should be removed once 24.11-compatible packages are available.
- **Cgroups**: container-friendly configuration writes `/etc/slurm/cgroup.conf` with `CgroupPlugin=disabled` (via `configure_slurm.py.j2`).
- **Runtime config**: `configure_slurm.py.j2` merges `slurmd -C`, `lscpu -J`, and `/proc/meminfo` to avoid hardware mismatch errors; also forces `TaskPlugin=task/none`, `JobAcctGatherType=jobacct_gather/none`, `MpiDefault=none`, `ProctrackType=proctrack/pgid`.
## RabbitMQ
- Use Team RabbitMQ repositories (per rabbitmq.com install instructions).
- Pin `rabbitmq_version` in `galaxy/ansible/rabbitmq.yml`.
- Install Erlang packages explicitly and enable `rabbitmq_management`.
## HTCondor
- Prefer upstream roles and official repositories when they support the target OS and version.
- If upstream lags (e.g., no packages yet), document the temporary workaround and remove it once upstream catches up.
## CVMFS
- Main container supports CVMFS only in `--privileged` mode.
- Sidecar container added under `cvmfs/` with autofs and a minimal Ansible playbook.
- Compose profile `cvmfs` in `galaxy/docker-compose.yaml` uses rshared mount propagation so the Galaxy container sees CVMFS mounts.
- Container resolver config adds cached mulled paths:
- `/cvmfs/singularity.galaxyproject.org/all`
- `/export/container_cache/singularity/mulled`
## Startup scripts
- `startup2` adds colored logging, runtime summary, and a `GALAXY_*` env summary with masking.
- CVMFS messaging avoids early warnings by skipping manual mounts when autofs is configured.
- `startup2` and `startup.sh` call `/root/cgroupfs_mount.sh true` to avoid the "No command specified" warning.
- Optional dependency installs use `uv` when `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` is set.
- Creates `/tmp/slurm`, `/var/log/slurm`, and `/var/lib/slurm/slurmctld` to avoid missing state file errors.
## Job handlers
- `galaxy/ansible/galaxy_job_conf.yml` ensures `job_handler_assignment_method: db-skip-locked` when dynamic handlers are enabled.
- `galaxy/Dockerfile` runs `ansible-playbook /ansible/galaxy_job_conf.yml` after copying the `galaxy.yml.sample` so the setting persists in the built image.
## CI and tests
- Buildx caching enabled in workflows; `single.sh` uses buildx with cache-to/cache-from.
- `test/container_resolvers_conf.ci.yml` keeps resolver tests fast.
- `test/cvmfs/test.sh` validates mount propagation from sidecar to Galaxy.
- `test/gridengine/test.sh` uses ephemeris container to `galaxy-wait`.
- `test/bioblend` updated for Galaxy 25.1 and newer Bioblend.
## Known pitfalls and fixes
- **CVMFS warnings on startup**: resolved by checking autofs config before manual mounts.
- **Munge readiness**: add a wait loop and configurable `MUNGE_NUM_THREADS` (default 2).
- **Dynamic handler warning in Gravity**: fix by setting `job_handler_assignment_method` via Ansible.
- **No command specified**: avoid by running `/root/cgroupfs_mount.sh true` instead of no args.
- **/tmp full on CI**: run tests with `TMPDIR=/var/tmp`.
## Files touched during the 25.1 upgrade
High-signal files for reference:
- `galaxy/Dockerfile`
- `galaxy/startup.sh`, `galaxy/startup2.sh`
- `galaxy/ansible/requirements.yml`
- `galaxy/ansible/rabbitmq.yml`, `galaxy/ansible/condor.yml`, `galaxy/ansible/slurm.yml`
- `galaxy/ansible/templates/configure_slurm.py.j2`
- `galaxy/ansible/templates/container_resolvers_conf.yml.j2`
- `galaxy/ansible/templates/export_user_files.py.j2`
- `galaxy/docker-compose.yaml`
- `cvmfs/` (sidecar)
- `test/` (slurm, gridengine, bioblend, cvmfs)
- `.github/workflows/` (buildx caching, single-container tests, CVMFS workflow)
================================================
FILE: test/bioblend/Dockerfile
================================================
# Bioblend test image: installs Bioblend from a GitHub release archive and
# runs its tox suite against a linked Galaxy instance (BIOBLEND_GALAXY_URL).
# Use uppercase AS for the build stage, consistent with the other Dockerfiles
# in this repo and Docker's casing lint.
FROM alpine:3.17 AS build
ENV BIOBLEND_VERSION=1.7.0 \
TOX_ENV=py310 \
BIOBLEND_GALAXY_API_KEY=fakekey \
BIOBLEND_GALAXY_URL=http://galaxy \
BIOBLEND_TEST_JOB_TIMEOUT="240" \
GALAXY_VERSION=release_25.1 \
UV_INSTALL_DIR=/usr/local/bin
ADD "https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND_VERSION.zip" /src/bioblend.zip
# Install uv, then Bioblend plus the test tooling (pep8/tox) system-wide.
RUN apk update && apk add bash curl python3-dev unzip \
&& curl -LsSf https://astral.sh/uv/install.sh | bash \
&& uv pip install --system pep8 tox \
&& cd /src \
&& unzip bioblend.zip && rm bioblend.zip \
&& mv "bioblend-$BIOBLEND_VERSION" bioblend \
&& cd bioblend \
&& uv pip install --system .
WORKDIR /src/bioblend
CMD /bin/sh -c "tox -e $TOX_ENV -- -k 'not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_cancel_invocation and not test_run_step_actions'"
# library tests, needs share /tmp filesystem
# * test_upload_from_galaxy_filesystem
# * test_get_datasets
# * test_datasets_from_fs
# library tests, needs share /tmp filesystem
# * test_upload_from_galaxy_filesystem
# * test_get_datasets
# * test_datasets_from_fs
================================================
FILE: test/bioblend/test.sh
================================================
#!/bin/bash
# Build the Bioblend test image, run its test suite against the linked
# `galaxy` container (sharing /tmp for upload tests), then drop the image.
docker build -t bioblend_test . || { echo "Bioblend docker image build failed."; exit 1; }
docker run --rm --name bioblend_test --link galaxy -v /tmp/:/tmp/ bioblend_test || { echo "Bioblend tests failed."; exit 1; }
docker rmi bioblend_test
================================================
FILE: test/container_resolvers_conf.ci.yml
================================================
# Minimal container resolvers for CI to keep resolve_toolbox fast.
- type: explicit
- type: cached_mulled_singularity
cache_directory: "/export/container_cache/singularity/mulled"
================================================
FILE: test/cvmfs/test.sh
================================================
#!/usr/bin/env bash
# Validates CVMFS mount propagation: the sidecar mounts the repositories into
# a shared host directory, and a plain Galaxy container must see them.
set -euo pipefail
if ! docker build -t galaxy:test ./galaxy; then
echo "Galaxy docker image build failed."
exit 1
fi
if ! docker build -t galaxy-cvmfs:test ./cvmfs; then
echo "CVMFS sidecar image build failed."
exit 1
fi
# Shared mount and cache directories on the host.
cvmfs_mount_dir="$(mktemp -d)"
cvmfs_cache_dir="$(mktemp -d)"
# Best-effort teardown: lazily unmount the repos and stop autofs inside the
# sidecar before stopping it, so the host directories can be removed.
cleanup() {
docker exec galaxy-cvmfs-test sh -c "umount -l /cvmfs/data.galaxyproject.org /cvmfs/singularity.galaxyproject.org >/dev/null 2>&1 || true" || true
docker exec galaxy-cvmfs-test sh -c "service autofs stop >/dev/null 2>&1 || true" || true
docker stop galaxy-cvmfs-test >/dev/null 2>&1 || true
rm -rf "$cvmfs_mount_dir" "$cvmfs_cache_dir" >/dev/null 2>&1 || true
}
trap cleanup EXIT
# rshared propagation lets the sidecar's mounts appear in the host dir.
if ! docker run -d --rm --name galaxy-cvmfs-test --privileged \
-e CVMFS_REPOSITORIES=data.galaxyproject.org,singularity.galaxyproject.org \
-v "$cvmfs_mount_dir:/cvmfs:rshared" \
-v "$cvmfs_cache_dir:/var/lib/cvmfs:delegated" \
galaxy-cvmfs:test >/dev/null; then
echo "CVMFS sidecar container failed to start."
exit 1
fi
# Poll (up to 90 x 2 s) for the repo to become readable inside the sidecar.
mounted=false
for _ in $(seq 1 90); do
if docker exec galaxy-cvmfs-test ls /cvmfs/data.galaxyproject.org/byhand >/dev/null 2>&1; then
mounted=true
break
fi
sleep 2
done
if ! $mounted; then
echo "CVMFS mount test failed in the sidecar."
exit 1
fi
# The actual propagation check: a separate Galaxy container mounting the same
# host directory must see the repository contents.
if ! docker run --rm \
-v "$cvmfs_mount_dir:/cvmfs:rshared" \
galaxy:test /bin/sh -c "ls /cvmfs/data.galaxyproject.org/byhand >/dev/null"; then
echo "CVMFS mount not visible in the Galaxy container."
exit 1
fi
================================================
FILE: test/gridengine/Dockerfile
================================================
# Grid Engine master stage: installs exec/client packages first and pre-seeds
# spooldefaults.bin from the Debian client package, which must happen before
# gridengine-master is installed (see the Launchpad bug below).
FROM ubuntu:22.04 AS sge_master
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update -qq \
    && apt-get install -y wget gridengine-exec gridengine-client \
    # need to run this before gridengine-master installation (https://bugs.launchpad.net/ubuntu/+source/gridengine/+bug/1774302)
    && wget http://ftp.debian.org/debian/pool/main/g/gridengine/gridengine-client_8.1.9+dfsg-10+b1_amd64.deb \
    && dpkg -x gridengine-client_8.1.9+dfsg-10+b1_amd64.deb ge-client \
    && cp ge-client/usr/lib/gridengine/spooldefaults.bin /usr/lib/gridengine/ \
    && cp ge-client/usr/lib/gridengine/libspool*.so /usr/lib/gridengine/ \
    && rm -rf gridengine-client_8.1.9+dfsg-10+b1_amd64.deb ge-client \
    # use apt-get (stable CLI) rather than apt for scripted cleanup
    && apt-get purge -y wget && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN apt-get update -qq \
    && apt-get install -y gridengine-master \
    && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# dummy user account used for test job submissions
RUN useradd -m dummy \
    && echo "dummy:dummy" | chpasswd
ADD --chmod=755 setup_gridengine.sh /usr/local/bin/setup_gridengine.sh

# Bioblend test-runner stage: Python plus uv and a pinned bioblend for the
# Galaxy API test script.
FROM python:3.10.15 AS sge_bioblend_test
ENV UV_INSTALL_DIR=/usr/local/bin
RUN apt-get update -qq \
    && apt-get install -y curl \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && uv pip install --system bioblend==1.3.0 \
    && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
================================================
FILE: test/gridengine/act_qmaster
================================================
sgemaster
================================================
FILE: test/gridengine/job_conf.xml.sge
================================================
/usr/lib/gridengine-drmaa/lib/libdrmaa.so.1.0
False
11111
================================================
FILE: test/gridengine/master_script.sh
================================================
#!/bin/bash
# Create the galaxy user with the UID used inside the Galaxy container,
# bootstrap the Grid Engine daemons, then follow the qmaster log so the
# container stays in the foreground.
useradd --uid 1450 --create-home galaxy
/usr/local/bin/setup_gridengine.sh
tail -f /var/spool/gridengine/qmaster/messages
================================================
FILE: test/gridengine/outputhostname/outputhostname.xml
================================================
data in ascending or descending order
hostname > ${out_file1}
================================================
FILE: test/gridengine/outputhostname.tool.xml
================================================
================================================
FILE: test/gridengine/setup_gridengine.sh
================================================
#!/bin/bash
# Bootstrap a single-node Grid Engine cluster: start the qmaster and exec
# daemons, register this host as execution and submit host, and create a
# minimal "testq" queue.  Any extra arguments are registered as additional
# submit hosts.
# hostname > /var/lib/gridengine/default/common/act_qmaster
/etc/init.d/gridengine-master start
/etc/init.d/gridengine-exec start
# Register this host as an execution host.
cat << EOS > /tmp/qconf-ae.txt
hostname $(hostname)
load_scaling NONE
complex_values NONE
user_lists NONE
xuser_lists NONE
projects NONE
xprojects NONE
usage_scaling NONE
report_variables NONE
EOS
qconf -Ae /tmp/qconf-ae.txt
# Add submit host (use $(...) instead of legacy backticks)
qconf -as "$(hostname)"
# shell bash
cat << EOS > /tmp/qconf-aq.txt
qname testq
hostlist $(hostname)
seq_no 0
load_thresholds np_load_avg=1.75
suspend_thresholds NONE
nsuspend 1
suspend_interval 00:05:00
priority 0
min_cpu_interval 00:05:00
processors UNDEFINED
qtype BATCH INTERACTIVE
ckpt_list NONE
pe_list make
rerun FALSE
slots 1
tmpdir /tmp
shell /bin/bash
prolog NONE
epilog NONE
shell_start_mode posix_compliant
starter_method NONE
suspend_method NONE
resume_method NONE
terminate_method NONE
notify 00:00:60
owner_list NONE
user_lists NONE
xuser_lists NONE
subordinate_list NONE
complex_values NONE
projects NONE
xprojects NONE
calendar NONE
initial_state default
s_rt INFINITY
h_rt INFINITY
s_cpu INFINITY
h_cpu INFINITY
s_fsize INFINITY
h_fsize INFINITY
s_data INFINITY
h_data INFINITY
s_stack INFINITY
h_stack INFINITY
s_core INFINITY
h_core INFINITY
s_rss INFINITY
h_rss INFINITY
s_vmem INFINITY
h_vmem INFINITY
EOS
qconf -Aq /tmp/qconf-aq.txt
# avoid 'stdin: is not a tty'
sed -i -e 's/^mesg n//' /root/.profile
# echo "hostname ; date" | qsub
#
# Register any extra hosts passed on the command line as submit hosts.
# "$@" (quoted) keeps each argument a single word even if it contains spaces.
for HOST in "$@"
do
    qconf -as "$HOST"
done
================================================
FILE: test/gridengine/setup_tool.sh
================================================
#!/bin/bash
# Point Galaxy at the test tool_conf.xml, run the container startup, then keep
# the container in the foreground by following the Galaxy logs.
# cp tool_conf.xml config
export GALAXY_CONFIG_TOOL_CONFIG_FILE=/galaxy/tool_conf.xml
/usr/bin/startup
# 'tailf' was removed from util-linux (>= 2.35) and is absent on modern
# distributions; 'tail -f' is the portable equivalent.
tail -f /home/galaxy/logs/*
================================================
FILE: test/gridengine/test.sh
================================================
#!/usr/bin/env bash
# End-to-end test: run a Galaxy tool on an external Grid Engine cluster and
# verify the job executed on the sgemaster node.
# Fix: quote $PWD/$EXPORT in the -v mounts (paths with spaces previously broke
# the docker commands) and use $(...) instead of legacy backticks.
echo "Test that jobs run successfully on an external gridengine cluster"
docker build --target sge_master --tag sge_master .
docker build --target sge_bioblend_test --tag sge_bioblend_test .
# start master
# We use a temporary directory as an export dir that will hold the shared data between
# galaxy and gridengine:
EXPORT=$(mktemp --directory)
chmod 777 "${EXPORT}"
docker run -d --rm --hostname sgemaster --name sgemaster -v "${EXPORT}":/export -v "$PWD"/master_script.sh:/usr/local/bin/master_script.sh sge_master /usr/local/bin/master_script.sh
# wait for sge master
sleep 10
# start galaxy
GALAXY_CONTAINER=${GALAXY_CONTAINER:-quay.io/bgruening/galaxy}
EPHEMERIS_IMAGE=${EPHEMERIS_IMAGE:-quay.io/biocontainers/ephemeris:0.10.11--pyhdfd78af_0}
GALAXY_WAIT_TIMEOUT=${GALAXY_WAIT_TIMEOUT:-600}
GALAXY_CONTAINER_NAME=galaxytest
GALAXY_CONTAINER_HOSTNAME=galaxytest
GALAXY_ROOT_DIR=/galaxy
docker run -d --rm \
    -e SGE_ROOT=/var/lib/gridengine \
    --link sgemaster:sgemaster \
    --name "${GALAXY_CONTAINER_NAME}" \
    --hostname "${GALAXY_CONTAINER_HOSTNAME}" \
    -p 20080:80 -e NONUSE="condor" \
    -v "$PWD"/job_conf.xml.sge:/etc/galaxy/job_conf.xml \
    -v "${EXPORT}":/export \
    -v "$PWD"/outputhostname:"$GALAXY_ROOT_DIR"/tools/outputhostname \
    -v "$PWD"/outputhostname.tool.xml:"$GALAXY_ROOT_DIR"/outputhostname.tool.xml \
    -v "$PWD"/setup_tool.sh:"$GALAXY_ROOT_DIR"/setup_tool.sh \
    -v "$PWD"/tool_conf.xml:"$GALAXY_ROOT_DIR"/tool_conf.xml \
    -v "$PWD"/act_qmaster:/var/lib/gridengine/default/common/act_qmaster \
    "${GALAXY_CONTAINER}" \
    "$GALAXY_ROOT_DIR"/setup_tool.sh
echo "Wait 30sec"
sleep 30
echo "show logs from ${GALAXY_CONTAINER_NAME}"
docker logs "${GALAXY_CONTAINER_NAME}"
# Add host setting galaxytest to sgemaster
echo "Get host info from ${GALAXY_CONTAINER_HOSTNAME}"
SGECLIENT=$(docker exec "${GALAXY_CONTAINER_NAME}" cat /etc/hosts | grep "${GALAXY_CONTAINER_HOSTNAME}")
echo "Add host info to sgemaster"
docker exec sgemaster bash -c "echo ${SGECLIENT} >> /etc/hosts ; /etc/init.d/gridengine-master restart"
echo "Output /etc/hosts on sgemaster"
docker exec sgemaster cat /etc/hosts
# Add gridengine client host
echo "Add submit host ${GALAXY_CONTAINER_HOSTNAME}"
docker exec sgemaster bash -c "qconf -as ${GALAXY_CONTAINER_HOSTNAME}"
echo "Waiting for Galaxy to become ready"
if ! docker run --rm --link "${GALAXY_CONTAINER_NAME}":galaxytest \
    "${EPHEMERIS_IMAGE}" galaxy-wait -g http://galaxytest --timeout "${GALAXY_WAIT_TIMEOUT}"; then
    echo "Galaxy did not become ready within ${GALAXY_WAIT_TIMEOUT}s."
    docker logs "${GALAXY_CONTAINER_NAME}" || true
    exit 1
fi
echo "Exec test"
docker run --rm --link galaxytest:galaxytest -v "$PWD"/test_outputhostname.py:/work/test_outputhostname.py sge_bioblend_test python /work/test_outputhostname.py > out
# The tool runs 'hostname' on the cluster; success means it printed 'sgemaster'.
grep sgemaster out
RET=$?
# remove container
docker stop sgemaster || true
docker stop galaxytest || true
# Remove images
docker rmi sge_master
docker rmi sge_bioblend_test
if [ $RET -ne 0 ]; then
    echo "Grid Engine test failed"
    exit $RET
fi
================================================
FILE: test/gridengine/test_outputhostname.py
================================================
#!/usr/bin/python
"""Run the 'outputhostname' tool via the Galaxy API and print its output.

The surrounding shell test greps this script's stdout for the Grid Engine
master's hostname; when the job never reaches the 'ok' state within ~30s the
literal string 'noresult' is printed instead.
"""
import time

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance('http://galaxytest', key='fakekey')
gi.histories.create_history()
history = gi.histories.get_most_recently_used_history()
history_id = history['id']
tool_output = gi.tools.run_tool(
    history_id=history_id,
    tool_id="outputhostname",
    tool_inputs={}
)
# Poll once per second (up to 30s) until a dataset reaches the 'ok' state.
result = "noresult"
for _ in range(30):
    time.sleep(1)
    show_history = gi.histories.show_history(history_id)
    ok_ids = show_history['state_ids']['ok']
    if ok_ids:
        dataset = gi.datasets.show_dataset(ok_ids[0])
        result = dataset['peek']
        break
print(result)
================================================
FILE: test/gridengine/tool_conf.xml
================================================
================================================
FILE: test/slurm/Dockerfile
================================================
# External Slurm test node: munge + Slurm (ubuntu-hpc PPA) + docker-ce under
# supervisor, sharing configuration with Galaxy through /export.
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
ENV UV_INSTALL_DIR=/usr/local/bin
# Normalized to apt-get throughout (apt has no stable CLI for scripts); the
# build runs as root, so the previous 'sudo' wrapper was a no-op and is gone.
RUN apt-get update -qq && apt-get install -y --no-install-recommends \
    munge \
    python3-psutil supervisor samtools apt-transport-https software-properties-common dirmngr gpg curl sudo gpg-agent && \
    add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 && \
    apt-get update -qq && \
    apt-get install -y slurm-wlm && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    # NOTE: apt-key is deprecated; kept for now to match upstream docker install docs
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" && \
    apt-get update -qq && \
    apt-get install -y docker-ce && \
    cd / && \
    ldconfig && \
    apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ && \
    adduser --disabled-password --gecos "" galaxy && \
    mkdir -p /var/log/slurm /tmp/slurm && \
    touch /var/log/slurm/slurmctld.log /var/log/slurm/slurmd.log
ADD configure_slurm.py /usr/local/bin/configure_slurm.py
ADD munge.conf /etc/default/munge
# Start munge once at build time so its runtime directories are initialized.
RUN service munge start && service munge stop
ADD startup.sh /usr/bin/startup.sh
ADD supervisor_slurm.conf /etc/supervisor/conf.d/slurm.conf
RUN chmod +x /usr/bin/startup.sh
#RUN locale-gen en_US.UTF-8 && dpkg-reconfigure locales
# Defaults consumed by startup.sh / configure_slurm.py; all overridable at run time.
ENV GALAXY_DIR=/export/galaxy \
    SYMLINK_TARGET=/galaxy \
    SLURM_USER_NAME=galaxy \
    SLURM_UID=1450 \
    SLURM_GID=1450 \
    SLURM_PARTITION_NAME=work \
    SLURM_CLUSTER_NAME=Cluster \
    SLURMD_AUTOSTART=True \
    SLURMCTLD_AUTOSTART=True \
    SLURM_CONF_PATH=/export/slurm.conf \
    MUNGE_KEY_PATH=/export/munge.key
VOLUME ["/export/", "/var/lib/docker"]
CMD ["/usr/bin/startup.sh"]
================================================
FILE: test/slurm/configure_slurm.py
================================================
from socket import gethostname
from string import Template
from os import environ
import subprocess
import json
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
SlurmctldHost=$control_machine
#SlurmctldAddr=
#
AuthType=auth/munge
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp/slurm
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
JobAcctGatherType=jobacct_gather/none
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
SchedulerType=sched/backfill
SelectType=select/cons_tres
SelectTypeParameters=CR_Core_Memory
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreFlags=job_comment
ClusterName=$cluster_name
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=$cpus RealMemory=$memory State=UNKNOWN$topology
PartitionName=$partition_name Nodes=$nodes Default=YES MaxTime=INFINITE State=UP Shared=YES
'''
# Maps slurm.conf node attribute names to the environment variables that may
# override the auto-detected hardware values when rendering the config.
ENV_MAP = {
"CPUs": "SLURM_CPUS",
"RealMemory": "SLURM_MEMORY",
"Boards": "SLURM_BOARDS",
"SocketsPerBoard": "SLURM_SOCKETS_PER_BOARD",
"CoresPerSocket": "SLURM_CORES_PER_SOCKET",
"ThreadsPerCore": "SLURM_THREADS_PER_CORE",
}
def _as_int(value):
try:
return int(str(value).split()[0])
except (TypeError, ValueError):
return None
def _slurmd_status():
try:
output = subprocess.check_output(["slurmd", "-C"], stderr=subprocess.DEVNULL).decode("utf-8")
except Exception:
return {}
info = {}
for chunk in output.split():
if "=" in chunk:
key, value = chunk.split("=", 1)
info[key] = value
return info
def _lscpu_status():
    """Derive node topology fields from ``lscpu -J`` JSON output.

    Returns a dict of string values for ``CPUs``, ``SocketsPerBoard``,
    ``CoresPerSocket`` and ``ThreadsPerCore`` (whichever lscpu reports),
    always defaulting ``Boards`` to "1"; an empty dict when lscpu is
    unavailable or its output cannot be parsed.
    """
    try:
        raw = subprocess.check_output(["lscpu", "-J"], stderr=subprocess.DEVNULL).decode("utf-8")
        parsed = json.loads(raw)
    except Exception:
        return {}
    # Flatten the lscpu entries into {field-name-without-colon: data}.
    fields = {
        entry.get("field", "").strip().strip(":"): entry.get("data")
        for entry in parsed.get("lscpu", [])
    }
    mapping = (
        ("CPUs", "CPU(s)"),
        ("SocketsPerBoard", "Socket(s)"),
        ("CoresPerSocket", "Core(s) per socket"),
        ("ThreadsPerCore", "Thread(s) per core"),
    )
    info = {}
    for out_key, lscpu_key in mapping:
        numeric = _as_int(fields.get(lscpu_key))
        if numeric is not None:
            info[out_key] = str(numeric)
    info.setdefault("Boards", "1")
    return info
def _real_memory_mb():
try:
with open("/proc/meminfo", "r") as handle:
for line in handle:
if line.startswith("MemTotal:"):
parts = line.split()
if len(parts) >= 2:
return int(int(parts[1]) / 1024)
except Exception:
return None
return None
def main():
    """Render /etc/slurm/slurm.conf (and a disabled cgroup.conf) for this node.

    Hardware facts come from ``slurmd -C`` first, then ``lscpu``, then
    /proc/meminfo; every value can be overridden via SLURM_* environment
    variables (see ENV_MAP and the template placeholders).
    """
    node_name = gethostname()

    # Merge the detection sources; earlier sources take precedence.
    detected = _slurmd_status()
    for key, value in _lscpu_status().items():
        detected.setdefault(key, value)
    if "RealMemory" not in detected:
        mem_mb = _real_memory_mb()
        if mem_mb is not None:
            detected["RealMemory"] = str(mem_mb)

    # Optional " Key=Value" topology attributes appended to the NodeName line;
    # environment overrides win over detected values.
    topology = ""
    for attr in ("Boards", "SocketsPerBoard", "CoresPerSocket", "ThreadsPerCore"):
        override_var = ENV_MAP.get(attr)
        chosen = environ.get(override_var) if override_var else None
        if chosen is None:
            chosen = detected.get(attr)
        if chosen is not None:
            topology += f" {attr}={chosen}"

    params = {
        "hostname": node_name,
        "nodes": ",".join(environ.get('SLURM_NODES', node_name).split(',')),
        "cluster_name": environ.get('SLURM_CLUSTER_NAME', 'Cluster'),
        "control_machine": environ.get('SLURM_CONTROL_MACHINE', node_name),
        "user": environ.get('SLURM_USER_NAME', '{{ galaxy_user_name }}'),
        "cpus": environ.get("SLURM_CPUS", detected.get("CPUs") or "1"),
        "partition_name": environ.get('SLURM_PARTITION_NAME', 'debug'),
        "memory": environ.get("SLURM_MEMORY", detected.get("RealMemory") or "1024"),
        "topology": topology,
    }

    with open("/etc/slurm/slurm.conf", "w") as conf:
        conf.write(Template(SLURM_CONFIG_TEMPLATE).substitute(params))
    # Slurm 24.11 supports disabling cgroups to avoid systemd/cgroup requirements in containers.
    with open("/etc/slurm/cgroup.conf", "w") as cgroup_conf:
        cgroup_conf.write("CgroupPlugin=disabled\n")

if __name__ == "__main__":
    main()
================================================
FILE: test/slurm/job_conf.xml
================================================
/usr/lib/slurm-drmaa/lib/libdrmaa.so
-p work -n 2
False
False
False
galaxy
$defaults
bridge
True
================================================
FILE: test/slurm/munge.conf
================================================
###############################################################################
# $Id: munge.sysconfig 507 2006-05-11 20:28:55Z dun $
###############################################################################
##
# Pass additional command-line options to the daemon.
##
OPTIONS="--force --key-file /etc/munge/munge.key --num-threads 1"
##
# Adjust the scheduling priority of the daemon.
##
# NICE=
================================================
FILE: test/slurm/startup.sh
================================================
#!/usr/bin/env bash
# Container entrypoint: align the Slurm user's UID/GID with Galaxy's, share
# the munge key and generated slurm.conf via /export, prepare a galaxy-lib
# virtualenv, and hand control to supervisord.

# Setup the galaxy user UID/GID and pass control on to supervisor
if id "$SLURM_USER_NAME" >/dev/null 2>&1; then
    echo "user exists"
else
    echo "user does not exist, creating"
    useradd -m -d /var/"$SLURM_USER_NAME" "$SLURM_USER_NAME"
fi
usermod -u "$SLURM_UID" "$SLURM_USER_NAME"
groupmod -g "$SLURM_GID" "$SLURM_USER_NAME"

# Publish the munge key on the shared export volume (first run only).
if [ ! -f "$MUNGE_KEY_PATH" ]
then
    cp /etc/munge/munge.key "$MUNGE_KEY_PATH"
fi

# Generate slurm.conf (and cgroup.conf) once and persist them on /export.
if [ ! -f "$SLURM_CONF_PATH" ]
then
    mkdir -p /etc/slurm
    python3 /usr/local/bin/configure_slurm.py
    cp /etc/slurm/slurm.conf "$SLURM_CONF_PATH"
    if [ -f /etc/slurm/cgroup.conf ]
    then
        cp /etc/slurm/cgroup.conf "$(dirname "$SLURM_CONF_PATH")/cgroup.conf"
        rm -f /etc/slurm/cgroup.conf
    fi
    rm /etc/slurm/slurm.conf
fi

# Create the galaxy-lib virtualenv once.  The previous '-f' test never matched
# (.venv is a directory), so this block re-ran on every start; '-d' makes it a
# proper run-once guard.
if [ ! -d "$GALAXY_DIR"/.venv ]
then
    mkdir -p "$GALAXY_DIR"/.venv
    chown "$SLURM_USER_NAME":"$SLURM_USER_NAME" "$GALAXY_DIR"/.venv
    su "$SLURM_USER_NAME" -c \
        "GALAXY_DIR=$GALAXY_DIR uv venv \"$GALAXY_DIR\"/.venv && \
        uv pip install --python \"$GALAXY_DIR\"/.venv/bin/python galaxy-lib"
fi

mkdir -p /tmp/slurmd
chown "$SLURM_USER_NAME" /tmp/slurm /tmp/slurmd
# -sfn keeps the link idempotent across container restarts (plain -s fails,
# and -sf on an existing dir symlink would create the link inside the target).
ln -sfn "$GALAXY_DIR" "$SYMLINK_TARGET"
ln -sf "$SLURM_CONF_PATH" /etc/slurm/slurm.conf
if [ -f "$(dirname "$SLURM_CONF_PATH")/cgroup.conf" ]
then
    ln -sf "$(dirname "$SLURM_CONF_PATH")/cgroup.conf" /etc/slurm/cgroup.conf
fi
exec /usr/bin/supervisord -n -c /etc/supervisor/supervisord.conf
================================================
FILE: test/slurm/supervisor_slurm.conf
================================================
; Supervisor programs for the external Slurm test node.  The slurmctld/slurmd
; autostart flags are driven by environment variables so the same image can
; run as controller, worker, or both.
[program:munge]
user=root
; -F keeps munged in the foreground for supervisor; --force skips some of
; munged's startup safety checks (see munged(8)) so the key shared on /export
; is accepted.
command=/usr/sbin/munged --key-file=%(ENV_MUNGE_KEY_PATH)s -F --force
[program:slurmctld]
user=root
; -D: foreground, -L: log file, -f: the slurm.conf persisted on /export.
command=/usr/sbin/slurmctld -D -L /var/log/slurm/slurmctld.log -f %(ENV_SLURM_CONF_PATH)s
autostart = %(ENV_SLURMCTLD_AUTOSTART)s
autorestart = true
priority = 200
[program:slurmd]
user=root
command=/usr/sbin/slurmd -f %(ENV_SLURM_CONF_PATH)s -D -L /var/log/slurm/slurmd.log
autostart = %(ENV_SLURMD_AUTOSTART)s
autorestart = true
priority = 300
================================================
FILE: test/slurm/test.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
set -x
# Test that jobs run successfully on an external slurm cluster
# We use a temporary directory as an export dir that will hold the shared data between
# galaxy and slurm:
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# /var/tmp keeps the shared export dir on a persistent filesystem.
EXPORT=`mktemp --directory -p /var/tmp`
chmod 777 "$EXPORT"
GALAXY_IMAGE="${GALAXY_IMAGE:-galaxy:test}"
# Ensure leftover containers from previous runs don't conflict.
docker rm -f slurm galaxy-slurm-test >/dev/null 2>&1 || true
# We build the slurm image
docker build -t slurm "$SCRIPT_DIR"
# We fire up a slurm node (with hostname slurm)
docker run -d --rm -v "$EXPORT":/export -v /sys/fs/cgroup:/sys/fs/cgroup:rw --name slurm \
--hostname slurm \
slurm
# We start galaxy (without the internal slurm, but with a modified job_conf.xml)
# and link it to the slurm container (so that galaxy resolves the slurm container's hostname)
docker run -d --rm -e "NONUSE=slurmd,slurmctld" \
--link slurm --name galaxy-slurm-test -h galaxy \
-p 80:80 -v "$EXPORT":/export "${GALAXY_IMAGE}"
# We wait for the creation of the /galaxy/config/ if it does not exist yet
sleep 180s
# We restart galaxy
docker stop galaxy-slurm-test || true
# Wait until the auto-removed (--rm) container has actually disappeared so
# its name can be reused below.
for i in $(seq 1 30); do
if ! docker ps -a --format '{{.Names}}' | grep -qx galaxy-slurm-test; then
break
fi
sleep 1s
done
# We copy the job_conf.xml to the $EXPORT folder
# (1450:1450 is the galaxy user's UID/GID inside the Galaxy container)
docker run --rm -v "$EXPORT":/export -v "$SCRIPT_DIR":/workspace busybox sh -c \
"mkdir -p /export/galaxy/config && cp /workspace/job_conf.xml /export/galaxy/config/job_conf.xml && chown 1450:1450 /export/galaxy/config/job_conf.xml"
docker run -d --rm -e "NONUSE=slurmd,slurmctld" \
--link slurm --name galaxy-slurm-test -h galaxy \
-p 80:80 -v "$EXPORT":/export "${GALAXY_IMAGE}"
# Let's submit a job from the galaxy container and check it runs in the slurm container
sleep 60s
# Poll until the slurm controller answers 'scontrol ping' with UP.
for i in $(seq 1 30); do
if docker exec galaxy-slurm-test scontrol ping 2>/dev/null | grep -q "UP"; then
break
fi
sleep 2s
done
docker exec galaxy-slurm-test scontrol ping | grep -q "UP"
# srun from the Galaxy container must land on the external 'slurm' node.
docker exec galaxy-slurm-test su - galaxy -c 'srun hostname' | grep slurm
# Also exercise the DRMAA path Galaxy itself uses (slurm-drmaa + python drmaa):
# the job's stdout, collected via /export, must contain the slurm hostname.
docker exec -i galaxy-slurm-test /bin/sh -s <<'EOF' | grep slurm
set -e
rm -f /export/drmaa.out /export/drmaa.err
DRMAA_LIBRARY_PATH=/usr/lib/slurm-drmaa/lib/libdrmaa.so /galaxy_venv/bin/python - <<'PY'
import drmaa
with drmaa.Session() as session:
jt = session.createJobTemplate()
jt.remoteCommand = "/bin/hostname"
jt.outputPath = ":" + "/export/drmaa.out"
jt.errorPath = ":" + "/export/drmaa.err"
jt.nativeSpecification = "-n 1"
jobid = session.runJob(jt)
session.deleteJobTemplate(jt)
session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
with open("/export/drmaa.out", "r") as handle:
print(handle.read().strip())
PY
EOF
docker stop galaxy-slurm-test slurm || true
docker rmi slurm || true
# TODO: Run a galaxy tool and check it runs on the cluster