[
  {
    "path": ".dive-ci",
    "content": "rules:\n  # If the efficiency is measured below X%, mark as failed.\n  # Expressed as a ratio between 0-1.\n  lowestEfficiency: 0.95\n\n  # If the amount of wasted space is at least X or larger than X, mark as failed.\n  # Expressed in B, KB, MB, and GB.\n  # highestWastedBytes: 20MB\n\n  # If the amount of wasted space makes up for X% or more of the image, mark as failed.\n  # Note: the base image layer is NOT included in the total image size.\n  # Expressed as a ratio between 0-1; fails if the threshold is met or crossed.\n  highestUserWastedPercent: 0.10\n"
  },
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\nindent_style = space\nindent_size = 2\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\n\n[*.py]\nindent_size = 4\n"
  },
  {
    "path": ".github/workflows/compose.yml",
    "content": "name: build-and-test\non: [push]\njobs:\n  build_container_base:\n    if: false  # Temporarily disable workflow \n    runs-on: ubuntu-22.04\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n      - name: Set image tag\n        id: image_tag\n        run: |\n          if [ \"${GITHUB_REF#refs/heads/}\" = \"master\" ]; then\n            echo \"image_tag=latest\" >> $GITHUB_OUTPUT;\n          else\n            echo \"image_tag=${GITHUB_REF#refs/heads/}\" >> $GITHUB_OUTPUT;\n          fi\n      - name: Docker Login\n        run: echo \"${{ secrets.docker_registry_password }}\" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}\n      - name: Set up Docker Buildx\n        id: buildx\n        uses: docker/setup-buildx-action@v3\n        with:\n          version: v0.17.1\n      - name: Run Buildx\n        env:\n          image_name: galaxy-container-base\n        run: |\n          for i in {1..4}; do\n            set +e\n            docker buildx build  \\\n            --output \"type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true\" \\\n            --cache-from type=gha \\\n            --cache-to type=gha,mode=max \\\n            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \\\n            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \\\n            --build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \\\n            $image_name && break || echo \"Fail.. 
Retrying\"\n          done;\n        shell: bash\n        working-directory: ./compose/base-images\n  build_cluster_base:\n    needs: build_container_base\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n      - name: Set image tag\n        id: image_tag\n        run: |\n          if [ \"${GITHUB_REF#refs/heads/}\" = \"master\" ]; then\n            echo \"image_tag=latest\" >> $GITHUB_OUTPUT;\n          else\n            echo \"image_tag=${GITHUB_REF#refs/heads/}\" >> $GITHUB_OUTPUT;\n          fi\n      - name: Docker Login\n        run: echo \"${{ secrets.docker_registry_password }}\" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}\n      - name: Set up Docker Buildx\n        id: buildx\n        uses: docker/setup-buildx-action@v3\n        with:\n          version: v0.17.1\n      - name: Run Buildx\n        env:\n          image_name: galaxy-cluster-base\n        run: |\n          for i in {1..4}; do\n            set +e\n            docker buildx build  \\\n            --output \"type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/$image_name:${{ steps.image_tag.outputs.image_tag }},push=true\" \\\n            --cache-from type=gha \\\n            --cache-to type=gha,mode=max \\\n            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \\\n            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \\\n            --build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \\\n            $image_name && break || echo \"Fail.. 
Retrying\"\n          done;\n        shell: bash\n        working-directory: ./compose/base-images\n  build:\n    needs: build_cluster_base\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        image:\n          - name: galaxy-server\n          - name: galaxy-nginx\n          - name: galaxy-htcondor\n          - name: galaxy-slurm\n          - name: galaxy-slurm-node-discovery\n          - name: galaxy-kind\n          - name: pulsar\n          - name: galaxy-configurator\n          - name: galaxy-bioblend-test\n            subdir: tests/\n          - name: galaxy-workflow-test\n            subdir: tests/\n          - name: galaxy-selenium-test\n            subdir: tests/\n      fail-fast: false\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n      - name: Set image tag\n        id: image_tag\n        run: |\n          if [ \"${GITHUB_REF#refs/heads/}\" = \"master\" ]; then\n            echo \"image_tag=latest\" >> $GITHUB_OUTPUT;\n          else\n            echo \"image_tag=${GITHUB_REF#refs/heads/}\" >> $GITHUB_OUTPUT;\n          fi\n      - name: Docker Login\n        run: echo \"${{ secrets.docker_registry_password }}\" | docker login -u ${{ secrets.docker_registry_username }} --password-stdin ${{ secrets.docker_registry }}\n      - name: Set up Docker Buildx\n        id: buildx\n        uses: docker/setup-buildx-action@v3\n        with:\n          version: v0.17.1\n      - name: Run Buildx\n        run: |\n          for i in {1..4}; do\n            set +e\n            docker buildx build \\\n            --output \"type=image,name=${{ secrets.docker_registry }}/${{ secrets.docker_registry_username }}/${{ matrix.image.name }}:${{ steps.image_tag.outputs.image_tag }},push=true\" \\\n            --cache-from type=gha \\\n            --cache-to type=gha,mode=max \\\n            --build-arg IMAGE_TAG=${{ steps.image_tag.outputs.image_tag }} \\\n            --build-arg DOCKER_REGISTRY=${{ secrets.docker_registry }} \\\n            
--build-arg DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }} \\\n            --build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy \\\n            ${{ matrix.image.subdir }}${{ matrix.image.name }} && break || echo \"Fail.. Retrying\"\n          done;\n        shell: bash\n        working-directory: ./compose\n  test:\n    needs: [build]\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        infrastructure:\n          - name: galaxy-base\n            files: -f docker-compose.yml\n          - name: galaxy-proxy-prefix\n            files: -f docker-compose.yml\n            env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND=\"not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path\"\n            exclude_test:\n              - selenium\n          - name: galaxy-htcondor\n            files: -f docker-compose.yml -f docker-compose.htcondor.yml\n          - name: galaxy-slurm\n            files: -f docker-compose.yml -f docker-compose.slurm.yml\n            env: SLURM_NODE_COUNT=3\n            options: --scale slurm_node=3\n          - name: galaxy-pulsar\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml\n            exclude_test:\n              - workflow_quality_control\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job\"\n          - name: galaxy-pulsar-mq\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml\n            exclude_test:\n              - workflow_quality_control\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job\"\n          - name: galaxy-k8s\n            files: -f docker-compose.yml -f docker-compose.k8s.yml\n          - name: galaxy-singularity\n            files: -f docker-compose.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not 
test_get_container_resolvers and not test_show_container_resolver\"\n          - name: galaxy-pulsar-mq-singularity\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver\"\n            exclude_test:\n              - workflow_quality_control\n          - name: galaxy-slurm-singularity\n            files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_get_container_resolvers and not test_show_container_resolver\"\n          - name: galaxy-htcondor-singularity\n            files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_get_container_resolvers and not test_show_container_resolver\"\n        test:\n          - name: bioblend\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml\n            exit-from: galaxy-bioblend-test\n            timeout: 60\n            second_run: \"true\"\n          - name: workflow_ard\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n            workflow: sklearn/ard/ard.ga\n            timeout: 60\n            second_run: \"true\"\n          - name: workflow_quality_control\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n            workflow: training/sequence-analysis/quality-control/quality_control.ga\n            timeout: 60\n          - name: workflow_example1\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n      
      workflow: example1/wf3-shed-tools.ga\n            timeout: 60\n          - name: selenium\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml\n            exit-from: galaxy-selenium-test\n            timeout: 60\n      fail-fast: false\n    steps:\n      # Self-made `exclude` as Github Actions currently does not support\n      # exclude/including of dicts in matrices\n      - name: Check if test should be run\n        id: run_check\n        if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true\n        run: echo \"run=true\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@v6\n      - name: Set image tag in env\n        run: echo \"IMAGE_TAG=${GITHUB_REF#refs/heads/}\" >> $GITHUB_ENV\n      - name: Master branch - Set image tag to 'latest'\n        if: github.ref == 'refs/heads/master'\n        run: echo \"IMAGE_TAG=latest\" >> $GITHUB_ENV\n      - name: Set WORKFLOWS env for workflows-test\n        if: matrix.test.workflow\n        run: echo \"WORKFLOWS=${{ matrix.test.workflow }}\" >> $GITHUB_ENV\n      - name: Install Docker Compose\n        run: |\n          sudo apt-get update -qq && sudo apt-get install docker-compose -y\n      - name: Run tests for the first time\n        if: steps.run_check.outputs.run\n        run: |\n          export DOCKER_REGISTRY=${{ secrets.docker_registry }}\n          export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}\n          export ${{ matrix.infrastructure.env }}\n          export TIMEOUT=${{ matrix.test.timeout }}\n          docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} config\n          env\n          for i in {1..4}; do\n            echo \"Running test - try \\#$i\"\n            echo \"Removing export directory if existent\";\n            sudo rm -rf export\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} pull\n            set +e\n            
docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}\n            test_exit_code=$?\n            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down\n            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then\n              echo \"Test failed..\";\n              continue;\n            else\n              exit $test_exit_code;\n            fi\n          done;\n          exit 1\n        shell: bash\n        working-directory: ./compose\n        continue-on-error: false\n      - name: Fix file names before saving artifacts\n        if: failure()\n        run: |\n          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv \"$1\" \"${1//:/-}\"' bash {} \\;\n      - name: Allow upload-artifact read access\n        if: failure()\n        run: sudo chmod -R +r ./compose/export/galaxy/database\n      - name: Save artifacts for debugging a failed test\n        uses: actions/upload-artifact@v6\n        if: failure()\n        with:\n          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run\n          path: ./compose/export/galaxy/database\n      - name: Clean up after first run\n        if: matrix.test.second_run == 'true'\n        run: |\n          sudo rm -rf export/postgres\n          sudo rm -rf export/galaxy/database\n        working-directory: ./compose\n      - name: Run tests a second time\n        if: matrix.test.second_run == 'true' && steps.run_check.outputs.run\n        run: |\n          export DOCKER_REGISTRY=${{ secrets.docker_registry }}\n          export DOCKER_REGISTRY_USERNAME=${{ secrets.docker_registry_username }}\n          export ${{ matrix.infrastructure.env }}\n          export TIMEOUT=${{ matrix.test.timeout }}\n          for i in {1..4}; do\n       
     echo \"Running test - try \\#$i\"\n            echo \"Removing export directory if existent\";\n            sudo rm -rf export\n            set +e\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}\n            test_exit_code=$?\n            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down\n            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then\n              echo \"Test failed..\";\n              continue;\n            else\n              exit $test_exit_code;\n            fi\n          done;\n          exit 1\n        shell: bash\n        working-directory: ./compose\n        continue-on-error: false\n      - name: Fix file names before saving artifacts\n        if: failure() && matrix.test.second_run == 'true'\n        run: |\n          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv \"$1\" \"${1//:/-}\"' bash {} \\;\n      - name: Allow upload-artifact read access\n        if: failure() && matrix.test.second_run == 'true'\n        run: sudo chmod -R +r ./compose/export/galaxy/database\n      - name: Save artifacts for debugging a failed test\n        uses: actions/upload-artifact@v6\n        if: failure() && matrix.test.second_run == 'true'\n        with:\n          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run\n          path: ./compose/export/galaxy/database\n"
  },
  {
    "path": ".github/workflows/cvmfs.yml",
    "content": "name: cvmfs-sidecar\non:\n  push:\n    branches:\n      - '**'\n    tags:\n      - '*'\n  pull_request:\n    paths:\n      - 'cvmfs/**'\n      - 'test/cvmfs/**'\n      - '.github/workflows/cvmfs.yml'\n\njobs:\n  build_test_publish:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n\n      - name: Detect CVMFS changes\n        id: changes\n        uses: dorny/paths-filter@v3\n        with:\n          filters: |\n            cvmfs:\n              - 'cvmfs/**'\n              - 'test/cvmfs/**'\n              - '.github/workflows/cvmfs.yml'\n\n      - name: Run CVMFS sidecar tests\n        if: github.event_name == 'pull_request' || steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/')\n        run: bash test/cvmfs/test.sh\n\n      - name: Set image version\n        id: version\n        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))\n        run: |\n          set -euo pipefail\n          if [[ \"${GITHUB_REF}\" == refs/tags/* ]]; then\n            version=\"${GITHUB_REF_NAME}\"\n          else\n            ref=\"${GITHUB_REF_NAME//\\//-}\"\n            version=\"${ref}-${GITHUB_SHA::7}\"\n          fi\n          echo \"version=$version\" >> \"$GITHUB_OUTPUT\"\n\n      - name: Set up Docker Buildx\n        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))\n        uses: docker/setup-buildx-action@v3\n\n      - name: Login to Quay IO\n        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || startsWith(github.ref, 'refs/tags/'))\n        uses: docker/login-action@v3\n        with:\n          registry: quay.io\n          username: '$oauthtoken'\n          password: ${{ secrets.QUAY_OAUTH_TOKEN }}\n\n      - name: Build and push CVMFS image\n        if: github.event_name == 'push' && (steps.changes.outputs.cvmfs == 'true' || 
startsWith(github.ref, 'refs/tags/'))\n        uses: docker/build-push-action@v6\n        with:\n          context: \"{{defaultContext}}:cvmfs\"\n          push: true\n          tags: quay.io/bgruening/cvmfs:${{ steps.version.outputs.version }}\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n"
  },
  {
    "path": ".github/workflows/lint.yml",
    "content": "name: Lint\non: [push]\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n    - name: Checkout\n      uses: actions/checkout@v6\n    # - name: Cleanup to only use compose\n    #   run: rm -R docs galaxy test\n    - name: Run shellcheck with reviewdog\n      uses: reviewdog/action-shellcheck@v1.27.0\n      with:\n        github_token: ${{ secrets.GITHUB_TOKEN }}\n        reporter: github-check\n        level: warning\n        pattern: \"*.sh\"\n    - name: Run hadolint with reviewdog\n      uses: reviewdog/action-hadolint@v1.46.0\n      with:\n        github_token: ${{ secrets.GITHUB_TOKEN }}\n        reporter: github-check\n"
  },
  {
    "path": ".github/workflows/pull-request.yml",
    "content": "name: pr-test\non: pull_request\njobs:\n  test:\n    if: false  # Temporarily disable workflow \n    runs-on: ubuntu-22.04\n    strategy:\n      matrix:\n        infrastructure:\n          - name: galaxy-base\n            files: -f docker-compose.yml\n          - name: galaxy-proxy-prefix\n            files: -f docker-compose.yml\n            env: GALAXY_PROXY_PREFIX=/arbitrary_Galaxy-prefix GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost/arbitrary_Galaxy-prefix EXTRA_SKIP_TESTS_BIOBLEND=\"not test_import_export_workflow_dict and not test_import_export_workflow_from_local_path\"\n            exclude_test:\n              - selenium\n          - name: galaxy-htcondor\n            files: -f docker-compose.yml -f docker-compose.htcondor.yml\n          - name: galaxy-slurm\n            files: -f docker-compose.yml -f docker-compose.slurm.yml\n            env: SLURM_NODE_COUNT=3\n            options: --scale slurm_node=3\n          - name: galaxy-pulsar\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job\"\n            exclude_test:\n              - workflow_quality_control\n          - name: galaxy-pulsar-mq\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job\"\n            exclude_test:\n              - workflow_quality_control\n          - name: galaxy-k8s\n            files: -f docker-compose.yml -f docker-compose.k8s.yml\n          - name: galaxy-singularity\n            files: -f docker-compose.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_get_container_resolvers and not test_show_container_resolver\"\n          - name: galaxy-pulsar-mq-singularity\n            files: -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml -f docker-compose.singularity.yml\n         
   env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_wait_for_job and not test_get_container_resolvers and not test_show_container_resolver\"\n            exclude_test:\n              - workflow_quality_control\n          - name: galaxy-slurm-singularity\n            files: -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_get_container_resolvers and not test_show_container_resolver\"\n          - name: galaxy-htcondor-singularity\n            files: -f docker-compose.yml -f docker-compose.htcondor.yml -f docker-compose.singularity.yml\n            env: EXTRA_SKIP_TESTS_BIOBLEND=\"not test_get_container_resolvers and not test_show_container_resolver\"\n        test:\n          - name: bioblend\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.bioblend.yml\n            exit-from: galaxy-bioblend-test\n            timeout: 60\n            second_run: \"true\"\n          - name: workflow_ard\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n            workflow: sklearn/ard/ard.ga\n            timeout: 60\n            second_run: \"true\"\n          - name: workflow_quality_control\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n            workflow: training/sequence-analysis/quality-control/quality_control.ga\n            timeout: 60\n          - name: workflow_example1\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.workflows.yml\n            exit-from: galaxy-workflow-test\n            workflow: example1/wf3-shed-tools.ga\n            timeout: 60\n          - name: selenium\n            files: -f tests/docker-compose.test.yml -f tests/docker-compose.test.selenium.yml\n            exit-from: galaxy-selenium-test\n            timeout: 60\n 
     fail-fast: false\n    steps:\n      # Self-made `exclude` as Github Actions currently does not support\n      # exclude/including of dicts in matrices\n      - name: Check if test should be run\n        id: run_check\n        if: contains(matrix.infrastructure.exclude_test, matrix.test.name) != true\n        run: echo \"run=true\" >> $GITHUB_OUTPUT\n      - name: Checkout\n        uses: actions/checkout@v6\n      - name: Set WORKFLOWS env for workflows-test\n        if: matrix.test.workflow\n        run: echo \"WORKFLOWS=${{ matrix.test.workflow }}\" >> $GITHUB_ENV\n      - name: Build galaxy-container-base\n        env:\n          image_name: galaxy-container-base\n        run: |\n          docker buildx build  \\\n            --output \"type=image,name=quay.io/bgruening/$image_name:ci-testing\" \\\n            --cache-from type=gha \\\n            --cache-to type=gha,mode=max \\\n            --build-arg IMAGE_TAG=ci-testing \\\n            $image_name\n        working-directory: ./compose/base-images\n      - name: Build galaxy-cluster-base\n        env:\n          image_name: galaxy-cluster-base\n        run: |\n          docker buildx build  \\\n            --output \"type=image,name=quay.io/bgruening/$image_name:ci-testing\" \\\n            --cache-from type=gha \\\n            --cache-to type=gha,mode=max \\\n            --build-arg IMAGE_TAG=ci-testing \\\n            $image_name\n        working-directory: ./compose/base-images\n      - name: Install Docker Compose\n        run: |\n          sudo apt-get update -qq && sudo apt-get install docker-compose -y\n      - name: Run tests for the first time\n        if: steps.run_check.outputs.run\n        run: |\n          export IMAGE_TAG=ci-testing\n          export COMPOSE_DOCKER_CLI_BUILD=1\n          export DOCKER_BUILDKIT=1\n          export ${{ matrix.infrastructure.env }}\n          export TIMEOUT=${{ matrix.test.timeout }}\n          docker-compose ${{ matrix.infrastructure.files }} ${{ 
matrix.test.files }} config\n          env\n          for i in {1..4}; do\n            echo \"Running test - try \\#$i\"\n            echo \"Removing export directory if existent\";\n            sudo rm -rf export\n            set +e\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} build --build-arg IMAGE_TAG=ci-testing --build-arg GALAXY_REPO=https://github.com/galaxyproject/galaxy\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}\n            test_exit_code=$?\n            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} down\n            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then\n              echo \"Test failed..\";\n              continue;\n            else\n              exit $test_exit_code;\n            fi\n          done;\n          exit 1\n        shell: bash\n        working-directory: ./compose\n        continue-on-error: false\n      - name: Fix file names before saving artifacts\n        if: failure()\n        run: |\n          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv \"$1\" \"${1//:/-}\"' bash {} \\;\n      - name: Allow upload-artifact read access\n        if: failure()\n        run: sudo chmod -R +r ./compose/export/galaxy/database\n      - name: Save artifacts for debugging a failed test\n        uses: actions/upload-artifact@v6\n        if: failure()\n        with:\n          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_first-run\n          path: ./compose/export/galaxy/database\n      - name: Clean up after first run\n        if: matrix.test.second_run == 'true'\n        run: |\n          sudo rm -rf export/postgres\n          sudo rm -rf export/galaxy/database\n        
working-directory: ./compose\n      - name: Run tests a second time\n        if: matrix.test.second_run == 'true' && steps.run_check.outputs.run\n        run: |\n          export IMAGE_TAG=ci-testing\n          export COMPOSE_DOCKER_CLI_BUILD=1\n          export DOCKER_BUILDKIT=1\n          export ${{ matrix.infrastructure.env }}\n          export TIMEOUT=${{ matrix.test.timeout }}\n          for i in {1..4}; do\n            echo \"Running test - try \\#$i\"\n            echo \"Removing export directory if existent\";\n            sudo rm -rf export\n            set +e\n            docker-compose ${{ matrix.infrastructure.files }} ${{ matrix.test.files }} up ${{ matrix.infrastructure.options }} --exit-code-from ${{ matrix.test.exit-from }}\n            test_exit_code=$?\n            error_exit_codes_count=$(expr $(docker ps -a --filter exited=1 | wc -l) - 1)\n            if [ $error_exit_codes_count != 0 ] || [ $test_exit_code != 0 ] ; then\n              echo \"Test failed..\";\n              continue;\n            else\n              exit $test_exit_code;\n            fi\n          done;\n          exit 1\n        shell: bash\n        working-directory: ./compose\n        continue-on-error: false\n      - name: Fix file names before saving artifacts\n        if: failure() && matrix.test.second_run == 'true'\n        run: |\n          sudo find ./compose/export/galaxy/database -depth -name '*:*' -execdir bash -c 'mv \"$1\" \"${1//:/-}\"' bash {} \\;\n      - name: Allow upload-artifact read access\n        if: failure() && matrix.test.second_run == 'true'\n        run: sudo chmod -R +r ./compose/export/galaxy/database\n      - name: Save artifacts for debugging a failed test\n        uses: actions/upload-artifact@v6\n        if: failure() && matrix.test.second_run == 'true'\n        with:\n          name: ${{ matrix.infrastructure.name }}_${{ matrix.test.name }}_second-run\n          path: ./compose/export/galaxy/database\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: release-CI\n\non:\n  release:\n    types: [published]\n\n  # Allows you to run this workflow manually from the Actions tab\n  workflow_dispatch:\n\njobs:\n  build_and_publish:\n    runs-on: ubuntu-latest\n\n    steps:\n      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it\n      - uses: actions/checkout@v6\n          \n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n        \n      - name: Login to Quay IO\n        uses: docker/login-action@v3\n        with:\n          registry: quay.io\n          username: '$oauthtoken'\n          password: ${{ secrets.QUAY_OAUTH_TOKEN }}\n    \n      - name: Build docker image and push to quay.io\n        uses: docker/build-push-action@v6\n        with:\n          context: \"{{defaultContext}}:galaxy\"\n          push: true\n          tags: quay.io/bgruening/galaxy:${{ github.event.release.tag_name }}\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n\n"
  },
  {
    "path": ".github/workflows/single.sh",
    "content": "#!/bin/bash\nset -ex\n\ndocker --version\ndocker info\n\nexport GALAXY_HOME=/home/galaxy\nexport GALAXY_USER=admin@example.org\nexport GALAXY_USER_EMAIL=admin@example.org\nexport GALAXY_USER_PASSWD=password\nexport BIOBLEND_GALAXY_API_KEY=fakekey\nexport BIOBLEND_GALAXY_URL=http://localhost:8080\nexport EPHEMERIS_IMAGE=${EPHEMERIS_IMAGE:-quay.io/biocontainers/ephemeris:0.10.11--pyhdfd78af_0}\nexport GALAXY_WAIT_TIMEOUT=${GALAXY_WAIT_TIMEOUT:-600}\n\nSKIP_SFTP=false\nSKIP_DIVE=false\n\nif [[ \"${CI:-}\" == \"true\" ]]; then\n    sudo apt-get update -qq\n    #sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::=\"--force-confmiss\" -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confnew\"\n    sudo apt-get install sshpass --no-install-recommends -y\nelse\n    if ! command -v sshpass >/dev/null 2>&1; then\n        echo \"sshpass not found; skipping SFTP test.\"\n        SKIP_SFTP=true\n    fi\nfi\n\nif [[ \"${CI:-}\" == \"true\" ]]; then\n    DIVE_VERSION=$(curl -sL \"https://api.github.com/repos/wagoodman/dive/releases/latest\" | grep '\"tag_name\":' | sed -E 's/.*\"v([^\"]+)\".*/\\1/')\n    curl -OL https://github.com/wagoodman/dive/releases/download/v${DIVE_VERSION}/dive_${DIVE_VERSION}_linux_amd64.deb\n    sudo apt install ./dive_${DIVE_VERSION}_linux_amd64.deb\n    rm ./dive_${DIVE_VERSION}_linux_amd64.deb\nelse\n    if ! 
command -v dive >/dev/null 2>&1; then\n        echo \"dive not found; skipping image analysis.\"\n        SKIP_DIVE=true\n    fi\nfi\n\ngalaxy_wait() {\n    docker run --rm --link galaxy:galaxy \\\n        \"${EPHEMERIS_IMAGE}\" galaxy-wait -g http://galaxy --timeout \"${1:-$GALAXY_WAIT_TIMEOUT}\"\n}\n\n# start building this repo\nif [[ \"${CI:-}\" == \"true\" ]]; then\n    sudo chown 1450 /tmp && sudo chmod a=rwx /tmp\nfi\n\n## define a container size check function, first parameter is the container name, second the max allowed size in MB\ncontainer_size_check () {\n\n    # check that the image size is not growing too much between releases\n    # the 19.05 monolithic image was around 1.500 MB\n    size=\"$(docker image inspect \"$1\" --format='{{.Size}}')\"\n    size_in_mb=$(($size/(1024*1024)))\n    if [[ $size_in_mb -ge $2 ]]\n    then\n        echo \"The new compiled image ($1) is larger than allowed. $size_in_mb vs. $2\"\n        sleep 2\n        #exit\n    fi\n}\n\nexport WORKING_DIR=${GITHUB_WORKSPACE:-$PWD}\n\nexport DOCKER_RUN_CONTAINER=\"quay.io/bgruening/galaxy\"\nSAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml\nGALAXY_EXTRA_MOUNTS=()\nif [ -f \"$WORKING_DIR/test/container_resolvers_conf.ci.yml\" ]; then\n    GALAXY_EXTRA_MOUNTS+=(-v \"$WORKING_DIR/test/container_resolvers_conf.ci.yml:/etc/galaxy/container_resolvers_conf.yml:ro\")\nfi\ncd \"$WORKING_DIR\"\ndocker buildx build \\\n    --load \\\n    --cache-from type=gha \\\n    --cache-to type=gha,mode=max \\\n    -t quay.io/bgruening/galaxy \\\n    galaxy/\n#container_size_check   quay.io/bgruening/galaxy  1500\n\ndocker rm -f galaxy httpstest || true\nmkdir -p local_folder\ndocker run -d -p 8080:80 -p 8021:21 -p 8022:22 \\\n    --name galaxy \\\n    --privileged=true \\\n    -v \"$(pwd)/local_folder:/export/\" \\\n    \"${GALAXY_EXTRA_MOUNTS[@]}\" \\\n    -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \\\n    -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \\\n    -e GALAXY_CONFIG_ALLOW_USER_DELETION=True 
\\\n    -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \\\n    -v /tmp/:/tmp/ \\\n    quay.io/bgruening/galaxy\n\nsleep 30\ndocker logs galaxy\n# Define start functions\ndocker_exec() {\n      cd \"$WORKING_DIR\"\n      docker exec galaxy \"$@\"\n}\ndocker_exec_run() {\n   cd \"$WORKING_DIR\"\n   docker run quay.io/bgruening/galaxy \"$@\"\n}\ndocker_run() {\n   cd \"$WORKING_DIR\"\n   docker run \"$@\"\n}\n\ndocker ps\n\n# Test submitting jobs to an external slurm cluster\ncd \"${WORKING_DIR}/test/slurm/\" && bash test.sh && cd \"$WORKING_DIR\"\n\n# Test submitting jobs to an external gridengine cluster\ncd $WORKING_DIR/test/gridengine/ && bash test.sh || exit 1 && cd $WORKING_DIR\n\necho \"SLURM and SGE tests have finished.\"\n\ndocker ps\necho 'Waiting for Galaxy to come up.'\ngalaxy_wait_timeout=$GALAXY_WAIT_TIMEOUT\ngalaxy_wait_interval=30\ngalaxy_wait_end=$((SECONDS + galaxy_wait_timeout))\nwhile [ $SECONDS -lt $galaxy_wait_end ]; do\n    if galaxy_wait 30; then\n        break\n    fi\n    echo \"Galaxy still starting, tailing logs...\"\n    docker logs --tail 200 galaxy || true\n    sleep $galaxy_wait_interval\ndone\nif [ $SECONDS -ge $galaxy_wait_end ]; then\n    echo \"Galaxy did not become ready within ${galaxy_wait_timeout}s.\"\n    docker logs --tail 400 galaxy || true\n    exit 1\nfi\n\ncurl -v --fail $BIOBLEND_GALAXY_URL/api/version\n\n# Test self-signed HTTPS\ndocker_run -d --name httpstest -p 443:443 -e \"USE_HTTPS=True\" $DOCKER_RUN_CONTAINER\nsleep 30\ndocker logs httpstest\n\nsleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version\necho | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost\n\ndocker rm -f httpstest || true\n\n# Test FTP Server upload\ndate > time.txt\n# FIXME passive mode does not work, it would require the container to run with --net=host\n#curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true\n# Test FTP Server get\n#curl -v 
--fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD\n\n# Test SFTP Server\nif [[ \"$SKIP_SFTP\" != \"true\" ]]; then\n    sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o \"StrictHostKeyChecking no\" localhost <<< $'put time.txt'\nfi\n\n# Test FTP Server from within the container (avoids host NAT/passive issues)\ndocker_exec python - <<'PY'\nimport ftplib\n\nftp = ftplib.FTP()\nftp.connect(\"localhost\", 21, timeout=30)\nftp.login(\"admin@example.org\", \"password\")\nftp.retrlines(\"LIST\")\nftp.quit()\nPY\n\n# Test CVMFS\ndocker_exec bash -c \"service autofs start\"\ndocker_exec bash -c \"cvmfs_config chksetup\"\ndocker_exec bash -c \"ls /cvmfs/data.galaxyproject.org/byhand\"\n\n# Run a ton of BioBlend test against our servers.\ncd \"$WORKING_DIR/test/bioblend/\" && . ./test.sh && cd \"$WORKING_DIR/\"\n\n# Test without install-repository wrapper\ncurl -v --fail -X POST -H \"Content-Type: application/json\" -H \"x-api-key: fakekey\" -d \\\n    '{\n        \"tool_shed_url\": \"https://toolshed.g2.bx.psu.edu\",\n        \"name\": \"cut_columns\",\n        \"owner\": \"devteam\",\n        \"changeset_revision\": \"cec635fab700\",\n        \"new_tool_panel_section_label\": \"BEDTools\"\n    }' \\\n\"http://localhost:8080/api/tool_shed_repositories\"\n\n\n# Test the 'new' tool installation script\ndocker_exec install-tools \"$SAMPLE_TOOLS\"\n# Test the Conda installation\ndocker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'\n\n# Test if data persistence works\ndocker stop galaxy\ndocker rm -f galaxy\n\ncd \"$WORKING_DIR\"\ndocker run -d -p 8080:80 \\\n    --name galaxy \\\n    --privileged=true \\\n    -v \"$(pwd)/local_folder:/export/\" \\\n    \"${GALAXY_EXTRA_MOUNTS[@]}\" \\\n    -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \\\n    -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \\\n    -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \\\n    -e 
GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \\\n    -v /tmp/:/tmp/ \\\n    quay.io/bgruening/galaxy\n\necho 'Waiting for Galaxy to come up.'\ngalaxy_wait \"$GALAXY_WAIT_TIMEOUT\"\n\n# Test if the tool installed previously is available\ncurl -v --fail 'http://localhost:8080/api/tools/toolshed.g2.bx.psu.edu/repos/devteam/cut_columns/Cut1/1.0.2'\n\n# analyze image using dive tool\nif [[ \"$SKIP_DIVE\" == \"true\" ]]; then\n    echo \"Skipping dive image analysis (dive not installed).\"\nelse\n    CI=true dive quay.io/bgruening/galaxy\nfi\n\ndocker stop galaxy\ndocker rm -f galaxy\ndocker rmi -f $DOCKER_RUN_CONTAINER || true\n"
  },
  {
    "path": ".github/workflows/single_container.yml",
    "content": "name: Single Container Test\non: [push, pull_request]\njobs:\n  build_and_test:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        python-version: ['3.10']\n    steps:\n    - name: Checkout\n      uses: actions/checkout@v6\n    - name: Configure Docker data-root\n      run: |\n        sudo mkdir -p /mnt/docker\n        if [ ! -f /etc/docker/daemon.json ]; then\n          echo '{}' | sudo tee /etc/docker/daemon.json\n        fi\n        sudo jq '.\"data-root\"=\"/mnt/docker\"' /etc/docker/daemon.json > /tmp/docker_daemon.json\n        sudo mv /tmp/docker_daemon.json /etc/docker/daemon.json\n        sudo systemctl daemon-reload\n        sudo systemctl restart docker\n    - name: Set up Docker Buildx\n      uses: docker/setup-buildx-action@v3\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n    - name: Build and Test\n      run: bash .github/workflows/single.sh\n"
  },
  {
    "path": ".github/workflows/update-site.yml",
    "content": "name: Deploy Documentation\n\non:\n  push:\n    branches:\n      - main\n    paths:\n      - 'README.md'\n\njobs:\n  deploy_docs:\n    runs-on: ubuntu-latest\n\n    steps:\n      - name: Check out the repository\n        uses: actions/checkout@v6\n        with:\n          persist-credentials: false\n\n      - name: Set up Python\n        uses: actions/setup-python@v6\n        with:\n          python-version: \"3.12\"\n          cache: \"pip\"\n\n      - name: Install python dependencies\n        run: pip install -r docs/src/requirements.txt\n\n      - name: Generate documentation\n        run: python docs/src/generate_docs.py\n\n      - name: Deploy to GitHub Pages\n        uses: peaceiris/actions-gh-pages@v4\n        with:\n          github_token: ${{ secrets.GITHUB_TOKEN }}\n          publish_dir: ./docs\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbin/\nbuild/\ndevelop-eggs/\ndist/\neggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.cache\nnosetests.xml\ncoverage.xml\n\n# Translations\n*.mo\n\n# Mr Developer\n.mr.developer.cfg\n.project\n.pydevproject\n\n# Rope\n.ropeproject\n\n# Django stuff:\n*.log\n*.pot\n\n# Sphinx documentation\ndocs/_build/\n\n# Export folder for docker-compose setup\ncompose/export\ncompose-v2/export\n\n.DS_Store\n"
  },
  {
    "path": ".travis.yml",
    "content": "sudo: required\n\nlanguage: python\npython: 3.10\n\nservices:\n  - docker\n\nenv:\n  matrix:\n    - TOX_ENV=py310\n  global:\n    - secure: \"SEjcKJQ0NGXdpFxFhLVlyJmiBvgiLtR5Uufg90Vm3owKlMy0NSfIrOR+2dwNniqOp7QI3eVepnqjid/Ka0QStzVqMCe55OLkJ/TbTHnMLpbtY63mpGfogVRvxMMAVpzLpcQqtJFORZmO/MIWSLlBiXMMzOg3+tbXvQXmL17Rbmw=\"\n\nmatrix:\n  allow_failures:\n    - env: KUBE=True\n\ngit:\n  submodules: false\n\nbefore_install:\n  - set -e\n  - export GALAXY_HOME=/home/galaxy\n  - export GALAXY_USER=admin@example.org\n  - export GALAXY_USER_EMAIL=admin@example.org\n  - export GALAXY_USER_PASSWD=password\n  - export BIOBLEND_GALAXY_API_KEY=fakekey\n  - export BIOBLEND_GALAXY_URL=http://localhost:8080\n\n  - sudo apt-get update -qq\n  - sudo apt-get install docker-ce --no-install-recommends -y -o Dpkg::Options::=\"--force-confmiss\" -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confnew\"\n  - sudo apt-get install sshpass --no-install-recommends -y\n  - pip install ephemeris\n\n  - docker --version\n  - docker info\n\n\n  # start building this repo\n  - sudo chown 1450 /tmp && sudo chmod a=rwx /tmp\n  - export WORKING_DIR=\"$TRAVIS_BUILD_DIR\"\n  - export DOCKER_RUN_CONTAINER=\"quay.io/bgruening/galaxy\"\n  - export INSTALL_REPO_ARG=\"\"\n  - export SAMPLE_TOOLS=$GALAXY_HOME/ephemeris/sample_tool_list.yaml\n  - travis_wait 30 cd \"$WORKING_DIR\" && docker build -t quay.io/bgruening/galaxy galaxy/\n  - |\n    ## define a container size check function, first parameter is the container name, second the max allowed size in MB\n    container_size_check () {\n\n        # check that the image size is not growing too much between releases\n        # the 19.05 monolithic image was around 1.500 MB\n        size=`docker image inspect $1 --format='{{.Size}}'`\n        size_in_mb=$(($size/(1024*1024)))\n        if [[ $size_in_mb -ge $2 ]]\n        then\n            echo \"The new compiled image ($1) is larger than allowed. $size_in_mb vs. 
$2\"\n            sleep 2\n            #exit\n        fi\n    }\n    container_size_check   quay.io/bgruening/galaxy  1500\n\n    mkdir local_folder\n    docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \\\n        --name galaxy \\\n        --privileged=true \\\n        -v `pwd`/local_folder:/export/ \\\n        -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \\\n        -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \\\n        -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \\\n        -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \\\n        -v /tmp/:/tmp/ \\\n        quay.io/bgruening/galaxy\n\n    sleep 30\n    docker logs galaxy\n    # Define start functions\n    docker_exec() {\n      cd $WORKING_DIR\n      docker exec -t -i galaxy \"$@\"\n    }\n    docker_exec_run() {\n      cd $WORKING_DIR\n      docker run quay.io/bgruening/galaxy \"$@\"\n    }\n    docker_run() {\n      cd $WORKING_DIR\n      docker run \"$@\"\n    }\n\n  - docker ps\n\nscript:\n  - set -e\n  # Test submitting jobs to an external slurm cluster\n  - cd $TRAVIS_BUILD_DIR/test/slurm/ && bash test.sh && cd $WORKING_DIR\n  # Test submitting jobs to an external gridengine cluster\n  # TODO 19.05, need to enable this again!\n  # - cd $TRAVIS_BUILD_DIR/test/gridengine/ && bash test.sh && cd $WORKING_DIR\n\n  - echo 'Waiting for Galaxy to come up.'\n  - galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 600\n\n  - curl -v --fail $BIOBLEND_GALAXY_URL/api/version\n\n  # Test self-signed HTTPS\n  - docker_run -d --name httpstest -p 443:443 -e \"USE_HTTPS=True\" $DOCKER_RUN_CONTAINER\n\n  - sleep 180s && curl -v -k --fail https://127.0.0.1:443/api/version\n  - echo | openssl s_client -connect 127.0.0.1:443 2>/dev/null | openssl x509 -issuer -noout| grep localhost\n\n  - docker logs httpstest && docker stop httpstest && docker rm httpstest\n\n  # Test FTP Server upload\n  - date > time.txt && curl -v --fail -T time.txt ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD || true\n  # Test FTP Server get\n  - 
curl -v --fail ftp://localhost:8021 --user $GALAXY_USER:$GALAXY_USER_PASSWD\n\n  # Test CVMFS\n  - docker_exec bash -c \"service autofs start\"\n  - docker_exec bash -c \"cvmfs_config chksetup\"\n  - docker_exec bash -c \"ls /cvmfs/data.galaxyproject.org/byhand\"\n\n  # Test SFTP Server\n  - sshpass -p $GALAXY_USER_PASSWD sftp -v -P 8022 -o User=$GALAXY_USER -o \"StrictHostKeyChecking no\" localhost <<< $'put time.txt'\n\n  # Run a ton of BioBlend test against our servers.\n  - cd $TRAVIS_BUILD_DIR/test/bioblend/ && . ./test.sh && cd $WORKING_DIR/\n\n  # not working anymore in 18.01\n  # executing: /galaxy_venv/bin/uwsgi --yaml /etc/galaxy/galaxy.yml --master --daemonize2 galaxy.log --pidfile2 galaxy.pid  --log-file=galaxy_install.log --pid-file=galaxy_install.pid\n  # [uWSGI] getting YAML configuration from /etc/galaxy/galaxy.yml\n  # /galaxy_venv/bin/python: unrecognized option '--log-file=galaxy_install.log'\n  # getopt_long() error\n  # cat: galaxy_install.pid: No such file or directory\n  # tail: cannot open ‘galaxy_install.log’ for reading: No such file or directory\n  #- |\n  #  if [ \"${COMPOSE_SLURM}\" ] || [ \"${KUBE}\" ] || [ \"${COMPOSE_CONDOR_DOCKER}\" ] || [ \"${COMPOSE_SLURM_SINGULARITY}\" ]\n  #  then\n  #      # Test without install-repository wrapper\n  #      sleep 10\n  #      docker_exec_run bash -c 'cd $GALAXY_ROOT_DIR && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools'\n  #  fi\n\n\n  # Test the 'new' tool installation script\n  - docker_exec install-tools \"$SAMPLE_TOOLS\"\n  # Test the Conda installation\n  - docker_exec_run bash -c 'export PATH=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda/bin/:$PATH && conda --version && conda install samtools -c bioconda --yes'\n\n\nafter_success:\n  - |\n    if [ \"$TRAVIS_PULL_REQUEST\" == \"false\" -a \"$TRAVIS_BRANCH\" == \"master\" ]\n    then\n        cd 
${TRAVIS_BUILD_DIR}\n        echo \"Generate and deploy html documentation\"\n        ./docs/bin/deploy_docs\n    fi\n\n\nnotifications:\n  webhooks:\n    urls:\n      - https://webhooks.gitter.im/e/559f5480ac7a4ef238af\n    on_success: change\n    on_failure: always\n    on_start: never\n"
  },
  {
    "path": "Changelog.md",
    "content": "# Changelog\n\n## 0.1: Initial release!\n    - with Apache2, PostgreSQL and Tool Shed integration\n## 0.2: complete new Galaxy stack.\n   - with nginx, uwsgi, proftpd, docker, supervisord and SLURM\n## 0.3: Add Interactive Environments\n   - IPython in docker in Galaxy in docker\n   - advanced logging\n## 0.4:\n   - base the image on toolshed/requirements with all required Galaxy dependencies\n   - use Ansible roles to build large parts of the image\n   - export the supervisord web interface on port 9002\n   - enable Galaxy reports webapp\n## 15.07:\n  - `install-biojs` can install BioJS visualisations into Galaxy\n  - `add-tool-shed` can be used to activate third party Tool Sheds in child Dockerfiles\n  - many documentation improvements\n  - RStudio is now part of Galaxy and this Image\n  - configurable postgres UID/GID by @chambm\n  - smarter starting of postgres during Tool installations by @shiltemann\n## 15.10:\n  - new Galaxy 15.10 release\n  - fix https://github.com/bgruening/docker-galaxy-stable/issues/94\n## 16.01:\n  - enable Travis testing for all builds and PR\n  - offer new [yaml based tool installations](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample)\n  - enable dynamic UWSGI processes and threads with `-e UWSGI_PROCESSES=2` and `-e UWSGI_THREADS=4`\n  - enable dynamic Galaxy handlers `-e GALAXY_HANDLER_NUMPROCS=2`\n  - Addition of a new `lite` mode contributed by @kellrott\n  - first release with Jupyter integration\n## 16.04:\n  - include a Galaxy-bare mode, enable with `-e BARE=True`\n  - first release with [HTCondor](https://research.cs.wisc.edu/htcondor/) installed and pre-configured\n## 16.07:\n  - documentation and tests updates for SLURM integration by @mvdbeek\n  - first version with initial Docker compose support (proftpd ✔️)\n  - SFTP support by @zfrenchee\n## 16.10:\n   - [HTTPS support](https://github.com/bgruening/docker-galaxy-stable/pull/240 ) by @zfrenchee and 
@mvdbeek\n## 17.01:\n  - enable Conda dependency resolution by default\n  - [new Galaxy version](https://docs.galaxyproject.org/en/master/releases/17.01_announce.html)\n  - more compose work (slurm, postgresql)\n## 17.05:\n   - add PROXY_PREFIX variable to enable automatic configuration of Galaxy running under some prefix (@abretaud)\n   - enable quota by default (just the functionality, not any specific value)\n   - HT-Condor is now supported in compose with semi-autoscaling and BioContainers\n   - Galaxy Docker Compose is completely under Travis testing and available with SLURM and HT-Condor\n   - using Docker `build-arg`s for GALAXY_RELEASE and GALAXY_REPO\n## 17.09:\n   - much improved documentation about using Galaxy Docker and an external cluster (@rhpvorderman)\n   - CVMFS support - mounting in 4TB of pre-built reference data (@chambm)\n   - Singularity support and tests (compose only)\n   - more work on K8s support and testing (@jmchilton)\n   - using .env files to configure the compose setup for SLURM, Condor, K8s, SLURM-Singularity, Condor-Docker\n## 18.01:\n   - tracking the Galaxy release_18.01 branch\n   - uwsgi work to adapt to changes for 18.01\n   - remove nodejs-legacy & npm from Dockerfile and install latest version from ansible-extras\n   - initial galaxy.ini → galaxy.yml integration\n   - grafana and influxdb container (compose)\n   - Galaxy telegraf integration to push to influxdb (compose)\n   - added some documentation (compose)\n## 18.05:\n   - Nothing very special, but an awesome Galaxy release as usual\n## 18.09:\n   - new and more powerful orchestration build script (build-orchestration-images.sh) by @pcm32\n   - a lot of bug-fixes to the compose setup by @abretaud\n## 19.01:\n   - This is featuring the latest and greatest from the Galaxy community\n   - Please note that this release will be the last release which is based on `ubuntu:14.04` and PostgreSQL 9.3.\n     We will migrate to `ubuntu:18.04` and a newer PostgreSQL version in 
`19.05`. Furthermore, we will not\n     support old Galaxy tool dependencies.\n## 19.05:\n   - The image is now based on `ubuntu:18.04` (instead of ubuntu:14.04 previously) and PostgreSQL 11.5 (9.3 previously).\n     See [migration documentation](#Postgresql-migration) to migrate the postgresql database from 9.3 to 11.5.\n   - We no longer support old Galaxy tool dependencies.\n## 20.05:\n   - Featuring Galaxy 20.05\n   - Completely reworked compose setup\n   - The default admin password and apikey (`GALAXY_DEFAULT_ADMIN_PASSWORD` and `GALAXY_DEFAULT_ADMIN_KEY`) have changed: the password is now `password` (instead of `admin`) and the apikey `fakekey` (instead of `admin`).\n## 20.09:\n   - Featuring Galaxy 20.09\n## 24.1:\n   - Deprecating the `compose` setup.\n   - Complete new setup, adjusting to the latest Galaxy stack.\n   - Base Ubuntu Image: Upgraded from version 18.04 to 22.04\n   - Galaxy: Upgraded from version 20.09 to 24.1\n   - PostgreSQL: Upgraded from version 11 to 15\n   - Python3: Upgraded from version 3.7 to 3.10 (Python 3.10 is set as the default interpreter)\n   - The dockerfile now uses a multi-stage build to reduce the final image size and include only necessary files.\n   - New Service Support:\n     - Gunicorn: Replaces uWSGI as the web server for Galaxy. Installed by default inside Galaxy's virtual environment. Configured Nginx to proxy Gunicorn enabled on port 4001.\n     - Celery: Installed by default inside Galaxy's virtual environment. Enabled Celery for distributed task queues and Celery Beat for periodic task running. RabbitMQ serves as the broker for Celery (if RabbitMQ is disabled, it defaults to PostgreSQL database connection).\n     - Redis is used as the backend for Celery (if Redis is disabled, it defaults to a SQLite database). Flower service is added for monitoring and debugging Celery.\n     - RabbitMQ Management: Enabled the RabbitMQ management plugin on port 15672 for managing and monitoring the RabbitMQ server. 
The dashboard is exposed via Nginx and is accessible at the /rabbitmq/ path. The default access credentials are admin:admin.\n     - Flower: Added Flower service on port 5555 for monitoring and debugging Celery. The dashboard is exposed via Nginx and is available at the /flower/ path. The default access credentials are admin:admin.\n     - TUSd: Added TUSd server on port 1080 to support fault-tolerant uploads; Nginx is configured to proxy TUSd.\n     - gx-it-proxy: Added gx-it-proxy service on port 4002 to support Interactive Tools.\n   - Ansible Playbooks:\n     - Migrated from galaxyextras git submodule to using maintained ansible roles.\n     - Added configure_rabbitmq_users.yml Ansible playbook, which removes the default guest user and adds admin, galaxy, and flower users for RabbitMQ during container startup.\n   - Environment Variables:\n     - Added `GUNICORN_WORKERS` and `CELERY_WORKERS` magic environment variables to set the number of Gunicorn and Celery workers, respectively, during container startup.\n   - Configuration Changes:\n     - Replaced the Galaxy Reports sample configuration file.\n     - Removed galaxy_web, handlers, reports, and ie_proxy services from Supervisor.\n     - Added Gravity for managing Galaxy services such as Gunicorn, Celery, gx-it-proxy, TUSd, reports, and handlers. It uses Supervisor as the process manager, with the configuration file located at /etc/galaxy/gravity.yml.\n     - Added support for dynamic handlers (set as the default handler type).\n     - Redis and Flower services are now managed by Supervisor.\n     - Since Galaxy Interactive Environments are deprecated, they have been replaced by Interactive Tools (ITs). The sample configuration file tools_conf_interactive.xml.sample is placed inside GALAXY_CONFIG_DIR. 
Nginx is also configured to support both domain and path-based ITs.\n     - Switched to using the cvmfs-config.galaxyproject.org repository for automatic configuration and updates of Galaxy project CVMFS repositories. Updated tool data table config path to include CVMFS locations from data.galaxyproject.org in --privileged mode.\n     - Enabled IPv6 support in Nginx for ports 80 and 443.\n     - Added Subject Alternative Name (SAN) extension (DNS:localhost and IP:127.0.0.1) while generating a self-signed SSL certificate.\n     - Ensured the Nginx SSL certificate is trusted system-wide by adding it to the CA store.\n     - Updated Galaxy extra dependencies.\n     - Added docker_net, docker_auto_rm, and docker_set_user parameters for Docker-enabled job destinations.\n     - Added update_yaml_value.py script to update nested key values in a YAML file.\n     - Replaced ie_proxy with gx-it-proxy.\n     - Replaced nginx_upload_module with TUSd for delegated uploads.\n   - CI Tests\n     - Added dive tool for analyzing the docker image\n     - Added test for check data persistence\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2014 Björn Grüning\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "[![DOI](https://zenodo.org/badge/5466/bgruening/docker-galaxy-stable.svg)](https://zenodo.org/badge/latestdoi/5466/bgruening/docker-galaxy-stable)\n[![Build Status](https://travis-ci.org/bgruening/docker-galaxy-stable.svg?branch=master)](https://travis-ci.org/bgruening/docker-galaxy-stable)\n[![Docker Repository on Quay](https://quay.io/repository/bgruening/galaxy/status \"Docker Repository on Quay\")](https://quay.io/repository/bgruening/galaxy)\n[![Gitter](https://badges.gitter.im/bgruening/docker-galaxy-stable.svg)](https://gitter.im/bgruening/docker-galaxy-stable?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)\n![docker pulls](https://img.shields.io/docker/pulls/bgruening/galaxy-stable.svg) ![docker stars](https://img.shields.io/docker/stars/bgruening/galaxy-stable.svg)\n[![docker image stats](https://images.microbadger.com/badges/image/bgruening/galaxy-stable.svg)](https://microbadger.com/images/bgruening/galaxy-stable \"Get your own image badge on microbadger.com\")\n\nGalaxy Docker Image\n===================\n\nThe [Galaxy](http://www.galaxyproject.org) [Docker](http://www.docker.io) Image is an easy distributable full-fledged Galaxy installation, that can be used for testing, teaching and presenting new tools and features.\n\nOne of the main goals is to make access to entire tool suites as easy as possible. Usually,\nthis includes the setup of a public available web-service that needs to be maintained, or that the Tool-user needs to either setup a Galaxy Server by its own or to have Admin access to a local Galaxy server.\nWith docker, tool developers can create their own Image with all dependencies and the user only needs to run it within docker.\n\nThe Image is based on [Ubuntu 24.04 LTS](http://releases.ubuntu.com/24.04/) and all recommended Galaxy requirements are installed. 
The following chart should illustrate the [Docker](http://www.docker.io) image hierarchy we have built to make it as easy as possible to build on different layers of our stack and create many exciting Galaxy flavors.\n\n![Docker hierarchy](https://raw.githubusercontent.com/bgruening/docker-galaxy-stable/master/chart.png)\n\nBreaking changes\n================\n\n:information_source: After a long pause, due to interesting times at the beginning of the \"golden 2020s\", we are finally back with release `24.1`. Many things have changed in Galaxy.\nIt is deployed completely differently and gained many new features with many new dependencies. We recommend starting with a fresh `/export` folder and contacting us if you encounter any problems. \n\n# Table of Contents <a name=\"toc\" />\n\n- [Usage](#Usage)\n  - [Upgrading images](#Upgrading-images)\n    - [PostgreSQL migration](#Postgresql-migration)\n  - [Enabling Interactive Tools in Galaxy](#Enabling-Interactive-Tools-in-Galaxy)\n  - [Using passive mode FTP or SFTP](#Using-passive-mode-FTP-or-SFTP)\n  - [Using Parent docker](#Using-Parent-docker)\n  - [RabbitMQ Management](#RabbitMQ-Management)\n  - [Flower Webapp](#Flower-Webapp)\n  - [Galaxy's config settings](#Galaxys-config-settings)\n  - [Configuring Galaxy behind a proxy](#Galaxy-behind-proxy)\n  - [On-demand reference data with CVMFS](#cvmfs)\n  - [Personalize your Galaxy](#Personalize-your-Galaxy)\n  - [Deactivating services](#Deactivating-services)\n  - [Restarting Galaxy](#Restarting-Galaxy)\n  - [Advanced Logging](#Advanced-Logging)\n  - [Running on an external cluster (DRM)](#Running-on-an-external-cluster-(DRM))\n    - [Basic setup for the filesystem](#Basic-setup-for-the-filesystem)\n    - [Using an external Slurm cluster](#Using-an-external-Slurm-cluster)\n    - [Using an external Grid Engine cluster](#Using-an-external-Grid-Engine-cluster)\n    - [Tips for Running Jobs Outside the Container](#Tips-for-Running-Jobs-Outside-the-Container)\n- [Enable 
Galaxy to use BioContainers (Docker)](#auto-exec-tools-in-docker)\n- [Magic Environment variables](#Magic-Environment-variables)\n- [HTTPS Support](#HTTPS-Support)\n- [Lite Mode](#Lite-Mode)\n- [Extending the Docker Image](#Extending-the-Docker-Image)\n  - [List of Galaxy flavours](#List-of-Galaxy-flavours)\n- [Integrating non-Tool Shed tools into the container](#Integrating-non-Tool-Shed-tools-into-the-container)\n- [Users & Passwords](#Users-Passwords)\n- [Development](#Development)\n- [Requirements](#Requirements)\n- [Changelog](./Changelog.md)\n- [Support & Bug Reports](#Support-Bug-Reports)\n\n\n# Usage <a name=\"Usage\" /> [[toc]](#toc)\nThis chapter explains how to launch the container manually.\n\nAt first you need to install docker. Please follow the [very good instructions](https://docs.docker.com/installation/) from the Docker project.\n\nAfter the successful installation, all you need to do is:\n\n```sh\ndocker run -d -p 8080:80 -p 8021:21 -p 8022:22 quay.io/bgruening/galaxy\n```\n\nI will shortly explain the meaning of all the parameters. For a more detailed description please consult the [docker manual](http://docs.docker.io/), it's really worth reading.\n\nLet's start:\n- `docker run` will run the Image/Container for you.\n\n    In case you do not have the Container stored locally, docker will download it for you.\n\n- `-p 8080:80` will make the port 80 (inside of the container) available on port 8080 on your host. Same holds for port 8021 and 8022, that can be used to transfer data via the FTP or SFTP protocol, respectively.\n\n    Inside the container a nginx Webserver is running on port 80 and that port can be bound to a local port on your host computer. With this parameter you can access your Galaxy\n    instance via `http://localhost:8080` immediately after executing the command above. 
If you work with the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) on Mac or Windows,\n    you need to connect to the machine generated by 'Docker Quickstart'. You get its IP address from `docker-machine ls` or from the first line in the terminal, e.g.: `docker is configured to use the default machine with IP 192.168.99.100`.\n\n- `quay.io/bgruening/galaxy` is the Image/Container name, that directs docker to the correct path in the [docker index](https://quay.io/repository/bgruening/galaxy?tab=tags).\n- `-d` will start the docker container in daemon mode.\n\nFor an interactive session, you can execute:\n\n```sh\ndocker run -i -t -p 8080:80 \\\n    quay.io/bgruening/galaxy \\\n    /bin/bash\n```\n\nand run the `startup` script by yourself, to start PostgreSQL, nginx and Galaxy.\n\nDocker images are \"read-only\", all your changes inside one session will be lost after restart. This mode is useful to present Galaxy to your colleagues or to run workshops with it. To install Tool Shed repositories or to save your data you need to export the calculated data to the host computer.\n\nFortunately, this is as easy as:\n\n```sh\ndocker run -d -p 8080:80 \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    quay.io/bgruening/galaxy\n```\n\nWith the additional `-v /home/user/galaxy_storage/:/export/` parameter, Docker will mount the local folder `/home/user/galaxy_storage` into the Container under `/export/`. 
A `startup.sh` script, that is usually starting nginx, PostgreSQL and Galaxy, will recognize the export directory with one of the following outcomes:\n\n- In case of an empty `/export/` directory, it will move the [PostgreSQL](http://www.postgresql.org/) database, the Galaxy database directory, Shed Tools and Tool Dependencies and various config scripts to /export/ and symlink back to the original location.\n- In case of a non-empty `/export/`, for example if you continue a previous session within the same folder, nothing will be moved, but the symlinks will be created.\n\nThis enables you to have different export folders for different sessions - means real separation of your different projects.\n\nTo detect when the Galaxy distribution in the image changes, the container writes a marker at\n`/export/.galaxy_export_marker`. You can override the marker value with `GALAXY_EXPORT_MARKER` if you\nneed deterministic export refresh behavior.\n\nYou can also collect and store `/export/` data of Galaxy instances in a dedicated docker [Data  volume Container](https://docs.docker.com/engine/userguide/dockervolumes/) created by:\n\n```sh\ndocker create -v /export \\\n    --name galaxy-store \\\n    quay.io/bgruening/galaxy \\\n    /bin/true\n```\n\nTo mount this data volume in a Galaxy container, use the  `--volumes-from` parameter:\n\n```sh\ndocker run -d -p 8080:80 \\\n    --volumes-from galaxy-store \\\n    quay.io/bgruening/galaxy\n```\n\nThis also allows for data separation, but keeps everything encapsulated within the docker engine (e.g. on OS X within your `$HOME/.docker` folder - easy to backup, archive and restore. This approach, albeit at the expense of disk space, avoids the problems with permissions [reported](https://github.com/bgruening/docker-galaxy-stable/issues/68) for data export on non-Linux hosts.\n\n\n## Upgrading images <a name=\"Upgrading-images\" /> [[toc]](#toc)\n\nWe will release a new version of this image concurrent with every new Galaxy release. 
For upgrading an image to a new version we have assembled a few hints for you. Please, take in account that upgrading may vary depending on your Galaxy installation, and the changes in new versions. Use this example carefully!\n\n* Create a test instance with only the database and configuration files. This will allow testing to ensure that things run but won't require copying all of the data.\n* New unmodified configuration files are always stored in a hidden directory called `.distribution_config`. Use this folder to diff your configurations with the new configuration files shipped with Galaxy. This prevents needing to go through the change log files to find out which new files were added or which new features you can activate.\n\nHere are 2 suggested upgrade methods, a quick one, and a safer one.\n\n### The quick upgrade method\n\nThis method involves less data copying, which makes the process quicker, but makes it impossible to downgrade in case of problems.\n\nIf you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).\n\n\n1. Stop the old Galaxy container\n\n```sh\ndocker stop <old_container_name>\ndocker pull quay.io/bgruening/galaxy\n```\n\n2. Run the container with the updated image\n\n```sh\ndocker run -p 8080:80 -v /data/galaxy-data:/export --name <new_container_name> quay.io/bgruening/galaxy\n```\n\n3. Use diff to find changes in the config files (only if you changed any config file).\n\n```sh\ncd /data/galaxy-data/.distribution_config\nfor f in *; do echo $f; diff $f ../galaxy/config/$f; read; done\n```\n\n4. Upgrade the database schema\n\n```sh\ndocker exec -it <new_container_name> bash\ngalaxyctl stop\nsh manage_db.sh upgrade\nexit\n```\n5. 
Restart Galaxy\n\n```sh\ndocker exec -it <new_container_name> galaxyctl start\n```\n\n(Alternatively, restart the whole container)\n\n\n### The safe upgrade method\n\nWith this method, you keep a backup in case you decide to downgrade, but requires some potentially long data copying.\n\n* Note that copying database and datasets can be expensive if you have many GB of data.\n* If you are upgrading from <19.05 to >=19.05, you need to migrate the PostgreSQL database, have a look at [PostgreSQL migration](#Postgresql-migration).\n\n1. Download newer version of the Galaxy image\n\n  ```\n  $ sudo docker pull quay.io/bgruening/galaxy\n  ```\n2. Stop and rename the current galaxy container\n\n  ```\n  $ sudo docker stop galaxy-instance\n  $ sudo docker rename galaxy-instance galaxy-instance-old\n  ```\n3. Rename the data directory (the one that is mounted to /export in the docker)\n\n  ```\n  $ sudo mv /data/galaxy-data /data/galaxy-data-old\n  ```\n4. Run a new Galaxy container using newer image and wait while Galaxy generates the default content for /export\n\n  ```\n  $ sudo docker run -p 8080:80 -v /data/galaxy-data:/export --name galaxy-instance quay.io/bgruening/galaxy\n  ```\n5. Stop the Galaxy container\n\n  ```\n  $ sudo docker stop galaxy-instance\n  ```\n6. Replace the content of the postgres database by the old db data\n\n  ```\n  $ sudo rm -r /data/galaxy-data/postgresql/\n  $ sudo rsync -var /data/galaxy-data-old/postgresql/  /data/galaxy-data/postgresql/\n  ```\n7. Use diff to find changes in the config files (only if you changed any config file).\n\n  ```\n  $ cd /data/galaxy-data/.distribution_config\n  $ for f in *; do echo $f; diff $f ../../galaxy-data-old/galaxy/config/$f; read; done\n  ```\n8. Copy all the users' datasets to the new instance\n\n  ```\n  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/files/* /data/galaxy-data/galaxy/database/files/\n  ```\n9. 
Copy all the installed tools\n\n  ```\n  $ sudo rsync -var /data/galaxy-data-old/tool_deps/* /data/galaxy-data/tool_deps/\n  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/shed_tools/* /data/galaxy-data/galaxy/database/shed_tools/\n  $ sudo rsync -var /data/galaxy-data-old/galaxy/database/config/* /data/galaxy-data/galaxy/database/config/\n  ```\n10. Copy the welcome page and all its files.\n\n  ```\n  $ sudo rsync -var /data/galaxy-data-old/welcome* /data/galaxy-data/\n  ```\n11. Create an auxiliary docker in interactive mode and upgrade the database.\n\n  ```\n  $ sudo docker run -it --rm -v /data/galaxy-data:/export quay.io/bgruening/galaxy /bin/bash\n  # Startup all processes\n  > startup &\n  #Upgrade the database to the most recent version\n  > sh manage_db.sh upgrade\n  #Logout\n  > exit\n  ```\n12. Start the docker and test\n\n  ```\n  $ sudo docker start galaxy-instance\n  ```\n13. Clean the old container and image\n\n\n### Postgresql migration <a name=\"Postgresql-migration\" /> [[toc]](#toc)\n\nIn the 19.05 version, Postgresql was updated from version 9.3 to version 11.5. If you are upgrading from a version <19.05, you will need to migrate the database.\nYou can do it the following way (based on the \"The quick upgrade method\" above):\n\n1. Stop Galaxy in the old container\n\n```sh\ndocker exec -it <old_container_name> galaxyctl stop\n```\n\n2. Dump the old database\n\n```sh\ndocker exec -it <old_container_name> bash\nsu postgres\npg_dumpall --clean > /export/postgresql/9.3dump.sql\nexit\nexit\n```\n\n3. Update the container (= step 1 of the \"The quick upgrade method\" above)\n\n```sh\ndocker stop <old_container_name>\ndocker pull quay.io/bgruening/galaxy\n```\n\n4. Run the container with the updated image (= step 2 of the \"The quick upgrade method\" above)\n\n```sh\ndocker run -p 8080:80 -v /data/galaxy-data:/export --name <new_container_name> quay.io/bgruening/galaxy\n```\n\n5. 
Restore the dump to the new postgres version\n\nWait for the startup process to finish (Galaxy should be accessible)\n\n```sh\ndocker exec -it <new_container_name> bash\ngalaxyctl stop\nsu postgres\npsql -f /export/postgresql/9.3dump.sql postgres\nexit\nexit\n```\n\n6. Use diff to find changes in the config files (only if you changed any config file). (= step 3 of the \"The quick upgrade method\" above)\n\n```sh\ncd /data/galaxy-data/.distribution_config\nfor f in *; do echo $f; diff $f ../galaxy/config/$f; read; done\n```\n\n7. Upgrade the database schema (= step 4 of the \"The quick upgrade method\" above)\n\n```sh\ndocker exec -it <new_container_name> bash\ngalaxyctl stop\nsh manage_db.sh upgrade\nexit\n```\n\n8. Restart Galaxy (= step 5 of the \"The quick upgrade method\" above)\n\n```sh\ndocker exec -it <new_container_name> galaxyctl start\n```\n\n(Alternatively, restart the whole container)\n\n9. Clean old files\n\nIf you are *very* sure that everything went well, you can delete `/export/postgresql/9.3dump.sql` and `/export/postgresql/9.3/` to save some space.\n\n## Enabling Interactive Tools in Galaxy <a name=\"Enabling-Interactive-Tools-in-Galaxy\" /> [[toc]](#toc)\n\nInteractive Tools (IT) are sophisticated ways to extend Galaxy with powerful services, like [Jupyter](http://jupyter.org/), in a secure and reproducible way.\n\nFor this we need to be able to launch Docker containers inside our Galaxy Docker container.\n\n```sh\ndocker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \\\n    --privileged=true \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    quay.io/bgruening/galaxy\n```\n\nThe port 4002 is the proxy port that is used to handle Interactive Tools. `--privileged` is needed to start docker containers inside docker.\n\nAdditionally, you can set the `GALAXY_DOMAIN` environment variable to specify the domain name for your Galaxy instance to ensure that domain-based ITs work correctly. By default, it is set to `localhost`. 
If you have your own domain, you can specify it instead.\n\nIf you're using the default job configuration, set the `GALAXY_DESTINATIONS_DEFAULT` environment variable to a Docker-enabled destination. By default, this is set to `slurm_cluster`, so you'll need to update it accordingly. Alternatively, you can also provide your own job configuration file. \n\n```sh\ndocker run -d -p 8080:80 -p 8021:21 -p 4002:4002 \\\n    --privileged=true \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    -e \"GALAXY_DOMAIN=your.domain.com\" \\\n    -e \"GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker\" \\\n    quay.io/bgruening/galaxy\n```\n\n\n## Using passive mode FTP or SFTP <a name=\"Using-passive-mode-FTP-or-SFTP\" /> [[toc]](#toc)\n\nBy default, FTP servers running inside of docker containers are not accessible via passive mode FTP, due to not being able to expose extra ports. To circumvent this, you can use the `--net=host` option to allow Docker to directly open ports on the host server:\n\n```sh\ndocker run -d \\\n    --net=host \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    quay.io/bgruening/galaxy\n```\n\nNote that there is no need to specifically bind individual ports (e.g., `-p 80:80`) if you use `--net`.\n\nAn alternative to FTP and its shortcomings is to use the SFTP protocol via port 22. Start your Galaxy container with a port binding to 22.\n\n```sh\ndocker run -i -t -p 8080:80 -p 8022:22 \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    quay.io/bgruening/galaxy\n```\n\nAnd use for example [Filezilla](https://filezilla-project.org/) or the `sftp` program to transfer data:\n\n```sh\nsftp -v -P 8022 -o User=admin@example.org localhost <<< $'put <YOUR FILE HERE>'\n```\n\n\n## Using Parent docker <a name=\"Using-Parent-docker\" /> [[toc]](#toc)\n\nOn some linux distributions, Docker-In-Docker can run into issues (such as running out of loopback interfaces). 
If this is an issue, you can use a 'legacy' mode that use a docker socket for the parent docker installation mounted inside the container. To engage, set the environmental variable `DOCKER_PARENT`\n\n```sh\ndocker run -p 8080:80 -p 8021:21 \\\n    --privileged=true -e DOCKER_PARENT=True \\\n    -v /var/run/docker.sock:/var/run/docker.sock \\\n    -v /home/user/galaxy_storage/:/export/ \\\n    quay.io/bgruening/galaxy\n```\n\n## RabbitMQ Management <a name=\"RabbitMQ-Management\" /> [[toc]](#toc)\n\nRabbitMQ is used as the broker for services like Celery. RabbitMQ provides a dedicated web interface for managing message queues, accessible at `http://localhost:8080/rabbitmq/`. This interface allows you to monitor queues, exchanges, bindings, and more. By default, it is password protected with `admin:admin`, but the credentials can be changed after logging in.\n\nTo completely disable RabbitMQ, you can set the `NONUSE` environment variable during container startup.\n\n```sh\ndocker run -p 8080:80 \\\n    -e \"NONUSE=rabbitmq\" \\\n    quay.io/bgruening/galaxy\n```\n\n## Flower Webapp <a name=\"Flower-Webapp\" /> [[toc]](#toc)\n\nFlower is a web-based tool for monitoring and administering Celery. It is accessible at `http://localhost:8080/flower`. By default, this site is password protected with `admin:admin`. You can change this by providing a `common_htpasswd` file in `/home/user/galaxy_storage/`.\n\nThe Flower Webapp will only be available if both Celery and RabbitMQ are enabled, meaning the environment variable `NONUSE` does not include `celery` and `rabbitmq`. 
To completely disable the Flower Webapp, you can set the `NONUSE` environment variable during container startup.\n\n```sh\ndocker run -p 8080:80 \\\n    -e \"NONUSE=flower\" \\\n    quay.io/bgruening/galaxy\n```\n\n## Galaxy's config settings <a name=\"Galaxys-config-settings\" /> [[toc]](#toc)\n\nEvery Galaxy configuration parameter in `config/galaxy.yml` can be overwritten by passing an environment variable to the `docker run` command during startup. The name of the environment variable has to be:\n`GALAXY_CONFIG`+ *the_original_parameter_name_in_capital_letters*\nFor example, you can set the Galaxy session timeout to 5 minutes and set your own Galaxy brand by invoking the `docker run` like this:\n\n```sh\ndocker run -p 8080:80 \\\n    -e \"GALAXY_CONFIG_BRAND='My own Galaxy flavour'\" \\\n    -e \"GALAXY_CONFIG_SESSION_DURATION=5\" \\\n    quay.io/bgruening/galaxy\n```\n\nNote, that if you would like to run any of the [cleanup scripts](https://galaxyproject.org/admin/config/performance/purge-histories-and-datasets/), you will need to add the following to `/export/galaxy/config/galaxy.yml`:\n\n```\ndatabase_connection = postgresql://galaxy:galaxy@localhost:5432/galaxy\nfile_path = /export/galaxy/database/files\n```\n\n## Security Configuration\n\n*By default* the `admin_users` and `bootstrap_admin_api_key` variables are set to:\n\n```\nadmin_users: admin@example.org\nbootstrap_admin_api_key: HSNiugRFvgT574F43jZ7N9F3\n```\n\nAdditionally, Galaxy encodes various internal values that can be part of output using a secret string configurable as `id_secret` in the config file (use 5-65 bytes long string).\nThis prevents 'guessing' of Galaxy's internal database sequences. 
Example:\n\n```\nid_secret: d5c910cc6e32cad08599987ab64dcfae\n```\n\nYou should manually change all three configuration variables above in `/export/galaxy/config/galaxy.yml`.\n\nAlternatively, you can pass the security configuration when running the image but please note that it is a security problem.\nE.g. if a tool exposes all `env`'s your secret API key will also be exposed.\n\nIn addition with 24.2 we enabled Galaxy Vault configuration. This enables users to store secrets in a user-owned password safe, called vault.\nIt is highly recommended to change the pre-configured key under `$GALAXY_CONFIG_DIR/vault_conf.yml` following the instructions inside the file.\n\n\n## Configuring Galaxy's behind a proxy <a name=\"Galaxy-behind-proxy\" /> [[toc]](#toc)\n\nIf your Galaxy docker instance is running behind an HTTP proxy server, and if you're accessing it with a specific path prefix (e.g. http://www.example.org/some/prefix/), you need to make Galaxy aware of it. There is an environment variable available to do so:\n\n```\nPROXY_PREFIX=/some/prefix\n```\n\nYou can and should overwrite these during launching your container:\n\n```sh\ndocker run -p 8080:80 \\\n    -e \"PROXY_PREFIX=/some/prefix\" \\\n    quay.io/bgruening/galaxy\n```\n\n## On-demand reference data with CVMFS <a name=\"cvmfs\" /> [[toc]](#toc)\nBy default, Galaxy instances launched with this image will have on-demand access to approximately 4TB of\nreference genomes and indexes. 
These are the same reference data available on the main Galaxy server.\nThis is achieved by connecting to Galaxy's CernVM filesystem (CVMFS) at `cvmfs-config.galaxyproject.org` repository, which provides automatic configuration for all galaxyproject.org CVMFS repositories, including `data.galaxyproject.org`, and ensures they remain up to date.\nThe CVMFS capability doesn't add to the size of the Docker image, but when running, CVMFS maintains\na cache to keep the most recently used data on the local disk.\n\n*Note*: for CVMFS directories to be mounted-on-demand with `autofs`, you must launch Docker as `--privileged`.\nIf privileged mode is not an option, use the optional CVMFS sidecar in `galaxy/docker-compose.yaml`:\n\n```sh\ncd galaxy\nCVMFS_MOUNT_DIR=/cvmfs EXPORT_DIR=./export docker compose --profile cvmfs up\n```\n\nThis starts a dedicated CVMFS container that mounts the repositories and shares `/cvmfs` with the Galaxy\ncontainer. The CVMFS cache is persisted in `${EXPORT_DIR}/cvmfs-cache`.\n\n\n## Personalize your Galaxy <a name=\"Personalize-your-Galaxy\" /> [[toc]](#toc)\n\nThe Galaxy welcome screen can be changed by providing a `welcome.html` page in `/home/user/galaxy_storage/`. All files starting with `welcome` will be copied during startup and served as introduction page. If you want to include images or other media, name them `welcome_*` and link them relative to your `welcome.html` ([example](`https://github.com/bgruening/docker-galaxy-stable/blob/master/galaxy/welcome.html`)).\n\n\n## Deactivating services <a name=\"Deactivating-services\" /> [[toc]](#toc)\n\nNon-essential services can be deactivated during startup. Set the environment variable `NONUSE` to a comma separated list of services. 
Currently, `postgres`, `cron`, `proftp`, `nodejs`, `condor`, `slurmd`, `slurmctld`, `celery`, `rabbitmq`, `redis`, `flower` and `tusd` are supported.\n\n```sh\ndocker run -d -p 8080:80 -p 9002:9002 \\\n    -e \"NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd\" \\\n    quay.io/bgruening/galaxy\n```\n\nA graphical user interface for starting/stopping services is available on port `9002` if you map it (e.g. `-p 9002:9002`).\nThis is the Supervisor web UI and it is unauthenticated by default, so only expose it on trusted networks or adjust the\nSupervisor credentials in the image build.\n\n\n## Restarting Galaxy <a name=\"Restarting-Galaxy\" /> [[toc]](#toc)\n\nIf you want to restart Galaxy without restarting the entire Galaxy container you can use `docker exec` (docker > 1.3).\n\n```sh\ndocker exec <container name> galaxyctl restart\n```\n\nTo restart only web workers or handlers:\n\n```sh\ndocker exec <container name> galaxyctl restart gunicorn\ndocker exec <container name> galaxyctl restart handler\n```\n\nUse `galaxyctl --help` for service names available in your configuration.\n\nIn addition, you can start/stop every supervisord process using a web interface on port `9002`. Start your container with:\n\n```sh\ndocker run -p 9002:9002 quay.io/bgruening/galaxy\n```\n\n\n## Advanced Logging <a name=\"Advanced-Logging\" /> [[toc]](#toc)\n\nYou can set the environment variable $GALAXY_LOGGING to FULL to access all logs from supervisor. For example start your container with:\n\n```sh\ndocker run -d -p 8080:80 -p 8021:21 \\\n    -e \"GALAXY_LOGGING=full\" \\\n    quay.io/bgruening/galaxy\n```\n\nThen, you can access the supervisord web interface on port `9002` and get access to log files. 
To do so, start your container with:\n\n```sh\ndocker run -d -p 8080:80 -p 8021:21 -p 9002:9002 \\\n    -e \"GALAXY_LOGGING=full\" \\\n    quay.io/bgruening/galaxy\n```\n\nAlternatively, you can access the container directly using the following command:\n\n```sh\ndocker exec -it <container name> bash\n```\n\nOnce connected to the container, log files are available in `/home/galaxy/logs`.\n\nA volume can also be used to map this directory to one external to the container - for instance if logs need to be persisted for auditing reasons (security, debugging, performance testing, etc...).:\n\n```sh\nmkdir gx_logs\ndocker run -d -p 8080:80 -p 8021:21 -e \"GALAXY_LOGGING=full\" -v `pwd`/gx_logs:/home/galaxy/logs quay.io/bgruening/galaxy\n```\n\n## Running on an external cluster (DRM)  <a name=\"Running-on-an-external-cluster-(DRM)\" />[[toc]](#toc)\n\n### Basic setup for the filesystem  <a name=\"Basic-setup-for-the-filesystem\" /> [[toc]](#toc)\n\n#### The easy way\nThe easiest way is to create a `/export` mount point on the cluster and mount the container with `/export:/export`.\n\n#### Not using the /export mount point on the cluster.\nThe docker container sets up all its files on the /export directory, but this directory may not exist on the cluster filesystem. 
This can be solved with symbolic links on the cluster filesystem but it can also be solved within the container itself.\n\nIn this example configuration the cluster file system has a directory `/cluster_storage/galaxy_data` which is accessible for the galaxy user in the container (UID 1450) and the user starting the container.\n\nThe container should be started with the following settings configured:\n```bash\ndocker run -d -p 8080:80 -p 8021:21\n-v /cluster_storage/galaxy_data/galaxy_export:/export # This makes sure all galaxy files are on the cluster filesystem\n-v /cluster_storage/galaxy_data:/cluster_storage/galaxy_data # This ensures the links within the docker container and on the cluster fs are the same\n# The following settings make sure that each job is configured with the paths on the cluster fs instead of /export\n-e GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=\"/cluster_storage/galaxy_data/galaxy_export/tool_deps\"\n-e GALAXY_CONFIG_TOOL_DEPENDENCY_CACHE_DIR=\"/cluster_storage/galaxy_data/galaxy_export/tool_deps/_cache\"\n-e GALAXY_CONFIG_FILE_PATH=\"/cluster_storage/galaxy_data/galaxy_export/galaxy/database/files\"\n-e GALAXY_CONFIG_TOOL_PATH=\"/cluster_storage/galaxy_data/galaxy_export/galaxy/tools\"\n-e GALAXY_CONFIG_TOOL_DATA_PATH=\"/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data\"\n-e GALAXY_CONFIG_SHED_TOOL_DATA_PATH=\"/cluster_storage/galaxy_data/galaxy_export/galaxy/tool-data\"\n# The following settings are for directories that can be anywhere on the cluster fs.\n-e GALAXY_CONFIG_JOB_WORKING_DIRECTORY=\"/cluster_storage/galaxy_data/galaxy_export/galaxy/database/job_working_directory\" #IMPORTANT: needs to be created manually. Can also be placed elsewhere, but is originally located here\n-e GALAXY_CONFIG_NEW_FILE_PATH=\"/cluster_storage/galaxy_data/tmp\" # IMPORTANT: needs to be created manually. 
This needs to be writable by UID=1450 and have its sticky bit set (chmod 1777 for world-writable with sticky bit)\n-e GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=False # Writes Job scripts, stdout and stderr to job_working_directory.\n-e GALAXY_CONFIG_RETRY_JOB_OUTPUT_COLLECTION=5 #IF your cluster fs uses nfs this may introduce latency. You can set galaxy to retry if a job output is not yet created.\n# Conda settings. IMPORTANT!\n-e GALAXY_CONFIG_CONDA_PREFIX=\"/cluster_storage/galaxy_data/_conda\" # Can be anywhere EXCEPT cluster_storage/galaxy/galaxy_export!\n# Conda uses $PWD to determine where the virtual environment is. If placed inside the export directory conda will determine $PWD to be a subdirectory of the /export folder which does not exist on the cluster!\n-e GALAXY_CONFIG_CONDA_AUTO_INIT=True # When the necessary environment can not be found a new one will automatically be created\n```\n### Setting up a Python virtual environment on the cluster  <a name=\"Setting-up-a-python-virtual-environment-on-the-cluster\" />[[toc]](#toc)\nThe Python environment in the container is not accessible from the cluster. So it needs to be created beforehand.\nIn this example configuration the Python virtual environment is created on  `/cluster_storage/galaxy_data/galaxy_venv` and the export folder on `/cluster_storage/galaxy_data/galaxy_export`. To create the virtual environment:\n1. Create the virtual environment `virtualenv /cluster_storage/galaxy_data/galaxy_venv`\n2. Activate the virtual environment `source /cluster_storage/galaxy_data/galaxy_venv/bin/activate`\n3. 
Install the galaxy requirements `pip install --index-url https://wheels.galaxyproject.org/simple --only-binary all -r /cluster_storage/galaxy_data/galaxy/lib/galaxy/dependencies/pinned-requirements.txt`\n  * Make sure to upgrade the environment with the new requirements when a new version of galaxy is released.\n\nTo make the Python environment usable on the cluster, create your custom `job_conf.xml` file and put it in `/cluster_storage/galaxy_data/galaxy_export/galaxy/config`.\nIn the destination section the following code should be added:\n```xml\n<destinations default=\"cluster\">\n  <destination id=\"cluster\" runner=\"your_cluster_runner\">\n    <env file=\"/cluster_storage/galaxy_data/galaxy_venv/bin/activate\"/>\n    <env id=\"GALAXY_ROOT_DIR\">/cluster_storage/galaxy_data/galaxy_export/galaxy</env>\n    <env id=\"GALAXY_LIB\">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>\n    <env id=\"PYTHONPATH\">/cluster_storage/galaxy_data/galaxy_export/galaxy/lib</env>\n    <param id=\"embed_metadata_in_job\">True</param>\n  </destination>\n```\nIn this way, Python tools on the cluster are able to use the Galaxy libraries.\n\nMore information can be found [here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#managing-dependencies-manually)\nand\n[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#galaxy-job-handlers).\n\n### Using an external Slurm cluster <a name=\"Using-an-external-Slurm-cluster\" /> [[toc]](#toc)\n\nIt is often convenient to configure Galaxy to use a high-performance cluster for running jobs. To do so, two files are required:\n\n1. munge.key\n2. slurm.conf\n\nThese files from the cluster must be copied to the `/export` mount point (i.e., `/cluster_storage/galaxy_data/galaxy_export/` on the host if using below command) accessible to Galaxy before starting the container. 
This must be done regardless of which Slurm daemons are running within Docker. At start, symbolic links will be created to these files to `/etc` within the container, allowing the various Slurm functions to communicate properly with your cluster. In such cases, there's no reason to run `slurmctld`, the Slurm controller daemon, from within Docker, so specify `-e \"NONUSE=slurmctld\"`. Unless you would like to also use Slurm (rather than the local job runner) to run jobs within the Docker container, then alternatively specify `-e \"NONUSE=slurmctld,slurmd\"`.\n\nImportantly, Slurm relies on a shared filesystem between the Docker container and the execution nodes. To allow things to function correctly, checkout the basic filesystem setup above.\n\nA brief note is in order regarding the version of Slurm installed. This Docker image uses Ubuntu 14.04 as its base image. The version of Slurm in the Ubuntu 14.04 repository is 2.6.5 and that is what is installed in this image. If your cluster is using an incompatible version of Slurm then you will likely need to modify this Docker image.\n\nThe following is an example for how to specify a destination in `job_conf.xml` that uses a custom partition (\"work\", rather than \"debug\") and 4 cores rather than 1:\n\n```\n<destination id=\"slurm4threads\" runner=\"slurm\">\n    <param id=\"embed_metadata_in_job\">False</param>\n    <param id=\"nativeSpecification\">-p work -n 4</param>\n</destination>\n```\n\nThe usage of `-n` can be confusing. Note that it will specify the number of cores, not the number of tasks (i.e., it's not equivalent to `srun -n 4`).\n\n### Using an external Grid Engine cluster <a name=\"Using-an-external-Grid-Engine-cluster\"/> [[toc]](#toc)\n\nSet up the filesystem on the cluster as mentioned above.\nTo use Grid Engine (Sun Grid Engine, Open Grid Scheduler), one configuration file and an environment variable are required:\n\n\n1. 
create an `act_qmaster` file in the /export folder.\n  * In ***act_qmaster*** is something like this.\n\n  ```\n  YOUR_GRIDENGINE_MASTER_HOST\n  ```\n  * this file will automatically be installed in the container's `/var/lib/gridengine` folder.\n2. set the environment variable `SGE_ROOT`\n  * By default\n  ```\n  -e SGE_ROOT=/var/lib/gridengine\n  ```\n3. Make sure that YOUR_GRIDENGINE_MASTER_HOST can be pinged from the docker container. If this is not the case you can put the qmaster's hostname and ip in the containers `/etc/hosts`\nYour Grid Engine needs to accept job submissions from inside the container. If your container is already on a host that can submit jobs, set the hostname of the container to be exactly the same as the host. (The hostname can be changed by using the --hostname flag when starting the container).\n\n\n Alternatively, you can add the container's hostname (default=galaxy-docker) to the /etc/hosts file on the gridengine head node. And setting the container's hostname as a submit host.\n\n\n### Tips for Running Jobs Outside the Container <a name=\"Tips-for-Running-Jobs-Outside-the-Container\"/> [[toc]](#toc)\n\nIn its default state Galaxy assumes both the Galaxy source code and\nvarious temporary files are available on shared file systems across the\ncluster. 
When using Condor or SLURM (as described above) to run jobs outside\nof the Docker container one can take steps to mitigate these assumptions.\n\nThe `embed_metadata_in_job` option on job destinations in `job_conf.xml`\nforces Galaxy to collect metadata inside the container instead of on the\ncluster:\n\n```\n<param id=\"embed_metadata_in_job\">False</param>\n```\n\nThis has performance implications and may not scale as well as performing\nthese calculations on the remote cluster - but this should not be a problem\nfor most Galaxy instances.\n\n# Enable Galaxy to use BioContainers (Docker) <a name=\"auto-exec-tools-in-docker\"/> [[toc]](#toc)\nThis is a very cool feature where Galaxy automatically detects that your tool has an associated docker image, pulls it and runs it for you. These images (when available) have been generated using [mulled](https://docs.galaxyproject.org/en/latest/admin/special_topics/mulled_containers.html).\nTo test, install the [IUC bedtools](https://toolshed.g2.bx.psu.edu/repository?repository_id=8d84903cc667dbe7&changeset_revision=7b3aaff0d78c) from the toolshed. When you try to execute *ClusterBed*, for example, you may get a missing dependency error for *bedtools*. But bedtools has an associated docker image on [quay.io](https://quay.io/).  
Now configure Galaxy as follows:\n\n- Add this environment variable to `docker run`: `-e GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS=True`\n- Persist mulled Singularity caches by mounting `/export` and reusing `/export/container_cache/singularity/mulled` across runs.\n- In `job_conf.xml` configure a Docker enabled destination as follows:\n\n```xml\n<destination id=\"docker_local\" runner=\"local\">\n    <param id=\"docker_enabled\">true</param>\n    <param id=\"docker_volumes\">$galaxy_root:ro,$galaxy_root/database/tmp:rw,$tool_directory:ro,$job_directory:ro,$working_directory:rw,$default_file_path:rw</param>\n    <param id=\"docker_sudo\">false</param>\n</destination>\n```\n\nWhen you execute the tool again, Galaxy will pull the image from Biocontainers ([quay.io/biocontainers](https://quay.io/organization/biocontainers)), run the tool inside of this container to produce the desired output.\n\n# Magic Environment variables <a name=\"Magic-Environment-variables\"/> [[toc]](#toc)\n\n| Name   | Description   |\n|---|---|\n| `ENABLE_TTS_INSTALL`  | Enables the Test Tool Shed during container startup. This change is not persistent. (`ENABLE_TTS_INSTALL=True`)  |\n| `GALAXY_LOGGING` | Enables for verbose logging at Docker stdout. (`GALAXY_LOGGING=full`)  |\n| `BARE` | Disables all default Galaxy tools. (`BARE=True`)  |\n| `NONUSE` |  Disable services during container startup. 
(`NONUSE=cron,proftp,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd`) |\n| `GUNICORN_WORKERS` | Set the number of gunicorn workers (`GUNICORN_WORKERS=2`) |\n| `CELERY_WORKERS` | Set the number of celery workers (`CELERY_WORKERS=2`) |\n| `GALAXY_DOCKER_ENABLED` | Enable Galaxy to use Docker containers if annotated in tools (`GALAXY_DOCKER_ENABLED=False`) |\n| `GALAXY_DOCKER_VOLUMES` | Specify volumes that should be mounted into tool containers (`GALAXY_DOCKER_VOLUMES=\"\"`) |\n| `GALAXY_HANDLER_NUMPROCS` | Set the number of Galaxy handler (`GALAXY_HANDLER_NUMPROCS=2`) |\n| `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` | Installing optional dependencies into the Galaxy virtual environment |\n| `LOAD_PYTHON_DEV_DEPENDENCIES` | Installation of Galaxy's dev dependencies. Needs `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` as well |\n| `GALAXY_AUTO_UPDATE_DB` | Run the Galaxy database migration script during startup |\n| `GALAXY_EXPORT_MARKER` | Override the export marker used to refresh `/export/galaxy`. |\n\n\n# HTTPS Support <a name=\"HTTPS-Support\"/> [[toc]](#toc)\n\nIt's possible to automatically configure your container with HTTPS, either with\ncertificates of your own or by automatically requesting an HTTPS certificate from\nLetsencrypt with the following environment variables:\n\n| Name   | Description   |\n|---|---|\n| `USE_HTTPS` | Set `USE_HTTPS=True` to set up HTTPS via self-signed certificates (CN is set to the value of `GALAXY_DOMAIN` variable, defaulting to `localhost` if no value is provided). If you have your own certificates, copy them to `/export/{server.key,server.crt}`. |\n| `USE_HTTPS_LETSENCRYPT` | Set `USE_HTTPS_LETSENCRYPT=True` to automatically set up HTTPS using Letsencrypt as a certificate authority. (Requires you to also set `GALAXY_DOMAIN`) Note: only set one of `USE_HTTPS` and `USE_HTTPS_LETSENCRYPT` to true. 
|\n| `GALAXY_DOMAIN` | Set `GALAXY_DOMAIN=<your_domain>` so that Letsencrypt can verify that you own the domain you claim to own in order to issue you your HTTPS cert. |\n\n\n# Lite Mode <a name=\"Lite-Mode\" /> [[toc]](#toc)\n\nThe lite mode will only start postgresql and a single Galaxy process, without nginx, gunicorn or any other special feature from the normal mode. In particular there is no support for the export folder or any Magic Environment variables.\n\n```sh\ndocker run -i -t -p 8080:8080 quay.io/bgruening/galaxy startup_lite\n```\n\nThis will also use the standard `job_conf.xml.sample_basic` shipped by Galaxy. If you want to use the regular one from the normal mode you can pass `-j` to the `startup_lite` script.\n\n\n# Extending the Docker Image <a name=\"Extending-the-Docker-Image\" /> [[toc]](#toc)\n\nIf the desired tools are already included in the Tool Shed, building your own personalised Galaxy docker Image (Galaxy flavour) can be done using the following steps:\n\n1. Create a file named `Dockerfile`\n2. Include `FROM quay.io/bgruening/galaxy` at the top of the file. This means that you use the Galaxy Docker Image as base Image and build your own extensions on top of it.\n3. Supply the list of desired tools in a file (`my_tool_list.yml` below). See [this page](https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample) for the file format requirements.\n4. Execute `docker build -t my-docker-test .`\n4a. (if behind proxy). Add the ENV http_proxy and https_proxy variables as IPs (to avoid nameserver resolution problems) as in the example below.\n5. Run your container with `docker run -p 8080:80 my-docker-test`\n6. 
Open your web browser on `http://localhost:8080`\n\nFor a working example, have a look at these  Dockerfiles.\n- [deepTools](http://deeptools.github.io/) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools/Dockerfile)\n- [ChemicalToolBox](https://github.com/bgruening/galaxytools/tree/master/chemicaltoolbox) [Dockerfile](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox/Dockerfile)\n\n```\n# Galaxy - deepTools\n#\n# VERSION       0.2\n\nFROM quay.io/bgruening/galaxy\n\nMAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com\n\nENV GALAXY_CONFIG_BRAND deepTools\n# The following two lines are optional and can be given during runtime\n# with the -e http_proxy='http://yourproxyIP:8080' parameter\nENV http_proxy 'http://yourproxyIP:8080'\nENV https_proxy 'http://yourproxyIP:8080'\n\nWORKDIR /galaxy\n\nRUN add-tool-shed --url 'http://testtoolshed.g2.bx.psu.edu/' --name 'Test Tool Shed'\n\n# Install Visualisation\nRUN install-biojs msa\n\n# Adding the tool definitions to the container\nADD my_tool_list.yml $GALAXY_ROOT_DIR/my_tool_list.yml\n\n# Install deepTools\nRUN install-tools $GALAXY_ROOT_DIR/my_tool_list.yml\n\n# Mark folders as imported from the host.\nVOLUME [\"/export/\", \"/data/\", \"/var/lib/docker\"]\n\n# Expose port 80 (webserver), 21 (FTP server)\nEXPOSE :80\nEXPOSE :21\n\n# Autostart script that is invoked during container start\nCMD [\"/usr/bin/startup\"]\n```\n\nor the [RNA-workbench](https://github.com/bgruening/galaxy-rna-workbench/blob/master/Dockerfile).\nThe RNA-workbench has advanced examples about:\n\n- populating Galaxy data libraries\n\n  ```bash\n    setup-data-libraries -i $GALAXY_ROOT_DIR/library_data.yaml -g http://localhost:8080\n        -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD\n  ```\n\nThe actual data is references in a YAML file similar this [one](https://github.com/bgruening/galaxy-rna-workbench/blob/master/library_data.yaml).\n\n- installing 
workflows\n\n  ```bash\n      workflow-install --workflow_path $GALAXY_HOME/workflows/ -g http://localhost:8080\n          -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD\n  ```\n\nWhere all Galaxy workflows needs to be in one directory, here the `$GALAXY_HOME/workflows/`.\n\n- running Galaxy data-managers to create indices or download data\n\n  ```bash\n      run-data-managers -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD -g http://localhost:8080\n          --config data_manager_rna_seq.yaml\n  ```\n\nThe data-managers can be configured and specified in a YAML file similar to this [one](https://github.com/galaxyproject/training-material/blob/master/RNA-Seq/docker/data_manager_rna_seq.yaml).\n\n\nIf you host your flavor on GitHub consider to test our build with Travis-CI. This project will help you:\nhttps://github.com/bgruening/galaxy-flavor-testing\n\n## Test matrix <a name=\"Test-matrix\" /> [[toc]](#toc)\n\nThe project includes local test scripts and CI workflows. Use the matrix below to decide what to run.\n\n| Area | Script / Workflow | Requires | Notes |\n| --- | --- | --- | --- |\n| Image build | `docker build -t galaxy:test galaxy/` | Docker | Baseline image build. |\n| Startup sanity | `docker run --rm --privileged galaxy:test /usr/bin/startup2` | Privileged | Confirms services start and CVMFS messaging is sane. |\n| Bioblend | `test/bioblend/test.sh` | Running Galaxy container | Uses a Bioblend test image against Galaxy. |\n| Slurm | `test/slurm/test.sh` | Docker, Slurm test image | Uses external Slurm container; set `GALAXY_IMAGE=galaxy:test` if needed. |\n| SGE (Grid Engine) | `test/gridengine/test.sh` | Docker, SGE test image | Uses ephemeris container to wait for Galaxy. |\n| CVMFS sidecar | `test/cvmfs/test.sh` | Privileged | Builds and validates mount propagation from sidecar. 
|\n| FTP/SFTP | `.github/workflows/single.sh` | Docker, sshpass (CI) | FTP and SFTP checks run in CI; local run skips SFTP if `sshpass` is missing. |\n| /export persistence | `startup.sh` / `startup2.sh` | `/export` volume | Export and cache relocation happens during startup; exercised by CI runs. |\n| HTTPS/TLS | `.github/workflows/single.sh` | Docker | Uses `curl` and `openssl s_client` against port 443. |\n| Tool install smoke | `.github/workflows/single.sh` | Docker | Installs sample tools and verifies tool availability. |\n| Container resolvers | `test/container_resolvers_conf.ci.yml` | Galaxy container | CI uses a minimal resolver config for toolbox resolution tests. |\n| Image analysis (optional) | `.github/workflows/single.sh` | `dive` | Runs only when `dive` is installed. |\n| Single-container CI | `.github/workflows/single_container.yml` | CI | Full container test (privileged). |\n| Multi-test CI | `.github/workflows/single.sh` | CI | Builds image + runs SLURM, SGE, Bioblend; uses buildx cache. 
|\n\nNotes:\n- If `/tmp` is small in CI, set `TMPDIR=/var/tmp` for test scripts.\n- CVMFS sidecar CI builds/pushes on tags; branch pushes run tests only when CVMFS paths change.\n\n\n\n\n## List of Galaxy flavours <a name=\"List-of-Galaxy-flavours\" /> [[toc]](#toc)\n\n- [Aurora Galaxy](https://github.com/statonlab/aurora-galaxy-tools)\n- [SNP analysis Workflows on Docker (sniplay)](https://github.com/ValentinMarcon/docker-galaxy-sniplay)\n- [NCBI-Blast](https://github.com/bgruening/docker-galaxy-blast)\n- [ChemicalToolBox](https://github.com/bgruening/docker-recipes/blob/master/galaxy-chemicaltoolbox)\n- [ballaxy](https://github.com/anhi/docker-scripts/tree/master/ballaxy)\n- [NGS-deepTools](https://github.com/bgruening/docker-recipes/blob/master/galaxy-deeptools)\n- [Galaxy ChIP-exo](https://github.com/gregvonkuster/docker-galaxy-ChIP-exo)\n- [Galaxy Proteomics](https://github.com/bgruening/docker-galaxyp)\n- [Imaging](https://github.com/bgruening/docker-galaxy-imaging)\n- [Constructive Solid Geometry](https://github.com/gregvonkuster/docker-galaxy-csg)\n- [Galaxy for metagenomics](https://github.com/bgruening/galaxy-metagenomics)\n- [Galaxy with the Language Application Grid tools](https://github.com/lappsgrid-incubator/docker-galaxy-lappsgrid)\n- [RNAcommender](https://github.com/gianlucacorrado/galaxy-RNAcommender)\n- [OpenMoleculeGenerator](https://github.com/bgruening/galaxy-open-molecule-generator)\n- [Workflow4Metabolomics](https://github.com/workflow4metabolomics/w4m-docker)\n- [HiC-Explorer](https://github.com/maxplanck-ie/docker-galaxy-hicexplorer)\n- [SNVPhyl](https://github.com/phac-nml/snvphyl-galaxy)\n- [GraphClust](https://github.com/BackofenLab/docker-galaxy-graphclust)\n- [RNA workbench](https://github.com/bgruening/galaxy-rna-workbench)\n- [Cancer Genomics Toolkit](https://github.com/morinlab/tools-morinlab/tree/master/docker)\n- [Clustered Heatmaps for Interactive Exploration of Molecular Profiling 
Data](http://cancerres.aacrjournals.org/content/77/21/e23)\n\n# Integrating non-Tool Shed tools into the container <a name=\"Integrating-non-Tool-Shed-tools-into-the-container\" /> [[toc]](#toc)\n\nWe recommend to use the [Main Galaxy Tool Shed](https://toolshed.g2.bx.psu.edu/) for all your tools and workflows that you would like to share.\nIn rare situations where you cannot share your tools but still want to include them into your Galaxy Docker instance, please follow the next steps.\n\n- Get your tools into the container.\n\n    Mount your tool directory into the container with a separate `-v /home/user/my_galaxy_tools/:/local_tools`.\n\n- Create a `tool_conf.xml` file for your tools.\n\n    This should look similar to the main [`tool_conf.xml`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/tool_conf.xml.sample) file, but references your tools from the new directory. In other words a tool entry should look like this `<tool file=\"/local_tools/application_foo/foo.xml\" />`.\n    Your `tool_conf.xml` should be available from inside of the container. We assume you have it stored under `/local_tools/my_tools.xml`.\n\n- Add the new tool config file to the Galaxy configuration.\n\n    To make Galaxy aware of your new tool configuration file you need to add the path to `tool_config_file`, which is set to `/etc/galaxy/tool_conf.xml`. You can do this during container start by setting the environment variable `-e GALAXY_CONFIG_TOOL_CONFIG_FILE=/etc/galaxy/tool_conf.xml,/local_tools/my_tools.xml`.\n\n\n# Users & Passwords <a name=\"Users-Passwords\" /> [[toc]](#toc)\n\nThe Galaxy Admin User has the username `admin@example.org` and the password `password`.\nThe PostgreSQL username is `galaxy`, the password is `galaxy` and the database name is `galaxy` (I know I was really creative ;)).\nIf you want to create new users, please make sure to use the `/export/` volume. 
Otherwise your user will be removed after your docker session is finished.\n\nThe proftpd server is configured to use the main galaxy PostgreSQL user to access the database and select the username and password. If you want to run the\ndocker container in production, please do not forget to change the user credentials in `/etc/proftpd/proftpd.conf` too.\n\nThe Flower Webapp is `htpasswd` protected with username and password set to `admin`.\n\nRabbitMQ is configured with:\n  - Admin username: `admin`\n  - Admin password: `admin`\n  - Galaxy vhost: `galaxy`\n  - Galaxy username: `galaxy`\n  - Galaxy password: `galaxy`\n  - Flower username: `flower`\n  - Flower password: `flower`\n\n\n# Development <a name=\"Development\" /> [[toc]](#toc)\n\nYou can clone this repository with:\n\n```sh\ngit clone https://github.com/bgruening/docker-galaxy-stable.git\n```\n\nThis repository uses various [Ansible](http://www.ansible.com/) roles as specified in [requirements.yml](galaxy/ansible/requirements.yml) to manage configurations and dependencies. You can install these roles with the following command:\n\n```sh\ncd galaxy/ansible/ && ansible-galaxy install -r requirements.yml -p roles\n```\n\nIf you simply want to change the Galaxy repository and/or the Galaxy branch, from which the container is built, you can do this with Docker `--build-arg` during the `docker build` step. For example you can use these parameters during container build:\n\n```\n --build-arg GALAXY_RELEASE=install_workflow_and_tools\n --build-arg GALAXY_REPO=https://github.com/manabuishii/galaxy\n```\n\nTo keep docker images lean and optimize storage, we recommend using [Dive](https://github.com/wagoodman/dive). It provides an interactive UI that lets you explore each layer of the image, helping you quickly identify files and directories that take up significant space. 
To install Dive, follow the installation instructions provided in the [Dive GitHub repository](https://github.com/wagoodman/dive?tab=readme-ov-file#installation). After building your docker image, use Dive to analyze it:\n\n```bash\ndive <your-docker-image-name>\n```\n\n# Requirements <a name=\"Requirements\" /> [[toc]](#toc)\n\n- [Docker](https://www.docker.io/gettingstarted/#h_installation)\n\n\n# Support & Bug Reports <a name=\"Support-Bug-Reports\" /> [[toc]](#toc)\n\nYou can file an [github issue](https://github.com/bgruening/docker-galaxy-stable/issues) or ask\nus on the [Galaxy development list](http://lists.bx.psu.edu/listinfo/galaxy-dev).\n\nIf you like this service please fill out this survey: https://www.surveymonkey.de/r/denbi-service?sc=rbc&tool=galaxy-docker\n"
  },
  {
    "path": "compose/README.md",
    "content": "\n⚠️ \n\nThe `compose` version of this project is currently not maintained. We update the files and versions as we have time, but it's not a priority at the moment.\nWe will concentrate on the single-container version. If you want to deploy a composable version of Galaxy please have a look at https://github.com/galaxyproject/galaxy-helm or take over the maintainership of this version here :)\n\n⚠️\n\n# Galaxy Docker Compose\n\nThis setup is built on the idea of using a basic docker-compose file and extending it\nfor additional use cases. Therefore the `docker-compose.yml` is the base of the\nwhole setup. By concatenating additional files, you can extend it to use, for\nexample, HTCondor (see [Usage](#usage)).\n\nAll working data (database, virtual environment, etc.) is exported in the\n`EXPORT_DIR`, which defaults to ./export.\n\n\n## Usage\n### First startup\nWhen starting the setup for the first time, the Galaxy container will copy\na bunch of files into the `EXPORT_DIR`. This might take quite some time\nto finish (even 20 minutes or more). Please don't interrupt the setup in\nthis period, as this might result in a broken state of the `EXPORT_DIR`\n(see [Killing while first start up](#killing-while-first-start-up)).\n\n### Basic setup\nSimply run\n\n> docker-compose up\n\nto start Galaxy. In the basic setup, Galaxy together with Nginx as the proxy,\nPostgres as the DB, and RabbitMQ as the message queue is run.\n\nThe default username and password is \"admin\", \"password\" (API key \"fakekey\").\nThose credentials are set at first run and can be tweaked using the environment\nvariables `GALAXY_DEFAULT_ADMIN_USER`, `GALAXY_DEFAULT_ADMIN_EMAIL`,\n`GALAXY_DEFAULT_ADMIN_PASSWORD`, and `GALAXY_DEFAULT_ADMIN_KEY` in the\n`docker-compose.yml` file. 
If you want to change the email address of an admin,\nremember to update the `admin_users` setting of the Galaxy config (also\nsee [Configuration](#configuration) to learn how to configure Galaxy).\n\n### Running in background\nIf you want to run the setup in the background, use the detach option (`-d`):\n\n> docker-compose up -d\n\n### Upgrading to a newer Galaxy version\nWhen not setting `IMAGE_TAG` to a specific version, Docker-Compose will always\nfetch the newest image and therefore Galaxy version available. Depending\non the magnitude of the upgrade, you may need to delete the virtual\nenvironment of Galaxy (EXPORT_PATH/galaxy/.venv) before you start the\nsetup again. The DB migration depends on the `database_auto_migrate`\nsetting for Galaxy (which is not\nset by default and will therefore be `false` normally).\n\n\n## Extending the setup\nBeyond the basic usage, extending the setup is as easy as adding an additional\ndocker-compose extension file. This is done by the [standard docker-compose syntax](https://docs.docker.com/compose/extends/):\n`docker-compose -f docker-compose.yml -f docker-compose.EXTENSION.yml`. Simply\nconcatenate the extensions you want to use. The rest should be handled for you.\n\n### Running a HTCondor cluster\nThe `docker-compose.htcondor.yml` file is responsible for building up\nan HTCondor cluster. Simply run:\n\n> docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up\n\nThis will bring up a \"cluster\" with one master and one executor. Galaxy\nacts like the submit node. To scale\nthe cluster, run the up statement with a `--scale htcondor-executor=n` option.\nThe setup ships with a basic configuration for HTCondor (see the\n`base_config.yml` file). 
To customize the settings, set the appropriate\n`HTCONDOR_MASTER_CONFIG_`, `HTCONDOR_EXECUTOR_CONFIG_`, `HTCONDOR_GALAXY_CONFIG`\nenvironment variables (see [Configuration](#configuration)).\n\n### Running a SLURM cluster\nAppend the `docker-compose.slurm.yml` file to your `docker-compose up` command. This\nwill spin up a small Slurm cluster and configure Galaxy to schedule jobs there.\nTo scale the cluster, run the up statement with a `--scale slurm_node=n` option.\nAs all nodes need to be defined in the slurm.conf file, you will also need to\nset the env variable `SLURM_NODE_COUNT` to the correct node count.\nHere is an example for scaling to three nodes:\n`SLURM_NODE_COUNT=3 docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up --scale slurm_node=3`.\n\nSome background info about the slurm.conf configuration: As said earlier, Slurm\nexpects to have all nodes be defined in the conf file, together with valid\nhostnames. Therefore `galaxy-configurator` automatically adds references\n(the names of the slurm_node-containers) to the nodes by utilizing `SLURM_NODE_COUNT`.\nAs the docker-compose containers can contain underscores, the names are not\nvalid as hostnames (even though they are resolvable from inside the containers).\nTo cope with this problem, the `galaxy-slurm-node-discovery`-container\nuses the Docker API to fetch the correct hostnames and replaces them on the\nfly inside the slurm.conf file.\n\n### Running a Kubernetes Cluster (with kind)\nIt is possible to start a small Kubernetes (k8s) cluster using [kind](https://kind.sigs.k8s.io)\n(Kubernetes in Docker) and let Galaxy run your jobs there. For this use the\n`docker-compose.k8s.yml` file. Note that this extension is only meant\nto run individually (so no Pulsar, HTCondor etc.).\n\nThe `galaxy-kind` container is responsible for starting up your local Kubernetes\ncluster and applying all the configuration the Galaxy-Configurator created. 
You can\nfind these files under `galaxy-configurator/templates/kind`. The `kind_config.yml`\nfile is used to configure Kind itself (also see https://kind.sigs.k8s.io/docs/user/configuration/),\nwhile the files in the `k8s_config` are the configs that will be applied to\nKubernetes using `kubectl apply -f <k8s_config>`. By default, k8s is configured\nto add some persistent volumes (PV) and persistent volume claims (PVC) so jobs\ncan access all the needed files from Galaxy.\nIt is relatively easy to add your own k8s_configs: Simply place your files into the\ntemplate folder (remember to add the `.j2` extension!) and mention it in the\n`kind_configs` variable in the run.sh file of the galaxy-configurator\n(see [Extend the Galaxy-Configurator](#extend-the-galaxy-configurator)).\n\nWhile Kind is starting up the cluster, it blocks Galaxy from starting itself.\nThis is needed as Galaxy will parse the KUBECONFIG (that is created after k8s has started)\nonly once on startup. So don't be surprised if Galaxy is quiet for some time :)\n\nNote that the cluster is being rebuilt on every start (to be more precise,\na `kind delete cluster` is called on shut down), so manual changes will\nbe overwritten if they are not defined in the k8s_config!\n\n### Using Singularity for dependency resolution\nConda is used as the default dependency resolution. To switch to using\nSingularity containers, add the `docker-compose.singularity.yml` file.\nThis will advice Galaxy to - if possible - stick with Singularity\nfor the dependency resolution. See the\n[Galaxy documentation](https://docs.galaxyproject.org/en/master/admin/special_topics/mulled_containers.html)\nfor more information.\n\n### Configuration\nThe `galaxy-configurator` is the central place for configuration\nand is used to configure Galaxy and its\nadditional services (currently Nginx, and Slurm). 
For this, it utilizes\nenvironment variables (set in the docker-compose file) for common configs,\nand the `base_config.yml` file, used for base-configuration that does not\nchange often. For environment variables, there are two categories of\nconfiguration: The ones that contain a `_CONFIG_`\n(like `GALAXY_CONFIG_ADMIN_USERS`) and the ones that don't (like\n`GALAXY_PROXY_PREFIX`). The first category contains configuration\noptions within the tools themselves and they are simply mapped to the\ncorresponding config-file one-to-one (see for example\n[galaxy.yml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/galaxy.yml.sample)\nfor reference). The other category contains options that have some\nlogic within the docker-compose setup. `GALAXY_PROXY_PREFIX`, for example,\ntouches multiple Galaxy and Nginx options, so you don't have to.\n\nThe base of the configurations are [Jinja2](https://jinja.palletsprojects.com/en/2.11.x/)\ntemplates, located at `galaxy-configurator/templates`.\nThe `galaxy-configurator` renders these\ntemplates on startup and saves them in the export-folder to be\nused by the other containers. A diff is created to surface changes\nthat will be applied. To disable the configurator, simply remove the\ncorresponding `*_OVERWRITE_CONFIG` environment variable\n(like `GALAXY_OVERWRITE_CONFIG`) or set it to `false`.\n\nAll options are discussed under [configuration reference](#configuration-reference).\n\n### Use specific Galaxy version or Docker images\nThe `IMAGE_TAG` environment variable allows you to use specific versions of the\nsetup. Say, you want to stay with Galaxy v24.1 for now:\n\n> export IMAGE_TAG=24.1\n> docker-compose up\n\nWithout setting this variable, you will always get updated to the newest\nversion available.\n\n### Restarting\nTo restart the setup (for example after a configuration change), you can simply\nkill (CTRL-C) Docker Compose and re-run `docker-compose ... up`. 
Your data will\nnot be lost, as long as you keep the `export`-folder.\n\n### Using prefix\nIt is possible to host Galaxy under a prefix like example.com/galaxy. For that,\nset the env variable in the `galaxy-configurator` part to\n`GALAXY_PROXY_PREFIX=/your/wanted/prefix` (like `/galaxy`)\nand remember to also update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly.\n\n## More advanced stuff\n### \"SSH\"ing into a container\nWhen facing a bug it may be helpful to have command-line control over a\ncontainer. This is as simple as running `docker exec -it CONTAINER_NAME /bin/bash`.\nFor the galaxy-server container that would mean:\n\n> docker exec -it compose_galaxy-server_1 /bin/bash\n\nNote that not all containers have bash shipped with them. In this case replace\nit by `/bin/sh`.\n\n### Build containers locally\nWhen developing locally, you may come to the point where you need to build\nimages yourself. In most cases adding a `--build` to the docker-compose statement\nshould be enough. It's\nrecommended to build the images using custom tags, so it's easy to switch between\nversions. Simply set `IMAGE_TAG` to something other than `latest`:\n\n> export IMAGE_TAG=bugfix1\n> docker-compose up --build\n\nMaybe you found a bug in Galaxy itself and you want to test it now. 
For this,\nyou can set the `GALAXY_REPO` and `GALAXY_RELEASE` build arguments to your\nown fork and branch.\n\n> docker build galaxy-server -t quay.io/bgruening/galaxy-server:$IMAGE_TAG --build-arg GALAXY_REPO=https://github.com/YOUR-USERNAME/galaxy --build-arg GALAXY_RELEASE=my_custom_branch\n\nSome containers use base-images that share some common dependencies (like\nDocker that is not only used for Galaxy, but also Pulsar, HTCondor, or Slurm).\nAfter re-building these images yourself, you may also need to add\n`--build-arg IMAGE_TAG=your_base_image_tag` and `SETUP_REPO` if your\nbase-images are tagged differently or are stored in a different repository.\n\n### Extend the Galaxy-Configurator\nIt is possible to extend the usage of the configurator, both in extending the\nJinja2 templates, but also in adding additional files.\n\nAll environment variables of the `galaxy-configurator` are accessible\nwithin the templates. Additionally,\nthe configurator parses specific `*_CONFIG_*`\nvariables and makes them accessible as a dict (for example `galaxy` or\n`gravity`). It may be helpful to understand the current use cases\nwithin the templates and how the `customize.py` file (actually just an\nextension of the [J2cli](https://github.com/kolypto/j2cli) parses env\nvariables.\n\nTo add more template files, have a look into the `run.sh` file. For example\nadding a configuration file for Galaxy is as simple as adding an entry\ninto the `galaxy_configs` array.\n\n### Adding additional containers or configurations\nSo you want to extend the setup to - for example - support a new\nWorkload Manager for Galaxy? Or you have a specific configuration\nof Galaxy in mind that goes out of the scope of the basic\n`docker-compose.yml` file? 
Awesome!\nLet's have a look at two examples for how you can create a custom\nextension:\n**HTCondor**:\nThe `docker-compose.htcondor.yml` file is a good example of what\nthe idea of extensions is in the context of this setup.\nThe HTCondor \"cluster\" is based on a single image (`galaxy-htcondor`)\nand, depending on the container's purpose, it gets exposed to\ndifferent volumes. As Galaxy needs some additional files, one volume\nis added to its container. The `galaxy-configurator` part\noverwrites a single\nenvironment variable and sets a new one. The neat thing about this\napproach is that if you don't need\nto run HTCondor, the base setup will work just fine without\nmuch additional ballast. However, adding HTCondor isn't a hassle\neither.\n\n**Singularity**\nChanging a bunch of variables all the time, just to be able to switch\nbetween different setups can become a hassle quickly. The\n`docker-compose.singularity.yml` file is a good example of how you\ncan avoid that. In normal cases, Galaxy should run jobs in the\nshell directly; changing that to Singularity requires some\ndifferent settings. The file is a good example of how you can\nquickly overwrite settings and be able to reuse it for different\noccasions (remember that concatenating this file behind\nHTCondor, Slurm, or Pulsar enables Singularity the same way). Another\nexample would be to create a custom `docker-compose.debug.yml` file\nthat could be used to enable some debug flags or\nsetting `GALAXY_CONFIG_CLEANUP_JOB=never`.\n\n### Running the CI pipeline on your own fork\nThe GitHub Actions workflow used to build, test and deploy this setup\nis independent of any specific username or Docker Registry. 
To run\nthe workflow on your fork, simply\n[set the following secrets](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets):\n* `docker_registry`: The Registry the images should be pushed\nto (`docker.io`, for example)\n* `docker_registry_username`: Your username\n* `docker_registry_password`: Your password\n\n\n## Troubleshooting\n### Killing while first start up\nIf you kill (CTRL-C) Docker Compose while Galaxy is performing the first\nstartup, you may come into the situation where not all files have been properly\nexported. As the exporting is only done for the first start, this can result in\nmissing dependencies. In this case it is good to remove the whole\n`export`-folder (or at least Galaxy related files - the `postgres` folder can\nstay, if wanted).\n\n### Resetting the setup\nTo start from the beginning, you of course need to delete the `export`-folder.\nBut remember to also do a `docker-compose -f <COMPOSE-FILES..> down`, as this\nwill shut down and remove all containers. If you forget this, while still\ndeleting the `export`-folder, the Galaxy container may have problems with\nexporting all necessary files, as they are usually deleted within the container\nafter the first proper startup.\n\n## Testing\nThe setup provides a bunch of different integration tests to run against Galaxy.\nHave a look inside the `tests` folder. There you find the containers that run\nthe tests and their docker-compose files. The containers are essentially just\na wrapper around the test tools to simplify using them. 
Running a test\nis the same as extending\nany other part of the setup: Just concatenate the test file at the end.\nTo run, for example, some Planemo Workflow tests against a Galaxy installation that\nis connected to a HTCondor cluster using Singularity, just enter:\n`docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml\n-f docker-compose.singularity.yml -f tests/docker-compose.test.yml\n-f tests/docker-compose.test.workflows.yml up`. To stop the setup when a test\nhas finished, you may want to add the option `--exit-code-from galaxy-workflow-test`.\nThis returns the exit code of the test container (should be 0 if successful),\nwhich you could use for further automation.\n\nThe tests are run using GitHub Actions on every commit. So feel free to inspect\nthe `.github/workflows/compose.yml` file for more test cases and get inspired\nby them :)\n\n### Planemo workflow tests\nLike the name suggests, this runs [Planemo](https://planemo.readthedocs.io/en/latest/)\nworkflow tests. The container uses the tests from [UseGalaxy.eu](https://github.com/usegalaxy-eu/workflow-testing),\nbut you can mount any test you could think of inside the container at the `/src` path.\nBy default, it will run some select workflows, but you can choose your own\nby setting the `WORKFLOWS` env variable to a comma separated list of paths to some tests\n(e.g. `WORKFLOWS=test1/test1.ga,test2/test2.ga docker-compose ...`).\n\n### Selenium tests\nThe Selenium tests simulate a real user that is accessing Galaxy through the\nbrowser to perform some actions. For that it uses a headless Chrome to run the\ntests from the [Galaxy repo](https://github.com/galaxyproject/galaxy/tree/dev/lib/galaxy_test/selenium).\nThe GitHub Actions currently just run a few of those. 
To select more tests,\nset the env variable `TESTS` to a comma separated list (like `TESTS=navigates_galaxy.py,login.py`).\nNote that you don't need to append the `test_` prefix for every\nsingle file!\n\n### BioBlend tests\nBioBlend has some tests that run against Galaxy. We are using some of them to test\nour setup too. Have a look into the `run.sh` file of the container to see\nwhich tests we have excluded (at least for now).\n\n\n## Configuration reference\nTool specific configuration can be applied via `base_config.yml` or the following\nenvironment variables:\n* `GALAXY_CONFIG_`\n* `GRAVITY_CONFIG_`\n* `NGINX_CONFIG_`\n* `PULSAR_CONFIG_`\n* `HTCONDOR_MASTER_CONFIG_`\n* `HTCONDOR_EXECUTOR_CONFIG_`\n* `HTCONDOR_GALAXY_CONFIG`\n* `SLURM_CONFIG_`\n\nThe following are settings specific to this docker-compose setup:\n### Galaxy\n| Variable                  | Description                                                                                                        |\n|---------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `GALAXY_OVERWRITE_CONFIG` | Enable Galaxy-configurator, which may result in overwriting manual config changes done in `EXPORT_DIR/galaxy/config`.                                                                                                        |\n| `GALAXY_PROXY_PREFIX`     | Host Galaxy under a prefix (like example.com/galaxy). Note that you also need to update `GALAXY_CONFIG_INFRASTRUCTURE_URL` accordingly.                                                                                      |\n| `GALAXY_JOB_DESTINATION`  | The name of the preferred job destination (local, condor, slurm, singularity..) defined in `job_conf.xml`. Generally, this does not need to be changed, as the docker-compose extensions are already taking care of that. |\n| `GALAXY_JOB_RUNNER`       | The job runner Galaxy will use to process jobs. 
Can be `local`, `condor`, `slurm`, `pulsar_rest`, `pulsar_mq`, or `k8s`. |\n| `GALAXY_DEPENDENCY_RESOLUTION` | Determines how Galaxy should resolve dependencies. You can choose between Conda (`conda`) or running them inside a Singularity container (`singularity`).|\n| `GALAXY_PULSAR_URL`       | The URL Galaxy will use to communicate with Pulsar, when choosing the `pulsar_rest` job runner. |\n| `GALAXY_JOB_METRICS_*`    | Enable the corresponding job metrics. Can be `CORE`, `CPUINFO` (`true` or `verbose`), `MEMINFO`, `UNAME`, and `ENV`, also see [job_metrics.xml.sample](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/job_metrics_conf.xml.sample) for reference.\n\n### Nginx\n| Variable                  | Description                                                                                                        |\n|---------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `NGINX_OVERWRITE_CONFIG`  | Also see `GALAXY_OVERWRITE_CONFIG`. |\n| `NGINX_PROXY_READ_TIMEOUT` | Determines how long Nginx will wait (in seconds) for Galaxy to respond to a request until it times out. Defaults to 180 seconds. |\n\n### Pulsar\n| Variable                  | Description                                                                                                        |\n|---------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `PULSAR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |\n| `PULSAR_JOB_RUNNER`       | The job runner Pulsar will use to process jobs. Currently, only `local` is supported, but this will be extended to HTCondor and Slurm in the future. |\n| `PULSAR_NUM_CONCURRENT_JOBS` | The number of jobs Pulsar will run concurrently. Defaults to 1. |\n| `PULSAR_GALAXY_URL`       | The URL Pulsar will use to send results back to Galaxy. 
Defaults to `http://nginx:80`. |\n| `PULSAR_HOSTNAME`         | The hostname Pulsar will listen to for requests. Defaults to `pulsar`. |\n| `PULSAR_PORT`             | The port Pulsar will listen to for requests. Defaults to 8913. |\n| `PULSAR_LOG_LEVEL`        | The log level (like `DEBUG` or `INFO`) of Pulsar. Defaults to `INFO`. |\n\n### Kind (Kubernetes in Docker)\n| Variable                  | Description                                                                                                        |\n|---------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `KIND_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |\n| `KIND_NODE_COUNT`       | The number of Kubernetes nodes kind should start. Defaults to 1. |\n| `KIND_PV_STORAGE_SIZE`  | The size limit (in Gi) of a Kubernetes Persistent Volume. Defaults to 100.  |\n| `GALAXY_KUBECONFIG`     | The path to the KUBECONFIG that Galaxy will use to connect to Kubernetes. Defaults to the one created with galaxy-kind. |\n| `GALAXY_K8S_PVC`        | The PVCs a job pod should mount. Defaults to `galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps`. |\n| `GALAXY_K8S_DOCKER_REPO_DEFAULT` | The Docker Repo/Registry to use if the resolver could not resolve the proper image for a job. Defaults to `docker.io`. |\n| `GALAXY_K8S_DOCKER_OWNER_DEFAULT` | The Owner/Username to use if the resolver could not resolve the proper image for a job. Is not set by default. |\n| `GALAXY_K8S_DOCKER_IMAGE_DEFAULT` | The Image to use if the resolver could not resolve the proper image for a job. Defaults to `ubuntu`. |\n| `GALAXY_K8S_DOCKER_TAG_DEFAULT` | The Image Tag to use if the resolver could not resolve the proper image for a job. Defaults to `22.04`. 
|\n\n### HTCondor\n| Variable                    | Description                                                                                                        |\n|-----------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `HTCONDOR_OVERWRITE_CONFIG` | Also see `GALAXY_OVERWRITE_CONFIG`. |\n\n### Slurm\n| Variable                  | Description                                                                                                        |\n|---------------------------|--------------------------------------------------------------------------------------------------------------------|\n| `SLURM_OVERWRITE_CONFIG`  | Also see `GALAXY_OVERWRITE_CONFIG`. |\n| `SLURM_NODE_COUNT`        | The number of Slurm nodes running. This needs to be changed when scaling the setup (eg. `docker-compose up --scale slurm_node=n`) to let the Slurm controller know of all available nodes. |\n| `SLURM_NODE_CPUS`         | Number of CPUs per node. Defaults to 1. |\n| `SLURM_NODE_MEMORY`       | Amount of memory per node. Defaults to 1024. |\n| `SLURM_NODE_HOSTNAME`     | Docker Compose adds a prefix in front of the container names by default. Change this value to the name of your setup and `_slurm_node` (e.g. `compose_slurm_node`) to ensure a correct mapping of the Slurm nodes. 
|\n\n### Github Workflow Tests (Branch 24.1)\n| Setup                  | bioblend           | workflow ard       | workflow quality_control | workflow wf3-shed-tools (example1) | selenium           |\n|------------------------|--------------------|--------------------|--------------------------|------------------------------------|--------------------|\n| Galaxy Base            | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| Galaxy Proxy Prefix    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :x:                |\n| HTCondor               | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| Slurm                  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| Pulsar-REST            | :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |\n| Pulsar-MQ              | :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |\n| k8s                    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| Singularity            | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| Pulsar-MQ + Singularity| :heavy_check_mark: | :heavy_check_mark: | :x:                      | :heavy_check_mark:                 | :heavy_check_mark: |\n| Slurm + Singularity    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       | :heavy_check_mark:                 | :heavy_check_mark: |\n| HTCondor + Singularity | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark:       
| :heavy_check_mark:                 | :heavy_check_mark: |\n\n\nImplemented: :heavy_check_mark:   \nNot Implemented: :x:\n\n"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/Dockerfile",
    "content": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG\n\n# Base dependencies\nRUN apt update && apt install --no-install-recommends gnupg2 curl -y \\\n    && /usr/bin/common_cleanup.sh\n\n# Install HTCondor\nENV DEBIAN_FRONTEND=noninteractive\nRUN curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \\\n    && echo \"deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main\" >> /etc/apt/sources.list \\\n    && apt update && apt install --no-install-recommends htcondor -y \\\n    && rm -f /etc/condor/condor_config.local \\\n    && /usr/bin/common_cleanup.sh\n\n# Install Slurm client\nENV MUNGE_USER=munge \\\n    MUNGE_UID=1200 \\\n    MUNGE_GID=1200\nRUN groupadd -r $MUNGE_USER -g $MUNGE_GID \\\n    && useradd -u $MUNGE_UID -r -g $MUNGE_USER $MUNGE_USER \\\n    && echo \"deb http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main\" >> /etc/apt/sources.list \\\n    && echo \"deb-src http://ppa.launchpad.net/natefoo/slurm-drmaa/ubuntu jammy main\" >> /etc/apt/sources.list \\\n    && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8DE68488997C5C6BA19021136F2CC56412788738 \\\n    && apt update \\\n    && apt install --no-install-recommends python3-distutils slurm-client slurmd slurmctld slurm-drmaa1 -y \\\n    && apt --no-install-recommends install munge libmunge-dev -y \\\n    && ln -s /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \\\n    && /usr/bin/common_cleanup.sh\n\n# Install CVMFS\nRUN apt update \\\n    && apt install wget lsb-release -y \\\n    && wget https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest_all.deb \\\n    && dpkg -i cvmfs-release-latest_all.deb \\\n    && rm -f cvmfs-release-latest_all.deb \\\n    && apt update \\\n    && apt install --no-install-recommends cvmfs -y \\\n    && mkdir /srv/cvmfs 
\\\n    && /usr/bin/common_cleanup.sh\nCOPY files/cvmfs /etc/cvmfs\n"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/common_cleanup.sh",
    "content": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your application\nfind / -name '*.pyc' -delete\n\nfind / -name '*.log' -delete\nfind / -name '.cache' -delete\nrm -rf /var/lib/apt/lists/*\nrm -rf /var/cache/*\n\n# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles\ntruncate -s 0 /var/log/*log || true\ntruncate -s 0 /var/log/**/*log || true\n"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/default.local",
    "content": "CVMFS_REPOSITORIES=\"data.galaxyproject.org,singularity.galaxyproject.org\"\nCVMFS_HTTP_PROXY=\"DIRECT\"\nCVMFS_QUOTA_LIMIT=\"4000\"\nCVMFS_CACHE_BASE=\"/srv/cvmfs/cache\"\n"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/domain.d/galaxyproject.org.conf",
    "content": "CVMFS_SERVER_URL=\"http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@;http://cvmfs1-mel0.gvl.org.au/cvmfs/@fqrn@;http://cvmfs1-ufr0.galaxyproject.eu/cvmfs/@fqrn@\"\nCVMFS_KEYS_DIR=/etc/cvmfs/keys/galaxyproject.org\nCVMFS_USE_GEOAPI=\"yes\"\n"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub",
    "content": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt\n6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE\n3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj\nGzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH\nmaSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs\nNVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT\nowIDAQAB\n-----END PUBLIC KEY-----"
  },
  {
    "path": "compose/base-images/galaxy-cluster-base/files/cvmfs/keys/galaxyproject.org/singularity.galaxyproject.org.pub",
    "content": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt\n6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE\n3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj\nGzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH\nmaSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs\nNVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT\nowIDAQAB\n-----END PUBLIC KEY-----"
  },
  {
    "path": "compose/base-images/galaxy-container-base/Dockerfile",
    "content": "FROM buildpack-deps:22.04 as build_apptainer\n\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\n\n# Install Go (only needed for building apptainer)\nENV GO_VERSION=1.22.7\nRUN apt update && apt install --no-install-recommends cryptsetup-bin uuid-dev libseccomp-dev libfuse-dev libfuse3-dev -y \\\n    && wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz \\\n    && tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz \\\n    && rm go${GO_VERSION}.linux-amd64.tar.gz \\\n    && /usr/bin/common_cleanup.sh\n\nENV PATH=/usr/local/go/bin:${PATH}\nENV APPTAINER_VERSION=1.3.4\nRUN wget https://github.com/apptainer/apptainer/releases/download/v${APPTAINER_VERSION}/apptainer-${APPTAINER_VERSION}.tar.gz \\\n    && mkdir -p apptainer \\\n    && tar -xzf apptainer-${APPTAINER_VERSION}.tar.gz --strip-components=1 -C apptainer \\\n    && cd apptainer \\\n    && ./mconfig --with-suid \\\n    && make -C builddir \\\n    && /usr/bin/common_cleanup.sh\n\n\n# --- Final image ---\nFROM ubuntu:22.04 as final\n\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\n\n# Base dependencies\nRUN apt update && apt install --no-install-recommends ca-certificates python3-distutils squashfs-tools tzdata -y \\\n    && /usr/bin/common_cleanup.sh\n\n# Install Docker\nRUN apt update \\\n    && apt install --no-install-recommends docker.io -y \\\n    && /usr/bin/common_cleanup.sh\n\n# Install Apptainer\nCOPY --from=build_apptainer /apptainer /apptainer\nRUN apt update && apt install --no-install-recommends make -y \\\n    && make -C /apptainer/builddir install \\\n    && apt remove make -y \\\n    && rm -rf /apptainer \\\n    && sed -e '/bind path = \\/etc\\/localtime/s/^/#/g' -i /usr/local/etc/apptainer/apptainer.conf \\\n    && /usr/bin/common_cleanup.sh\n"
  },
  {
    "path": "compose/base-images/galaxy-container-base/files/common_cleanup.sh",
    "content": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduced the container size\n# at the cost of the startup time of your application\nfind / -name '*.pyc' -delete\n\nfind / -name '*.log' -delete\nfind / -name '.cache' -delete\nrm -rf /var/lib/apt/lists/*\nrm -rf /var/cache/*\n\n# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles\ntruncate -s 0 /var/log/*log || true\ntruncate -s 0 /var/log/**/*log || true\n"
  },
  {
    "path": "compose/base_config.yml",
    "content": "gravity:\n  process_manager: supervisor\n  galaxy_root: /galaxy\n  virtualenv: /galaxy/.venv\n  gunicorn:\n    enable: True\n    bind: 0.0.0.0:5555\n    workers: 2\n  celery:\n    enable: true\n    enable_beat: true\n    concurrency: 2\n  handlers:\n    handler:\n      processes: 2\n      pools:\n        - job-handlers\n        - workflow-schedulers\n\ngalaxy:\n  tool_dependency_dir: /tool_deps\n  tool_data_table_config_path: /cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml\n  tus_upload_store: /tus_upload_store\n  enable_celery_tasks: true\n  celery_conf:\n    result_backend: redis://redis:6379/0\n\npulsar:\n  conda_auto_init: True\n  conda_auto_install: True\n  tool_dependency_dir: dependencies\n  dependency_resolution:\n    resolvers:\n      - type: conda\n        auto_init: true\n        auto_install: true\n      - type: conda\n        versionless: true\n\n# Probably needs more polishing, but at least it works..\nslurm:\n  SlurmctldHost: \"slurmctld\"\n  AuthType: \"auth/munge\"\n  CryptoType: \"crypto/munge\"\n  MpiDefault: \"none\"\n  ProctrackType: \"proctrack/pgid\"\n  ReturnToService: \"1\"\n  SlurmctldPidFile: \"/var/run/slurmctld.pid\"\n  SlurmctldPort: \"6817\"\n  SlurmdPidFile: \"/var/run/slurmd.pid\"\n  SlurmdPort: \"6818\"\n  SlurmdSpoolDir: \"/tmp/slurmd\"\n  SlurmUser: \"slurm\"\n  StateSaveLocation: \"/tmp/slurm\"\n  SwitchType: \"switch/none\"\n  TaskPlugin: \"task/none\"\n  InactiveLimit: \"0\"\n  KillWait: \"30\"\n  MinJobAge: \"300\"\n  SlurmctldTimeout: \"120\"\n  SlurmdTimeout: \"300\"\n  Waittime: \"0\"\n  SchedulerType: \"sched/backfill\"\n  SelectType: \"select/cons_res\"\n  SelectTypeParameters: \"CR_Core_Memory\"\n  AccountingStorageType: \"accounting_storage/none\"\n  AccountingStoreFlags: \"job_comment\"\n  ClusterName: \"Cluster\"\n  JobCompType: \"jobcomp/none\"\n  JobAcctGatherFrequency: \"30\"\n  JobAcctGatherType: 
\"jobacct_gather/none\"\n  SlurmctldDebug: info\n  SlurmdDebug: info\n\nhtcondor_galaxy:\n  CONDOR_HOST: \"htcondor-master\"\n  ALLOW_ADMINISTRATOR: \"*\"\n  ALLOW_OWNER: \"*\"\n  ALLOW_READ: \"*\"\n  ALLOW_WRITE: \"*\"\n  ALLOW_CLIENT: \"*\"\n  ALLOW_DAEMON: \"*\"\n  ALLOW_NEGOTIATOR: \"*\"\n  DAEMON_LIST: \"MASTER, SCHEDD\"\n  UID_DOMAIN: \"galaxy\"\n  DISCARD_SESSION_KEYRING_ON_STARTUP: \"False\"\n  TRUST_UID_DOMAIN: \"true\"\n  SEC_PASSWORD_FILE: \"/var/lib/condor/pool_password\"\n  SEC_DAEMON_AUTHENTICATION: \"REQUIRED\"\n  SEC_DAEMON_INTEGRITY: \"REQUIRED\"\n  SEC_DAEMON_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_NEGOTIATOR_AUTHENTICATION: \"REQUIRED\"\n  SEC_NEGOTIATOR_INTEGRITY: \"REQUIRED\"\n  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_CLIENT_AUTHENTICATION_METHODS: \"FS, PASSWORD\"\n\nhtcondor_master:\n  BASE_CGROUP: \"\"\n  CONDOR_HOST: \"$(FULL_HOSTNAME)\"\n  DAEMON_LIST: \"MASTER, COLLECTOR, NEGOTIATOR, SCHEDD\"\n  DISCARD_SESSION_KEYRING_ON_STARTUP: \"False\"\n  TRUST_UID_DOMAIN: \"True\"\n  ALLOW_ADMINISTRATOR: \"*\"\n  ALLOW_OWNER: \"*\"\n  ALLOW_READ: \"*\"\n  ALLOW_WRITE: \"*\"\n  ALLOW_NEGOTIATOR: \"*\"\n  ALLOW_NEGOTIATOR_SCHEDD: \"*\"\n  ALLOW_WRITE_COLLECTOR: \"*\"\n  ALLOW_WRITE_STARTD: \"*\"\n  ALLOW_READ_COLLECTOR: \"*\"\n  ALLOW_READ_STARTD: \"*\"\n  ALLOW_CLIENT: \"*\"\n  ALLOW_DAEMON: \"*\"\n  DOCKER_IMAGE_CACHE_SIZE: \"20\"\n  UID_DOMAIN: \"galaxy\"\n  TRUST_UID_DOMAIN: \"TRUE\"\n  SEC_PASSWORD_FILE: \"/var/lib/condor/pool_password\"\n  SEC_DAEMON_AUTHENTICATION: \"REQUIRED\"\n  SEC_DAEMON_INTEGRITY: \"REQUIRED\"\n  SEC_DAEMON_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_NEGOTIATOR_AUTHENTICATION: \"REQUIRED\"\n  SEC_NEGOTIATOR_INTEGRITY: \"REQUIRED\"\n  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_CLIENT_AUTHENTICATION_METHODS: \"FS, PASSWORD\"\n\nhtcondor_executor:\n  CONDOR_HOST: \"htcondor-master\"\n  DAEMON_LIST: \"MASTER, STARTD\"\n  DISCARD_SESSION_KEYRING_ON_STARTUP: \"False\"\n  TRUST_UID_DOMAIN: 
\"true\"\n  NUM_SLOTS: \"1\"\n  NUM_SLOTS_TYPE_1: \"1\"\n  BASE_CGROUP: \"\"\n  ALLOW_ADMINISTRATOR: \"*\"\n  ALLOW_OWNER: \"*\"\n  ALLOW_READ: \"*\"\n  ALLOW_WRITE: \"*\"\n  ALLOW_CLIENT: \"*\"\n  ALLOW_DAEMON: \"*\"\n  ALLOW_NEGOTIATOR_SCHEDD: \"*\"\n  ALLOW_WRITE_COLLECTOR: \"*\"\n  ALLOW_WRITE_STARTD: \"*\"\n  ALLOW_READ_COLLECTOR: \"*\"\n  ALLOW_READ_STARTD: \"*\"\n  UID_DOMAIN: \"galaxy\"\n  SCHED_NAME: \"htcondor-master\"\n  SEC_PASSWORD_FILE: \"/var/lib/condor/pool_password\"\n  SEC_DAEMON_AUTHENTICATION: \"REQUIRED\"\n  SEC_DAEMON_INTEGRITY: \"REQUIRED\"\n  SEC_DAEMON_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_NEGOTIATOR_AUTHENTICATION: \"REQUIRED\"\n  SEC_NEGOTIATOR_INTEGRITY: \"REQUIRED\"\n  SEC_NEGOTIATOR_AUTHENTICATION_METHODS: \"PASSWORD\"\n  SEC_CLIENT_AUTHENTICATION_METHODS: \"FS, PASSWORD\"\n"
  },
  {
    "path": "compose/docker-compose.htcondor.yml",
    "content": "# Extend Galaxy to run jobs using HTCondor.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.htcondor.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - GALAXY_JOB_RUNNER=condor\n      - HTCONDOR_OVERWRITE_CONFIG=true\n    volumes:\n      - ${EXPORT_DIR:-./export}/htcondor:/htcondor\n  htcondor-master:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}\n    build: galaxy-htcondor\n    hostname: htcondor-master\n    environment:\n      - HTCONDOR_TYPE=master\n      - HTCONDOR_POOL_PASSWORD=123456789changeme\n    volumes:\n      - ${EXPORT_DIR:-./export}/htcondor:/config\n    networks:\n      - galaxy\n  htcondor-executor:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-htcondor:${IMAGE_TAG:-latest}\n    build: galaxy-htcondor\n    privileged: true\n    environment:\n      - HTCONDOR_TYPE=executor\n      - CONDOR_HOST=htcondor-master\n      - HTCONDOR_POOL_PASSWORD=123456789changeme\n    volumes:\n      - ${EXPORT_DIR:-./export}/htcondor:/config\n      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database\n      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro\n      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro\n      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data\n      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv\n      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps\n      - /var/run/docker.sock:/var/run/docker.sock\n    networks:\n      - galaxy\n  galaxy-server:\n    volumes:\n      - ${EXPORT_DIR:-./export}/htcondor:/htcondor_config\n"
  },
  {
    "path": "compose/docker-compose.k8s.yml",
    "content": "# Extend Galaxy to run jobs on Kubernetes.\n# This will set up Kubernetes using kind (https://kind.sigs.k8s.io).\n# Note that this extension is not compatible with others like Pulsar, HTCondor, Singularity, etc.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.k8s.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - KIND_OVERWRITE_CONFIG=true\n      - GALAXY_JOB_RUNNER=k8s\n      - GALAXY_KUBECONFIG=/kind/.kube/config_in_docker\n    volumes:\n      - ${EXPORT_DIR:-./export}/kind:/kind\n  galaxy-server:\n    volumes:\n      - ${EXPORT_DIR:-./export}/kind:/kind\n    networks:\n      - kind\n  galaxy-kind:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-kind:${IMAGE_TAG:-latest}\n    build: galaxy-kind\n    privileged: true\n    volumes:\n      - ${EXPORT_DIR:-./export}/kind:/kind\n      - /var/run/docker.sock:/var/run/docker.sock\n    networks:\n      - galaxy\n      - kind\nnetworks:\n  kind:\n    name: kind\n"
  },
  {
    "path": "compose/docker-compose.pulsar.mq.yml",
    "content": "# Extend Pulsar to use RabbitMQ (Message Queue) instead of the REST API\n# for communicating with Galaxy.\n# Requirements: `docker-compose.pulsar.yml`\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml -f docker-compose.pulsar.mq.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - GALAXY_JOB_RUNNER=pulsar_mq\n      - PULSAR_CONFIG_MESSAGE_QUEUE_URL=amqp://pulsar:8jfqi9uo2i30fqoifqfo09@pulsar-rabbitmq/pulsar\n      - PULSAR_GALAXY_URL=http://nginx:80\n  pulsar-rabbitmq:\n    image: rabbitmq:alpine\n    container_name: pulsar-rabbitmq\n    hostname: pulsar-rabbitmq\n    environment:\n      - RABBITMQ_DEFAULT_USER=pulsar\n      - RABBITMQ_DEFAULT_PASS=8jfqi9uo2i30fqoifqfo09\n      - RABBITMQ_DEFAULT_VHOST=pulsar\n    volumes:\n      - ${EXPORT_DIR:-./export}/pulsar_rabbitmq:/var/lib/rabbitmq:delegated\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/docker-compose.pulsar.yml",
    "content": "# Extend Galaxy to run jobs using Pulsar. With this setup, you\n# don't need to share the `/galaxy/database` path with Galaxy.\n# Galaxy will send all the needed files for Pulsar, and Pulsar\n# will handle the rest locally on its side.\n# This docker-compose file enables for Galaxy and Pulsar to\n# communicate over HTTP. To enable the MQ, concatenate the\n# docker-compose.pulsar.mq.yml after this one.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.pulsar.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - GALAXY_JOB_RUNNER=pulsar_rest\n      - GALAXY_PULSAR_TRANSPORT=${GALAXY_PULSAR_TRANSPORT:-curl}\n      - PULSAR_OVERWRITE_CONFIG=true\n      - PULSAR_JOB_RUNNER=local\n      - PULSAR_CONFIG_PRIVATE_TOKEN=changemeinproduction\n      - GALAXY_PULSAR_URL=http://pulsar:8913\n    volumes:\n      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config\n  pulsar:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/pulsar:${IMAGE_TAG:-latest}\n    build: pulsar\n    hostname: pulsar\n    privileged: true\n    volumes:\n      - ${EXPORT_DIR:-./export}/pulsar/config:/pulsar/config\n      - ${EXPORT_DIR:-./export}/pulsar/dependencies:/pulsar/dependencies\n      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database\n      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/docker-compose.singularity.yml",
    "content": "# Extend Galaxy to use Singularity for dependency resolution.\n# This is working with the base Galaxy, but also in combination\n# with different job runners, like HTCondor, or Slurm\n# (Pulsar is still WIP).\n# Examples:\n#  * `docker-compose -f docker-compose.yml -f docker-compose.singularity.yml up`\n#  * `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml -f docker-compose.singularity.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - GALAXY_DEPENDENCY_RESOLUTION=singularity\n      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=false\n"
  },
  {
    "path": "compose/docker-compose.slurm.yml",
    "content": "# Extend Galaxy to run jobs using Slurm.\n# Example: `docker-compose -f docker-compose.yml -f docker-compose.slurm.yml up`\nservices:\n  galaxy-configurator:\n    environment:\n      - GALAXY_JOB_RUNNER=slurm\n      - SLURM_OVERWRITE_CONFIG=true\n      - SLURM_NODE_COUNT=${SLURM_NODE_COUNT:-1}\n      - SLURM_NODE_HOSTNAME=compose_slurm_node\n    volumes:\n      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm\n  galaxy-server:\n    volumes:\n      - ${EXPORT_DIR:-./export}/munge:/etc/munge\n      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm\n  slurmctld:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}\n    build: galaxy-slurm\n    command: [\"slurmctld\"]\n    hostname: slurmctld\n    volumes:\n      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm\n      - ${EXPORT_DIR:-./export}/munge:/etc/munge\n    networks:\n      - galaxy\n  slurm_node_discovery:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm-node-discovery:${IMAGE_TAG:-latest}\n    build: galaxy-slurm-node-discovery\n    volumes:\n      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm\n      - /var/run/docker.sock:/var/run/docker.sock\n  slurm_node:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-slurm:${IMAGE_TAG:-latest}\n    build: galaxy-slurm\n    command: [\"slurmd\"]\n    privileged: true\n    labels:\n      slurm_node: true\n    volumes:\n      - ${EXPORT_DIR:-./export}/galaxy/database:/galaxy/database\n      - ${EXPORT_DIR:-./export}/galaxy/tools:/galaxy/tools:ro\n      - ${EXPORT_DIR:-./export}/galaxy/lib/galaxy/tools:/galaxy/lib/galaxy/tools:ro\n      - ${EXPORT_DIR:-./export}/galaxy/tool-data:/galaxy/tool-data\n      - ${EXPORT_DIR:-./export}/galaxy/.venv:/galaxy/.venv\n      - ${EXPORT_DIR:-./export}/tool_deps:/tool_deps\n      - ${EXPORT_DIR:-./export}/slurm_config:/etc/slurm\n      - 
${EXPORT_DIR:-./export}/munge:/etc/munge\n      - /var/run/docker.sock:/var/run/docker.sock\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/docker-compose.yml",
    "content": "services:\n  galaxy-server:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-server:${IMAGE_TAG:-latest}\n    build: galaxy-server\n    environment:\n      - GALAXY_DEFAULT_ADMIN_USER=admin\n      - GALAXY_DEFAULT_ADMIN_EMAIL=admin@galaxy.org\n      - GALAXY_DEFAULT_ADMIN_PASSWORD=password\n      - GALAXY_DEFAULT_ADMIN_KEY=fakekey\n      - HTCONDOR_POOL_PASSWORD=123456789changeme\n    hostname: galaxy-server\n    privileged: True\n    volumes:\n      # This is the directory where all your files from Galaxy will be stored\n      # on your host system\n      - ${EXPORT_DIR:-./export}/:/export/:delegated\n      - ${EXPORT_DIR:-./export}/tus_upload_store:/tus_upload_store:delegated\n      - /var/run/docker.sock:/var/run/docker.sock\n    depends_on:\n      - postgres\n      - rabbitmq\n      - redis\n      - rustus\n    networks:\n      - galaxy\n  # The galaxy-configurator is responsible for the whole configuration of\n  # your setup and should be the central place of configuration.\n  galaxy-configurator:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-configurator:${IMAGE_TAG:-latest}\n    build: galaxy-configurator\n    environment:\n      - EXPORT_DIR=${EXPORT_DIR:-./export}\n      - HOST_PWD=$PWD\n      - GALAXY_OVERWRITE_CONFIG=true\n      - GALAXY_DEPENDENCY_RESOLUTION=conda\n      - GALAXY_JOB_RUNNER=local\n      - GALAXY_CONFIG_ADMIN_USERS=admin@galaxy.org\n      - GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:chaopagoosaequuashie@postgres/galaxy\n      - GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL:-http://localhost}\n      - GALAXY_CONFIG_CONDA_AUTO_INSTALL=true\n      - GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION=amqp://galaxy:vaiJa3ieghai2ief0jao@rabbitmq/galaxy\n      - GALAXY_PROXY_PREFIX=${GALAXY_PROXY_PREFIX:-}\n      - GALAXY_CONFIG_CLEANUP_JOB=onsuccess\n      - NGINX_OVERWRITE_CONFIG=true\n    volumes:\n      - 
${EXPORT_DIR:-./export}/galaxy/config:/galaxy/config\n      - ${EXPORT_DIR:-./export}/nginx:/etc/nginx\n      - ./base_config.yml:/base_config.yml\n      - ./galaxy-configurator/templates:/templates\n  # The database for Galaxy\n  postgres:\n    image: postgres:15\n    hostname: postgres\n    environment:\n      - POSTGRES_PASSWORD=chaopagoosaequuashie\n      - POSTGRES_USER=galaxy\n      - POSTGRES_DB=galaxy\n    volumes:\n      - ${EXPORT_DIR:-./export}/postgres/:/var/lib/postgresql/data:delegated\n    networks:\n      - galaxy\n  # The proxy server. All web-traffic is going through here, so we can\n  # offload static file serving\n  # (https://docs.galaxyproject.org/en/master/admin/production.html#using-a-proxy-server)\n  nginx:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-nginx:${IMAGE_TAG:-latest}\n    build: galaxy-nginx\n    ports:\n      - 80:80\n    volumes:\n      - ${EXPORT_DIR:-./export}/nginx:/config:ro\n      - ${EXPORT_DIR:-./export}/galaxy/static:/export/galaxy/static:ro\n      - ${EXPORT_DIR:-./export}/galaxy/config/plugins:/galaxy/config/plugins:ro\n    depends_on:\n      - galaxy-server\n    networks:\n      - galaxy\n  # Message queue for better performance\n  rabbitmq:\n    image: rabbitmq:alpine\n    container_name: galaxy-rabbitmq\n    hostname: rabbitmq\n    environment:\n      - RABBITMQ_DEFAULT_USER=galaxy\n      - RABBITMQ_DEFAULT_PASS=vaiJa3ieghai2ief0jao\n      - RABBITMQ_DEFAULT_VHOST=galaxy\n    volumes:\n      - ${EXPORT_DIR:-./export}/rabbitmq:/var/lib/rabbitmq:delegated\n    networks:\n      - galaxy\n  # Backend for Celery\n  redis:\n    image: redis:alpine\n    container_name: galaxy-redis\n    hostname: redis\n    volumes:\n      - ${EXPORT_DIR:-./export}/redis:/data:delegated\n    networks:\n      - galaxy\n  # For file uploads\n  rustus:\n    image: s3rius/rustus:0.7.6-alpine\n    container_name: galaxy-rustus\n    hostname: rustus\n    environment:\n      - 
RUSTUS_STORAGE=file-storage\n      - RUSTUS_DATA_DIR=/data/\n      - RUSTUS_URL=${GALAXY_PROXY_PREFIX:-}/api/upload/resumable_upload\n      - RUSTUS_HOOKS_HTTP_URLS=http://nginx${GALAXY_PROXY_PREFIX:-}/api/upload/hooks\n      - RUSTUS_HOOKS_HTTP_PROXY_HEADERS=X-Api-Key,Cookie\n      - RUSTUS_HOOKS=pre-create\n      - RUSTUS_HOOKS_FORMAT=tusd\n      - RUSTUS_INFO_STORAGE=redis-info-storage\n      - RUSTUS_INFO_DB_DSN=redis://redis:6379/1\n      - RUSTUS_MAX_BODY_SIZE=20000000\n      - RUSTUS_BEHIND_PROXY=true\n    volumes:\n      - ${EXPORT_DIR:-./export}/tus_upload_store:/data:delegated\n    depends_on:\n      - redis\n    networks:\n      - galaxy\nnetworks:\n  galaxy:\n"
  },
  {
    "path": "compose/galaxy-configurator/Dockerfile",
    "content": "FROM alpine:3.17\n\nRUN apk add --no-cache bash python3 py3-pip \\\n    && pip3 install j2cli[yaml] jinja2-ansible-filters\n\nCOPY ./templates /templates\nCOPY ./customize.py /customize.py\nCOPY ./run.sh /usr/bin/run.sh\n\nENTRYPOINT \"/usr/bin/run.sh\"\n"
  },
  {
    "path": "compose/galaxy-configurator/customize.py",
    "content": "import os\n\n\ndef j2_environment_params():\n    \"\"\" Extra parameters for the Jinja2 Environment\n    Add AnsibleCoreFiltersExtension for filters known in Ansible\n    like `to_nice_yaml`\n    \"\"\"\n    return dict(\n        extensions=('jinja2_ansible_filters.AnsibleCoreFiltersExtension',),\n    )\n\n\ndef alter_context(context):\n    \"\"\"\n    Translates env variables that start with a specific prefix\n    and combines them into one dict (like all GALAXY_CONFIG_*\n    are stored at galaxy.*).\n    Variables that are stored in an input file overwrite\n    the input from env.\n\n    TODO: Unit test\n    \"\"\"\n    new_context = dict(os.environ)\n\n    translations = {\n      \"GALAXY_CONFIG_\":         \"galaxy\",\n      \"GRAVITY_CONFIG_\":        \"gravity\",\n      \"GALAXY_JOB_METRICS_\":    \"galaxy_job_metrics\",\n      \"NGINX_CONFIG_\":          \"nginx\",\n      \"SLURM_CONFIG_\":          \"slurm\",\n      \"HTCONDOR_GALAXY_\":       \"htcondor_galaxy\",\n      \"HTCONDOR_MASTER_\":       \"htcondor_master\",\n      \"HTCONDOR_EXECUTOR_\":     \"htcondor_executor\",\n      \"PULSAR_CONFIG_\":         \"pulsar\"\n    }\n\n    # Add values from possible input file if existent\n    if context is not None and len(context) > 0:\n        new_context.update(context)\n\n    # Translate string-boolean to Python boolean\n    for key, value in new_context.items():\n        if not isinstance(value, str):\n            continue\n        if value.lower() == \"true\":\n            new_context[key] = True\n        elif value.lower() == \"false\":\n            new_context[key] = False\n\n    for to in translations.values():\n        if to not in new_context:\n            new_context[to] = {}\n\n    for key, value in new_context.items():\n        for frm, to in translations.items():\n            if key.startswith(frm):\n                # Format key depending on it being uppercase or not\n                # (to cope with different formatings: compare 
Slurm\n                # with Galaxy)\n                key = key[len(frm):]\n                if key.isupper():\n                    key = key.lower()\n\n                new_context[to][key] = value\n\n    context = new_context\n\n    # Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative\n    if \"HOST_EXPORT_DIR\" not in context and \"EXPORT_DIR\" in context \\\n            and \"HOST_PWD\" in context:\n        if context[\"EXPORT_DIR\"].startswith(\"./\"):\n            context[\"HOST_EXPORT_DIR\"] = context[\"HOST_PWD\"] \\\n                                         + context[\"EXPORT_DIR\"][1:]\n        else:\n            context[\"HOST_EXPORT_DIR\"] = context[\"EXPORT_DIR\"]\n\n    return context\n"
  },
  {
    "path": "compose/galaxy-configurator/run.sh",
    "content": "#!/bin/bash\n\n# Set default config dirs\nexport GALAXY_CONF_DIR=${GALAXY_CONF_DIR:-/galaxy/config} \\\n       NGINX_CONF_DIR=${NGINX_CONF_DIR:-/etc/nginx/} \\\n       SLURM_CONF_DIR=${SLURM_CONF_DIR:-/etc/slurm} \\\n       HTCONDOR_CONF_DIR=${HTCONDOR_CONF_DIR:-/htcondor} \\\n       PULSAR_CONF_DIR=${PULSAR_CONF_DIR:-/pulsar/config} \\\n       KIND_CONF_DIR=${KIND_CONF_DIR:-/kind}\n\necho \"Locking all configurations\"\nlocks=(\"$GALAXY_CONF_DIR\" \"$SLURM_CONF_DIR\" \"$HTCONDOR_CONF_DIR\" \"$PULSAR_CONF_DIR\" \"$KIND_CONF_DIR\")\nfor lock in \"${locks[@]}\"; do\n  echo \"Locking $lock\"\n  touch \"${lock}/configurator.lock\"\ndone\n\n# Nginx configuration\nif [ \"$NGINX_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo \"NGINX_OVERWRITE_CONFIG is not true. Skipping configuration of Nginx\"\nelse\n  nginx_configs=( \"nginx.conf\" )\n\n  for conf in \"${nginx_configs[@]}\"; do\n    echo \"Configuring $conf\"\n    j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/nginx/$conf.j2\" /base_config.yml\n    echo \"The following changes will be applied to $conf:\"\n    diff \"${NGINX_CONF_DIR}/$conf\" \"/tmp/$conf\"\n    mv -f \"/tmp/$conf\" \"${NGINX_CONF_DIR}/$conf\"\n  done\nfi\n\n# Slurm configuration\nif [ \"$SLURM_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo \"SLURM_OVERWRITE_CONFIG is not true. 
Skipping configuration of Slurm\"\nelse\n  slurm_configs=( \"slurm.conf\" )\n\n  for conf in \"${slurm_configs[@]}\"; do\n    echo \"Configuring $conf\"\n    j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/slurm/$conf.j2\" /base_config.yml\n    echo \"The following changes will be applied to $conf:\"\n    diff \"${SLURM_CONF_DIR}/$conf\" \"/tmp/$conf\"\n    mv -f \"/tmp/$conf\" \"${SLURM_CONF_DIR}/$conf\"\n  done\n\n  rm \"${SLURM_CONF_DIR}/configurator.lock\"\n  echo \"Lock for Slurm config released\"\nfi\n\n# HTCondor configuration\nif [ \"$HTCONDOR_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo \"HTCONDOR_OVERWRITE_CONFIG is not true. Skipping configuration of HTCondor\"\nelse\n  htcondor_configs=( \"galaxy.conf\" \"master.conf\" \"executor.conf\" )\n\n  for conf in \"${htcondor_configs[@]}\"; do\n    echo \"Configuring $conf\"\n    j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/htcondor/$conf.j2\" /base_config.yml\n    echo \"The following changes will be applied to $conf:\"\n    diff \"${HTCONDOR_CONF_DIR}/$conf\" \"/tmp/$conf\"\n    mv -f \"/tmp/$conf\" \"${HTCONDOR_CONF_DIR}/$conf\"\n  done\n\n  rm \"${HTCONDOR_CONF_DIR}/configurator.lock\"\n  echo \"Lock for HTCondor config released\"\nfi\n\n# Pulsar configuration\nif [ \"$PULSAR_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo \"PULSAR_OVERWRITE_CONFIG is not true. 
Skipping configuration of Pulsar\"\nelse\n  pulsar_configs=( \"server.ini\" \"app.yml\" )\n\n  for conf in \"${pulsar_configs[@]}\"; do\n    echo \"Configuring $conf\"\n    j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/pulsar/$conf.j2\" /base_config.yml\n    echo \"The following changes will be applied to $conf:\"\n    diff \"${PULSAR_CONF_DIR}/$conf\" \"/tmp/$conf\"\n    mv -f \"/tmp/$conf\" \"${PULSAR_CONF_DIR}/$conf\"\n  done\n\n  rm \"${PULSAR_CONF_DIR}/configurator.lock\"\n  echo \"Lock for Pulsar config released\"\nfi\n\n# Kind configuration\nif [ \"$KIND_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo \"KIND_OVERWRITE_CONFIG is not true. Skipping configuration of Kind\"\nelse\n  kind_configs=( \"kind_config.yml\" \"k8s_config/persistent_volumes.yml\" \"k8s_config/pv_claims.yml\" )\n  mkdir /tmp/k8s_config\n  mkdir \"${KIND_CONF_DIR}/k8s_config\"\n\n  for conf in \"${kind_configs[@]}\"; do\n    echo \"Configuring $conf\"\n    j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/kind/$conf.j2\" /base_config.yml\n\n    echo \"The following changes will be applied to $conf:\"\n    diff \"${KIND_CONF_DIR}/$conf\" \"/tmp/$conf\"\n    mv -f \"/tmp/$conf\" \"${KIND_CONF_DIR}/$conf\"\n  done\n\n  rm \"${KIND_CONF_DIR}/configurator.lock\"\n  echo \"Lock for Kind config released\"\n  sleep 5\n  echo \"Waiting for Kind to create the cluster\"\n  until [ -f \"${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}\" ] && echo Found KUBECONFIG; do\n    sleep 0.1;\n  done;\n  chmod a+r \"${GALAXY_KUBECONFIG:-${KIND_CONF_DIR}/.kube/config_in_docker}\"\nfi\n\necho \"Releasing all locks (except Galaxy) if it didn't happen already\"\nlocks=(\"$SLURM_CONF_DIR\" \"$HTCONDOR_CONF_DIR\" \"$PULSAR_CONF_DIR\" \"$KIND_CONF_DIR\")\nfor lock in \"${locks[@]}\"; do\n  echo \"Unlocking $lock\"\n  rm \"${lock}/configurator.lock\"\ndone\n\n# Galaxy configuration\nif [ \"$GALAXY_OVERWRITE_CONFIG\" != \"true\" ]; then\n  echo 
\"GALAXY_OVERWRITE_CONFIG is not true. Skipping configuration of Galaxy\"\n  echo \"Lock for Galaxy config released\"\n  rm \"${GALAXY_CONF_DIR}/configurator.lock\"\n  exit 0\nfi\n\ncd \"${GALAXY_CONF_DIR}\" || { echo \"Error: Could not find Galaxy config dir\"; exit 1; }\n\necho \"Waiting for Galaxy config dir to be initially populated (in case of first startup)\"\nuntil [ \"$(ls -p | grep -v /)\" != \"\" ] && echo Galaxy config populated; do\n  sleep 0.5;\ndone;\n\nif [ ! -f /base_config.yml ]; then\n  echo \"Warning: 'base_config.yml' does not exist. Configuration will solely happen through env!\"\n  touch /base_config.yml\nfi\n\ngalaxy_configs=( \"job_conf.xml\" \"galaxy.yml\" \"job_metrics.xml\" \"container_resolvers_conf.yml\" \"dependency_resolvers_conf.xml\" \"GALAXY_PROXY_PREFIX.txt\" )\n\nfor conf in \"${galaxy_configs[@]}\"; do\n  echo \"Configuring $conf\"\n  j2 --customize /customize.py --undefined -o \"/tmp/$conf\" \"/templates/galaxy/$conf.j2\" /base_config.yml\n  echo \"The following changes will be applied to $conf:\"\n  diff \"${GALAXY_CONF_DIR}/$conf\" \"/tmp/$conf\"\n  mv -f \"/tmp/$conf\" \"${GALAXY_CONF_DIR}/$conf\"\ndone\n\necho \"Finished configuring Galaxy\"\necho \"Lock for Galaxy config released\"\nrm \"${GALAXY_CONF_DIR}/configurator.lock\"\n\nif [ \"$DONT_EXIT\" = \"true\" ]; then\n  echo \"Integration test detected. Galaxy Configurator will go to sleep (to not interrupt docker-compose).\"\n  sleep infinity\nfi\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/GALAXY_PROXY_PREFIX.txt.j2",
    "content": "{{ GALAXY_PROXY_PREFIX }}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/container_resolvers_conf.yml.j2",
    "content": "# Resolvers that are potentially used by default are uncommented (comments describe under \n# which premises they are in the defaults).\n\n# Note that commented yaml does not have a space after the #\n# while additional explanations do.\n\n# Explicit container resolvers\n# ============================\n\n# get a container description (URI) for an explicit singularity container requirement\n- type: explicit_singularity\n\n# get a cached container description (path) for singularity\n# pulls the container into a cache directory if not yet there\n- type: cached_explicit_singularity\n  # set the cache directory for storing images\n  #cache_directory: database/container_cache/singularity/explicit\n\n# Mulled container resolvers\n# ==========================\n\n# The following uncommented container resolvers are in the defaults\n# if ``enable_mulled_containers`` is set in ``galaxy.yml`` (which is the default).\n\n# get a container description for a cached mulled singularity container\n# checks if the image file exists in `cache_directory`\n- type: cached_mulled_singularity\n  #\n  #cache_directory: database/container_cache/singularity/mulled\n  #\n  # the method for caching directory listings (not the method for image caching)\n  # can be uncached or dir_mtime (the latter only determines the directory listing\n  # if the modification time of the directory changed)\n  #cache_directory_cacher_type: uncached\n\n# Resolves container images from quay.io/NAMESPACE/MULLED_HASH where the\n# mulled hash describes which packages and versions should be in the container\n#\n# These resolvers are generally listed after the cached_* resolvers, so that images\n# are not pulled if they are already cached.\n#\n# When pulling the image file will be stored in the configured cache dir.\n# If auto_install is True the result will point to the cached image file\n# and to quay.io/NAMESPACE/MULLED_HASH otherwise.\n- type: mulled_singularity\n  auto_install: False\n  #namespace: 
biocontainers\n  # In addition to the arguments of `mulled` there are cache_directory\n  # and cache_directory_cacher_type. See the description at `cached_explicit_singularity`\n  # and note the minor difference in the default for `cache_directory`\n  #cache_directory: database/container_cache/singularity/mulled\n  #cache_directory_cacher_type: uncached\n\n# Building container resolvers\n# ----------------------------\n#\n# The following uncommented container resolvers are included in the default\n# if ``docker`` is available\n\n- type: build_mulled_singularity\n  auto_install: False\n  #hash_func: v2\n  #cache_directory: database/container_cache/singularity/mulled\n  #cache_directory_cacher_type: uncached\n\n# Other explicit container resolvers\n# ----------------------------------\n\n#-type: fallback_singularity\n  #identifier: A_VALID_CONTAINER_IDENTIFIER\n#-type: fallback_no_requirements_singularity\n  #identifier: A_VALID_CONTAINER_IDENTIFIER\n#-type: requires_galaxy_environment_singularity\n  #identifier: A_VALID_CONTAINER_IDENTIFIER\n\n# The mapping container resolver allows to specify a list of mappings from tools\n# (tool_id) to containers (type and identifier).\n\n#-type: mapping\n  #mappings:\n  #- container_type: singularity\n     #tool_id: A_TOOL_ID\n     #identifier: A_VALID_CONTAINER_IDENTIFIER\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/dependency_resolvers_conf.xml.j2",
    "content": "<dependency_resolvers>\n  {% if GALAXY_DEPENDENCY_RESOLUTION != 'singularity' %}\n  <!-- the default configuration, first look for dependencies installed from the toolshed -->\n  <tool_shed_packages />\n  <!-- then look for env.sh files in directories according to the \"galaxy packages\" schema.\n       These resolvers can take a base_path attribute to specify where to look for\n       package definitions, but by default look in the directory specified by tool_dependency_dir\n       in Galaxy's config/galaxy.ini -->\n  <galaxy_packages />\n  <!-- check whether the correct version has been installed via conda -->\n  <conda />\n  <!-- look for any version of the dependency installed via conda -->\n  <conda versionless=\"true\" />\n  <!-- look for a \"default\" symlink pointing to a directory containing an\n       env.sh file for the package in the \"galaxy packages\" schema -->\n  <galaxy_packages versionless=\"true\" />\n  {% endif %}\n</dependency_resolvers>\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/galaxy.yml.j2",
    "content": "gravity:\n{{ gravity | to_nice_yaml(indent=2) | indent(2, first=True) }}\n\ngalaxy:\n{{ galaxy | to_nice_yaml(indent=2) | indent(2, first=True) }}\n\n  {% if GALAXY_PROXY_PREFIX %}\n  galaxy_url_prefix: /{{ GALAXY_PROXY_PREFIX | regex_replace(\"^/\", \"\") | regex_replace(\"/$\", \"\") }}\n  {% endif %}\n\n  {% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' %}\n  enable_mulled_containers: true\n  containers_resolvers_config_file: container_resolvers_conf.yml\n  {% endif %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/job_conf.xml.j2",
    "content": "<?xml version=\"1.0\"?>\n<!-- A sample job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->\n<job_conf>\n    <plugins>\n        <plugin id=\"local\" type=\"runner\" load=\"galaxy.jobs.runners.local:LocalJobRunner\" workers=\"4\"/>\n        <plugin id=\"condor\" type=\"runner\" load=\"galaxy.jobs.runners.condor:CondorJobRunner\"/>\n        <plugin id=\"slurm\" type=\"runner\" load=\"galaxy.jobs.runners.slurm:SlurmJobRunner\">\n            <param id=\"drmaa_library_path\">/usr/lib/slurm-drmaa/lib/libdrmaa.so</param>\n        </plugin>\n        <plugin id=\"pulsar_rest\" type=\"runner\" load=\"galaxy.jobs.runners.pulsar:PulsarRESTJobRunner\">\n            <param id=\"transport\">{{ GALAXY_PULSAR_TRANSPORT | default('curl') }}</param>\n        </plugin>\n        {% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}\n        <plugin id=\"pulsar_mq\" type=\"runner\" load=\"galaxy.jobs.runners.pulsar:PulsarMQJobRunner\">\n            <param id=\"galaxy_url\">{{ PULSAR_GALAXY_URL }}</param>\n            <param id=\"amqp_url\">{{ PULSAR_CONFIG_MESSAGE_QUEUE_URL}}</param>\n            <param id=\"amqp_acknowledge\">True</param>\n            <param id=\"amqp_ack_republish_time\">30</param>\n            <param id=\"amqp_publish_retry\">True</param>\n        </plugin>\n        {% endif -%}\n        {% if GALAXY_JOB_RUNNER == 'k8s' -%}\n        <plugin id=\"k8s\" type=\"runner\" load=\"galaxy.jobs.runners.kubernetes:KubernetesJobRunner\">\n          <param id=\"k8s_config_path\">{{ GALAXY_KUBECONFIG }}</param>\n          <param id=\"k8s_persistent_volume_claims\">{{ GALAXY_K8S_PVC | default('galaxy-root:/galaxy,galaxy-database:/galaxy/database,galaxy-tool-deps:/tool_deps') }}</param>\n        </plugin>\n        {% endif -%}\n    </plugins>\n    <handlers assign_with=\"db-skip-locked\" />\n    <destinations default=\"{{ GALAXY_DEPENDENCY_RESOLUTION | default('conda') }}_{{ GALAXY_JOB_RUNNER | 
default('local') }}\">\n        <destination id=\"local\" runner=\"local\">\n            <env file=\"/galaxy/.venv/bin/activate\" />\n        </destination>\n        <destination id=\"{{ GALAXY_DEPENDENCY_RESOLUTION | default('conda') }}_{{ GALAXY_JOB_RUNNER | default('local') }}\" runner=\"{{ GALAXY_JOB_RUNNER | default('local') }}\">\n            {% if GALAXY_DEPENDENCY_RESOLUTION == 'singularity' -%}\n              <env file=\"/galaxy/.venv/bin/activate\" />\n              <env id=\"HOME\">/home/galaxy</env>\n              <env id=\"LC_ALL\">C</env>\n              <env id=\"APPTAINER_CACHEDIR\">/tmp/singularity</env>\n              <env id=\"APPTAINER_TMPDIR\">/tmp</env>\n              <param id=\"singularity_enabled\">true</param>\n              {% if GALAXY_JOB_RUNNER == 'local' -%}\n                <param id=\"singularity_volumes\">{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$galaxy_root:$galaxy_root:ro,{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$tool_directory:$tool_directory:ro,{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$job_directory:$job_directory:rw,{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$working_directory:$working_directory:rw,{{ EXPORT_DIR | regex_replace(\"^.\", \"\") }}/$default_file_path:$default_file_path:rw</param>\n              {% endif -%}\n            {% elif GALAXY_DEPENDENCY_RESOLUTION == 'docker' -%}\n              <param id=\"docker_enabled\">true</param>\n              <param id=\"docker_sudo\">false</param>\n              <param id=\"docker_set_user\"></param>\n              {% if GALAXY_JOB_RUNNER == 'local' -%}\n                <param id=\"docker_volumes\">{{ HOST_EXPORT_DIR }}/$galaxy_root:$galaxy_root:ro,{{ HOST_EXPORT_DIR }}/$galaxy_root/database/tmp:$galaxy_root/database/tmp:rw,{{ HOST_EXPORT_DIR }}/$tool_directory:$tool_directory:ro,{{ HOST_EXPORT_DIR }}/$job_directory:$job_directory:rw,{{ HOST_EXPORT_DIR 
}}/$working_directory:$working_directory:rw,{{ HOST_EXPORT_DIR }}/$default_file_path:$default_file_path:rw</param>\n              {% endif -%}\n            {% elif not GALAXY_JOB_RUNNER.startswith('pulsar') and GALAXY_JOB_RUNNER != 'k8s' -%}\n              <env file=\"/galaxy/.venv/bin/activate\" />\n            {% endif -%}\n            {% if GALAXY_JOB_RUNNER == 'pulsar_rest' -%}\n              <param id=\"url\">{{ GALAXY_PULSAR_URL }}</param>\n              <param id=\"private_token\">{{ PULSAR_CONFIG_PRIVATE_TOKEN }}</param>\n              <param id=\"dependency_resolution\">remote</param>\n            {% endif -%}\n            {% if GALAXY_JOB_RUNNER == 'pulsar_mq' -%}\n              <param id=\"jobs_directory\">{{ PULSAR_JOBS_DIRECTORY | default('/pulsar/files/staging/') }}</param>\n            {% endif -%}\n            {% if GALAXY_JOB_RUNNER == 'k8s' -%}\n              <param id=\"docker_repo_default\">{{ GALAXY_K8S_DOCKER_REPO_DEFAULT | default('docker.io') }}</param>\n              {% if GALAXY_K8S_DOCKER_OWNER_DEFAULT -%}<param id=\"docker_owner_default\">{{ GALAXY_K8S_DOCKER_OWNER_DEFAULT }}</param>{% endif -%}\n              <param id=\"docker_image_default\">{{ GALAXY_K8S_DOCKER_IMAGE_DEFAULT | default('python') }}</param>\n              <param id=\"docker_tag_default\">{{ GALAXY_K8S_DOCKER_TAG_DEFAULT | default('3.10.15') }}</param>\n              <param id=\"docker_enabled\">true</param>\n            {% endif -%}\n        </destination>\n    </destinations>\n    <tools>\n        <tool id=\"upload1\" destination=\"local\" />\n        <tool id=\"__SET_METADATA__\" destination=\"local\" />\n    </tools>\n</job_conf>\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/galaxy/job_metrics.xml.j2",
    "content": "<?xml version=\"1.0\"?>\n<job_metrics>\n{% if galaxy_job_metrics.core %}\n  <core />\n{% endif %}\n{% if galaxy_job_metrics.cpuinfo and galaxy_job_metrics.cpuinfo == \"verbose\" %}\n  <cpuinfo verbose=\"true\" />\n{% elif galaxy_job_metrics.cpuinfo %}\n  <cpuinfo />\n{% endif %}\n{% if galaxy_job_metrics.meminfo %}\n  <meminfo />\n{% endif %}\n{% if galaxy_job_metrics.uname %}\n  <uname />\n{% endif %}\n{% if galaxy_job_metrics.env %}\n  <env />\n{% endif %}\n</job_metrics>\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/executor.conf.j2",
    "content": "{% for key, value in htcondor_executor.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/galaxy.conf.j2",
    "content": "{% for key, value in htcondor_galaxy.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/htcondor/master.conf.j2",
    "content": "{% for key, value in htcondor_master.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/k8s_config/persistent_volumes.yml.j2",
    "content": "kind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: galaxy-root\nspec:\n  storageClassName: standard\n  capacity:\n    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n  accessModes:\n    - ReadWriteMany\n  persistentVolumeReclaimPolicy: Retain\n  hostPath:\n    path: {{ HOST_EXPORT_DIR }}/galaxy\n---\nkind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: galaxy-database\nspec:\n  storageClassName: standard\n  capacity:\n    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n  accessModes:\n    - ReadWriteMany\n  persistentVolumeReclaimPolicy: Retain\n  hostPath:\n    path: {{ HOST_EXPORT_DIR }}/galaxy/database\n---\nkind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: galaxy-tool-deps\nspec:\n  storageClassName: standard\n  capacity:\n    storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n  accessModes:\n    - ReadWriteMany\n  persistentVolumeReclaimPolicy: Retain\n  hostPath:\n    path: {{ HOST_EXPORT_DIR }}/tool_deps\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/k8s_config/pv_claims.yml.j2",
    "content": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: galaxy-root\nspec:\n  storageClassName: standard\n  accessModes:\n    - ReadWriteMany\n  volumeName: galaxy-root\n  resources:\n    requests:\n      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: galaxy-database\nspec:\n  storageClassName: standard\n  accessModes:\n    - ReadWriteMany\n  volumeName: galaxy-database\n  resources:\n    requests:\n      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: galaxy-tool-deps\nspec:\n  storageClassName: standard\n  accessModes:\n    - ReadWriteMany\n  volumeName: galaxy-tool-deps\n  resources:\n    requests:\n      storage: {{ KIND_PV_STORAGE_SIZE | default(100) }}Gi\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/kind/kind_config.yml.j2",
    "content": "kind: Cluster\napiVersion: kind.x-k8s.io/v1alpha4\nnodes:\n- role: control-plane\n  extraMounts:\n  - hostPath: {{ HOST_EXPORT_DIR }}/galaxy\n    containerPath: {{ HOST_EXPORT_DIR }}/galaxy\n  - hostPath: {{ HOST_EXPORT_DIR }}/tool_deps\n    containerPath: {{ HOST_EXPORT_DIR }}/tool_deps\n{% set kind_node_count = KIND_NODE_COUNT | default(1) | int -%}\n{% for i in range(1, kind_node_count + 1) -%}\n- role: worker\n  extraMounts:\n  - hostPath: {{ HOST_EXPORT_DIR }}/galaxy\n    containerPath: {{ HOST_EXPORT_DIR }}/galaxy\n  - hostPath: {{ HOST_EXPORT_DIR }}/tool_deps\n    containerPath: {{ HOST_EXPORT_DIR }}/tool_deps\n{% endfor %}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/nginx/nginx.conf.j2",
    "content": "events { }\n\nhttp {\n  include mime.types;\n  # See https://docs.galaxyproject.org/en/latest/admin/nginx.html#serving-galaxy-at-the-web-server-root\n\n  # compress responses whenever possible\n  gzip on;\n  gzip_http_version 1.1;\n  gzip_vary on;\n  gzip_comp_level 6;\n  gzip_proxied any;\n  gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;\n  gzip_buffers 16 8k;\n\n  # allow up to 3 minutes for Galaxy to respond to slow requests before timing out\n  proxy_read_timeout {{ NGINX_PROXY_READ_TIMEOUT | default(180, true) }};\n\n  proxy_buffers 8 16k;\n  proxy_buffer_size 16k;\n\n  # maximum file upload size\n  client_max_body_size 10g;\n\n  server {\n    listen 80 default_server;\n    listen [::]:80 default_server;\n    server_name _;\n\n    # use a variable for convenience\n    set $galaxy_static /export/galaxy/static;\n    set $galaxy_root /export/galaxy;\n\n    # proxy all requests not matching other locations to gunicorn\n    location /{{ GALAXY_PROXY_PREFIX | regex_replace(\"^/\", \"\") | regex_replace(\"/$\", \"\") }} {\n      proxy_pass http://galaxy-server:5555;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto $scheme;\n      proxy_set_header Upgrade $http_upgrade;\n    }\n\n    # serve framework static content\n    location {{ GALAXY_PROXY_PREFIX | regex_replace(\"/$\", \"\") }}/static {\n      alias $galaxy_static;\n      expires 24h;\n    }\n    location {{ GALAXY_PROXY_PREFIX | regex_replace(\"/$\", \"\") }}/robots.txt {\n      alias $galaxy_static/robots.txt;\n      expires 24h;\n    }\n    location {{ GALAXY_PROXY_PREFIX | regex_replace(\"/$\", \"\") }}/favicon.ico {\n      alias $galaxy_static/favicon.ico;\n      expires 24h;\n    }\n\n    # serve visualization plugin static content\n    location ~ ^{{ GALAXY_PROXY_PREFIX | regex_replace(\"/$\", \"\") 
}}/plugins/(?<plug_type>[^/]+?)/((?<vis_d>[^/_]*)_?)?(?<vis_name>[^/]*?)/static/(?<static_file>.*?)$ {\n      alias $galaxy_root/config/plugins/$plug_type/;\n      try_files $vis_d/${vis_d}_${vis_name}/static/$static_file\n        $vis_d/static/$static_file =404;\n    }\n\n    # delegated uploads\n    location {{ GALAXY_PROXY_PREFIX | regex_replace(\"/$\", \"\") }}/api/upload/resumable_upload {\n      # Disable request and response buffering\n      proxy_request_buffering off;\n      proxy_buffering off;\n      proxy_http_version 1.1;\n\n      # Add X-Forwarded-* headers\n      proxy_set_header X-Forwarded-Host $http_host;\n      proxy_set_header X-Forwarded-Proto $scheme;\n          \n      proxy_set_header Upgrade $http_upgrade;\n      proxy_set_header Connection \"upgrade\";\n      client_max_body_size 0;\n      proxy_pass http://rustus:1081;\n    }\n\n    rewrite ^/{{ GALAXY_PROXY_PREFIX | regex_replace(\"^/\", \"\") | regex_replace(\"/$\", \"\") }}$ /{{ GALAXY_PROXY_PREFIX | regex_replace(\"^/\", \"\") | regex_replace(\"/$\", \"\") }}/ last;\n  }\n}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/pulsar/app.yml.j2",
    "content": "managers:\n  {% if PULSAR_JOB_RUNNER == 'local' -%}\n  _default_:\n    type: queued_python\n    num_concurrent_jobs: {{ PULSAR_NUM_CONCURRENT_JOBS | default(1) }}\n  {% endif %}\n\n{{ pulsar | to_nice_yaml(indent=2) }}\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/pulsar/server.ini.j2",
    "content": "[server:main]\nuse = egg:Paste#http\nport = {{ PULSAR_PORT | default(8913) }}\nhost = {{ PULSAR_HOSTNAME | default('pulsar') }}\n\n[app:main]\npaste.app_factory = pulsar.web.wsgi:app_factory\napp_config = %(here)s/app.yml\n\n## Configure uWSGI (if used).\n[uwsgi]\nmaster = True\npaste-logger = true\nhttp = {{ PULSAR_HOSTNAME | default('pulsar') }}:{{ PULSAR_PORT | default(8913) }}\nprocesses = 1\nenable-threads = True\n\n[watcher:web]\ncmd = chaussette --fd $(circus.sockets.web) paste:server.ini\nuse_sockets = True\n# Pulsar must be single-process for now...\nnumprocesses = 1\n\n[socket:web]\nhost = localhost\nport = 8913\n\n## Configure Python loggers.\n[loggers]\nkeys = root,pulsar\n\n[handlers]\nkeys = console\n\n[formatters]\nkeys = generic\n\n[logger_root]\nlevel = {{ PULSAR_LOG_LEVEL | default('INFO') }}\nhandlers = console\n\n[logger_pulsar]\nlevel = {{ PULSAR_LOG_LEVEL | default('INFO') }}\nhandlers = console\nqualname = pulsar\npropagate = 1\n\n[handler_console]\nclass = StreamHandler\nargs = (sys.stderr,)\nlevel = {{ PULSAR_LOG_LEVEL | default('INFO') }}\nformatter = generic\n\n[formatter_generic]\nformat = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s\n"
  },
  {
    "path": "compose/galaxy-configurator/templates/slurm/slurm.conf.j2",
    "content": "{% for key, value in slurm.items() -%}\n{{ key }}={{ value }}\n{% endfor %}\n\n{% set slurm_node_count = SLURM_NODE_COUNT | int -%}\n{% for i in range(1, slurm_node_count + 1) -%}\nNodeName={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeAddr={{ SLURM_NODE_HOSTNAME }}_{{ i }} NodeHostname={{ SLURM_NODE_HOSTNAME }}_{{ i }} CPUs={{ SLURM_NODE_CPUS | default(1, true) }} RealMemory={{ SLURM_NODE_MEMORY | default(1024, true) }} State=UNKNOWN\n{% endfor %}\nPartitionName=work Nodes={% for i in range(1, slurm_node_count + 1) -%}{{ SLURM_NODE_HOSTNAME }}_{{ i }}{%- if not loop.last -%},{% endif %}{% endfor %} Default=YES MaxTime=INFINITE State=UP Shared=YES # TODO\n"
  },
  {
    "path": "compose/galaxy-htcondor/Dockerfile",
    "content": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as galaxy_dependencies\n\nARG GALAXY_RELEASE=release_24.1\nARG GALAXY_REPO=https://github.com/galaxyproject/galaxy\n\nENV GALAXY_ROOT_DIR=/galaxy\nENV GALAXY_LIBRARY=$GALAXY_ROOT_DIR/lib\n\n# Download Galaxy source, but only keep necessary dependencies\nRUN mkdir \"${GALAXY_ROOT_DIR}\" \\\n    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \\\n    && cd $GALAXY_ROOT_DIR \\\n    && ls . | grep -v \"lib\" | xargs rm -rf \\\n    && cd $GALAXY_ROOT_DIR/lib \\\n    && ls . | grep -v \"galaxy\\|galaxy_ext\" | xargs rm -rf \\\n    && cd $GALAXY_ROOT_DIR/lib/galaxy \\\n    && ls . | grep -v \"__init__.py\\|datatypes\\|exceptions\\|files\\|metadata\\|model\\|util\\|security\" | xargs rm -rf\n\n\nFROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG as final\n\nENV DEBIAN_FRONTEND=noninteractive\n\nENV GALAXY_USER=galaxy \\\n    GALAXY_GROUP=galaxy \\\n    GALAXY_UID=1450 \\\n    GALAXY_GID=1450 \\\n    GALAXY_HOME=/home/galaxy \\\n    GALAXY_ROOT_DIR=/galaxy\n\nRUN groupadd -r $GALAXY_USER -g $GALAXY_GID \\\n    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c \"Galaxy user\" --shell /bin/bash $GALAXY_USER \\\n    && mkdir $GALAXY_HOME \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME\n\nENV EXPORT_DIR=/export \\\n    # Setting a standard encoding. 
This can get important for things like the unix sort tool.\n    LC_ALL=en_US.UTF-8 \\\n    LANG=en_US.UTF-8\n\nENV CONDOR_CPUS=1 \\\n    CONDOR_MEMORY=1024\n\n# Condor master\nRUN echo \"force-unsafe-io\" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \\\n    && echo 'Acquire::http::Timeout \"20\";' > /etc/apt/apt.conf.d/98AcquireTimeout \\\n    && echo 'Acquire::Retries \"5\";' > /etc/apt/apt.conf.d/99AcquireRetries \\\n    && apt-get update -qq && apt-get install -y --no-install-recommends locales gnupg2 curl \\\n    && locale-gen en_US.UTF-8 && dpkg-reconfigure locales \\\n    && curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | apt-key add - \\\n    && echo \"deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main\" >> /etc/apt/sources.list \\\n    && apt-get update -qq && apt-get install -y --no-install-recommends \\\n        supervisor \\\n        htcondor \\\n        wget \\\n    && touch /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog \\\n    && mkdir -p /var/run/condor/ /var/lock/condor/ \\\n    && chown -R condor: /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog /var/run/condor/ /var/lock/condor/\n\nADD supervisord.conf /etc/supervisord.conf\n\n# Copy Galaxy dependencies\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=galaxy_dependencies $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR\n\nCOPY start.sh /usr/bin/start.sh\nRUN apt update && apt install python3 -y\n\nRUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \n\nENTRYPOINT /usr/bin/start.sh\n"
  },
  {
    "path": "compose/galaxy-htcondor/start.sh",
    "content": "#!/bin/bash\n\nsleep 5\necho \"Waiting for Galaxy configurator to finish and release lock\"\nuntil [ ! -f /config/configurator.lock ] && echo Lock released; do\n  sleep 0.1;\ndone;\n\ncp -f \"/config/$HTCONDOR_TYPE.conf\" /etc/condor/condor_config.local\ncondor_store_cred -p \"$HTCONDOR_POOL_PASSWORD\" -f /var/lib/condor/pool_password\n\n/usr/bin/supervisord\n"
  },
  {
    "path": "compose/galaxy-htcondor/supervisord.conf",
    "content": "[unix_http_server]\nfile=/var/run/supervisor.sock   ; (the path to the socket file)\nchmod=0700                       ; socket file mode (default 0700)\n\n[supervisord]\nnodaemon = true\n\n[program:htcondor]\ncommand=/usr/sbin/condor_master -pidfile /var/run/condor/condor.pid -f -t\n#stdout_logfile=/var/log/htcondor.log\n#stderr_logfile=/var/log/htcondor.log\nstdout_logfile=/dev/stdout\nstdout_logfile_maxbytes=0\nstderr_logfile=/dev/stderr\nstderr_logfile_maxbytes=0\nstopwaitsecs=1\nstartretries=0\nautostart=true\nautorestart=false\n\n[program:log-condor-collector]\ncommand=tail -f -n1000 /var/log/condor/CollectorLog\nstdout_logfile=/dev/stdout\nstdout_logfile_maxbytes=0\nstderr_logfile=/dev/stderr\nstderr_logfile_maxbytes=0\nstopwaitsecs=1\nstartretries=5\nautostart=true\nautorestart=false\nuser=condor\n\n[program:log-condor-negotiator]\ncommand=tail -f -n1000 /var/log/condor/NegotiatorLog\nstdout_logfile=/dev/stdout\nstdout_logfile_maxbytes=0\nstderr_logfile=/dev/stderr\nstderr_logfile_maxbytes=0\nstopwaitsecs=1\nstartretries=5\nautostart=true\nautorestart=false\nuser=condor\n\n# [program:telegraf]\n# command=/usr/bin/telegraf --config /etc/telegraf/telegraf.conf\n# stdout_logfile=/dev/stdout\n# stdout_logfile_maxbytes=0\n# stderr_logfile=/dev/stderr\n# stderr_logfile_maxbytes=0\n# stopwaitsecs=1\n# startretries=5\n# autostart=true\n# autorestart=false\n# user=root\n\n\n[rpcinterface:supervisor]\nsupervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface\n\n[supervisorctl]\nserverurl=unix:///var/run/supervisor.sock ; use a unix:// URL  for a unix socket\n"
  },
  {
    "path": "compose/galaxy-kind/Dockerfile",
    "content": "FROM alpine:3.17\n\nARG KIND_RELEASE=v0.24.0\nARG KUBECTL_RELEASE=v1.31.1\n\nRUN apk add --no-cache docker\n\nRUN apk add --no-cache --virtual build-deps wget \\\n    && apk add --no-cache bash \\\n    && wget -O /usr/bin/kind https://kind.sigs.k8s.io/dl/${KIND_RELEASE}/kind-linux-amd64 \\\n    && chmod +x /usr/bin/kind \\\n    && wget -O /usr/bin/kubectl https://dl.k8s.io/release/${KUBECTL_RELEASE}/bin/linux/amd64/kubectl \\\n    && chmod +x /usr/bin/kubectl \\\n    && apk del build-deps\n\nENV KIND_CONFIG_DIR=/kind\nENV KUBECONFIG=${KIND_CONFIG_DIR}/.kube/config\n\nCOPY docker-entrypoint.sh /usr/bin/docker-entrypoint.sh\n\nENTRYPOINT [ \"/usr/bin/docker-entrypoint.sh\" ]\n"
  },
  {
    "path": "compose/galaxy-kind/docker-entrypoint.sh",
    "content": "#!/bin/bash\n\n_term() {\n  echo \"Caught SIGTERM signal!\"\n  echo \"Trying to stop Kind cluster\"\n  kind delete cluster --name \"${K8S_CLUSTER_NAME:-galaxy}\" || true\n  exit 0\n}\ntrap _term SIGTERM\n\nif [ -z \"$KIND_SKIP_CONFIG_LOCK\" ]; then\n  sleep 2\n  echo \"Waiting for Galaxy configurator to finish and release lock\"\n  until [ ! -f \"$KIND_CONFIG_DIR/configurator.lock\" ] && echo Lock released; do\n  sleep 0.1;\n  done;\nfi\nrm \"${KUBECONFIG}_in_docker\" || true\n\nkind delete cluster --name \"${K8S_CLUSTER_NAME:-galaxy}\" || true\nkind create cluster --config \"$KIND_CONFIG_DIR/kind_config.yml\" --kubeconfig \"$KUBECONFIG\" --name \"${K8S_CLUSTER_NAME:-galaxy}\" || true\n\n# Create custom kubeconfig, that allows to reach the control-plane from inside the containers\nREAL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' \"${K8S_CLUSTER_NAME:-galaxy}-control-plane\")\ncp \"${KUBECONFIG}\" \"${KUBECONFIG}_in_docker\"\nsed -i \"s/127.0.0.1:[0-9]*$/${REAL_IP}:6443/g\" \"${KUBECONFIG}_in_docker\"\n\nexport KUBECONFIG=\"${KUBECONFIG}_in_docker\"\nkubectl cluster-info\n\n# Not all resources can be easily updated, therefore it is easier\n# to remove the resources first, while the whole setup is\n# still starting up\nls \"$KIND_CONFIG_DIR/k8s_config\"\nkubectl delete -f \"$KIND_CONFIG_DIR/k8s_config\" || true\nkubectl apply -f \"$KIND_CONFIG_DIR/k8s_config\"\n\n# Wait for SIGTERM and delete cluster\nsleep inf & wait\n"
  },
  {
    "path": "compose/galaxy-nginx/Dockerfile",
    "content": "FROM nginx:1.27-alpine\n\nCOPY start.sh /usr/bin/start.sh\n\nCMD [ \"/bin/sh\", \"/usr/bin/start.sh\"]\n"
  },
  {
    "path": "compose/galaxy-nginx/start.sh",
    "content": "#!/bin/bash\nsleep 5 # ToDo: Use locking or so to be sure we really have the newest version\necho \"Waiting for Nginx config\"\nuntil [ \"$(ls -p | grep -v /config)\" != \"\" ] && echo Nginx config found; do\n  sleep 0.5;\ndone;\n\ncp -f /config/* /etc/nginx\n\necho \"Running nginx startup command\"\nnginx -g \"daemon off;\"\n"
  },
  {
    "path": "compose/galaxy-server/Dockerfile",
    "content": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as build_base\n\nENV EXPORT_DIR=/export \\\n    GALAXY_ROOT_DIR=/galaxy \\\n    HTCONDOR_ROOT=/opt/htcondor\n\nENV GALAXY_STATIC_DIR=$GALAXY_ROOT_DIR/static \\\n    GALAXY_EXPORT=$EXPORT_DIR/galaxy \\\n    GALAXY_CONFIG_DIR=$GALAXY_ROOT_DIR/config \\\n    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=/tool_deps \\\n    GALAXY_CONFIG_TOOL_PATH=$GALAXY_ROOT_DIR/tools \\\n    GALAXY_CONFIG_TOOL_DATA_PATH=$GALAXY_ROOT_DIR/tool-data \\\n    GALAXY_VIRTUAL_ENV=$GALAXY_ROOT_DIR/.venv \\\n    GALAXY_DATABASE_PATH=$GALAXY_ROOT_DIR/database\n\nENV GALAXY_USER=galaxy \\\n    GALAXY_GROUP=galaxy \\\n    GALAXY_UID=1450 \\\n    GALAXY_GID=1450 \\\n    GALAXY_HOME=/home/galaxy\n\nENV GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda \\\n    MINIFORGE_VERSION=24.3.0-0\n\nRUN groupadd -r $GALAXY_USER -g $GALAXY_GID \\\n    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c \"Galaxy user\" --shell /bin/bash $GALAXY_USER \\\n    && mkdir $GALAXY_HOME \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME\n\nFROM build_base as build_miniforge\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\n\n# Install Miniforge\nRUN curl -s -L https://github.com/conda-forge/miniforge/releases/download/$MINIFORGE_VERSION/Miniforge3-$MINIFORGE_VERSION-Linux-x86_64.sh > ~/miniforge.sh \\\n    && /bin/bash ~/miniforge.sh -b -p $GALAXY_CONDA_PREFIX/ \\\n    && rm ~/miniforge.sh \\\n    && ln -s $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh /etc/profile.d/conda.sh \\\n    && echo \". 
$GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh\" >> $GALAXY_HOME/.bashrc \\\n    && echo \"conda activate base\" >> $GALAXY_HOME/.bashrc \\\n    && export PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH \\\n    && conda config --add channels bioconda \\\n    && conda install virtualenv pip ephemeris \\\n    && conda clean --packages -t -i \\\n    && cp -r ~/.conda $GALAXY_HOME && cp ~/.condarc $GALAXY_HOME \\\n    && /usr/bin/common_cleanup.sh\n\nFROM build_base as build_galaxy\n\nARG GALAXY_RELEASE=release_24.1\nARG GALAXY_REPO=https://github.com/galaxyproject/galaxy\n\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\n# Install Galaxy\nRUN apt update && apt install --no-install-recommends libcurl4-openssl-dev libssl-dev python3-dev python3-pip -y \\\n    && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \\\n    && mkdir \"${GALAXY_ROOT_DIR}\" \\\n    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \\\n    && cd $GALAXY_ROOT_DIR \\\n    && ./scripts/common_startup.sh \\\n    && . 
$GALAXY_ROOT_DIR/.venv/bin/activate \\\n    && pip3 install drmaa psycopg2 pycurl pykube redis \\\n    && pip3 install importlib-metadata importlib-resources pathlib2 ruamel.yaml.clib typing zipp \\\n    && deactivate \\\n    && rm -rf .ci .circleci .coveragerc .gitignore .travis.yml CITATION CODE_OF_CONDUCT.md CONTRIBUTING.md CONTRIBUTORS.md \\\n              LICENSE.txt Makefile README.rst SECURITY_POLICY.md pytest.ini tox.ini \\\n              contrib doc config/plugins lib/galaxy_test test test-data \\\n              .venv/lib/node_modules .venv/src/node-v10.15.3-linux-x64 \\\n              .venv/include/node .venv/bin/node .venv/bin/nodeenv \\\n    && /usr/bin/common_cleanup.sh\n\n# --- Final image ---\nFROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-cluster-base:$IMAGE_TAG as final\n\nCOPY ./files/common_cleanup.sh /usr/bin/common_cleanup.sh\nCOPY ./files/create_galaxy_user.py /usr/local/bin/create_galaxy_user.py\n\nENV EXPORT_DIR=/export \\\n    GALAXY_ROOT_DIR=/galaxy \\\n    GALAXY_PYTHON=/usr/bin/python3 \\\n    HTCONDOR_ROOT=/opt/htcondor\n\nENV GALAXY_RELEASE=${GALAXY_RELEASE:-release_24.1} \\\n    GALAXY_REPO=${GALAXY_REPO:-https://github.com/galaxyproject/galaxy} \\\n    GALAXY_STATIC_DIR=$GALAXY_ROOT_DIR/static \\\n    GALAXY_EXPORT=$EXPORT_DIR/galaxy \\\n    GALAXY_CONFIG_DIR=$GALAXY_ROOT_DIR/config \\\n    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=/tool_deps \\\n    GALAXY_CONFIG_TOOL_PATH=$GALAXY_ROOT_DIR/tools \\\n    GALAXY_CONFIG_TOOL_DATA_PATH=$GALAXY_ROOT_DIR/tool-data \\\n    GALAXY_VIRTUAL_ENV=$GALAXY_ROOT_DIR/.venv \\\n    GALAXY_DATABASE_PATH=$GALAXY_ROOT_DIR/database\n\nENV GALAXY_USER=galaxy \\\n    GALAXY_GROUP=galaxy \\\n    GALAXY_UID=1450 \\\n    GALAXY_GID=1450 \\\n    GALAXY_HOME=/home/galaxy\n\nENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml\n\n# Set permissions\nRUN groupadd -r $GALAXY_USER -g $GALAXY_GID \\\n    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c \"Galaxy user\" --shell /bin/bash $GALAXY_USER 
\\\n    && /usr/bin/common_cleanup.sh\n\n# Install remaining dependencies\nRUN apt update && apt install --no-install-recommends curl gcc gnupg2 libgomp1 liblzma-dev libbz2-dev libpq-dev \\\n                                                      libcurl4-openssl-dev libssl-dev \\\n                                                      mercurial make netcat python3-dev python3-setuptools python3-pip \\\n                                                      zlib1g-dev sudo -y \\\n    # Cython and wheel are needed to later install pysam..\n    && pip3 install Cython wheel \\\n    && pip3 install pysam \\\n    && /usr/bin/common_cleanup.sh\n\n# GALAXY_USER should be able to run docker without root permissions\nRUN usermod -aG docker $GALAXY_USER\n\n# Make Python3 standard\nRUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10\n\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy ${GALAXY_ROOT_DIR} ${GALAXY_ROOT_DIR}\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge ${GALAXY_CONFIG_TOOL_DEPENDENCY_DIR} ${GALAXY_CONFIG_TOOL_DEPENDENCY_DIR}\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge ${GALAXY_HOME} ${GALAXY_HOME}\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /etc/profile.d/conda.sh /etc/profile.d/conda.sh\n\nCOPY ./files/start.sh /usr/bin/start.sh\n\nEXPOSE 80\n\nENTRYPOINT \"/usr/bin/start.sh\"\n"
  },
  {
    "path": "compose/galaxy-server/files/common_cleanup.sh",
    "content": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduces the container size\n# at the cost of the startup time of your application\nfind / -name '*.pyc' -delete\n\nfind / -name '*.log' -delete\nfind / -name '.cache' -delete\nrm -rf /var/lib/apt/lists/*\nrm -rf /var/cache/*\n\n# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles\ntruncate -s 0 /var/log/*log || true\ntruncate -s 0 /var/log/**/*log || true\n"
  },
  {
    "path": "compose/galaxy-server/files/create_galaxy_user.py",
    "content": "#!/usr/bin/env python\nimport sys\nsys.path.insert(1,'/galaxy')\nsys.path.insert(1,'/galaxy/lib')\n\nfrom galaxy.model import User, APIKeys\nfrom galaxy.model.mapping import init\nfrom galaxy.model.orm.scripts import get_config\nimport argparse\n\ndef add_user(sa_session, security_agent, email, password, key=None, username=\"admin\"):\n    \"\"\"\n        Add Galaxy User.\n        From John https://gist.github.com/jmchilton/4475646\n    \"\"\"\n    query = sa_session.query( User ).filter_by( email=email )\n    if query.count() > 0:\n        return query.first()\n    else:\n        User.use_pbkdf2 = True\n        user = User(email)\n        user.username = username\n        user.set_password_cleartext(password)\n        sa_session.add(user)\n        sa_session.flush()\n\n        security_agent.create_private_user_role( user )\n        if not user.default_permissions:\n            security_agent.user_set_default_permissions( user, history=True, dataset=True )\n\n        if key is not None:\n            api_key = APIKeys()\n            api_key.user_id = user.id\n            api_key.key = key\n            sa_session.add(api_key)\n            sa_session.flush()\n        sa_session.commit()\n        return user\n\n\nif __name__ == \"__main__\":\n    db_url = get_config(sys.argv, use_argparse=False)['db_url']\n\n    parser = argparse.ArgumentParser(description='Create Galaxy Admin User.')\n\n    parser.add_argument(\"--user\", required=True,\n                    help=\"Username, it should be an email address.\")\n    parser.add_argument(\"--password\", required=True,\n                    help=\"Password.\")\n    parser.add_argument(\"--key\", help=\"API-Key.\")\n    parser.add_argument(\"--username\", default=\"admin\",\n                    help=\"The public username. 
Public names must be at least three characters in length and contain only lower-case letters, numbers, and the '-' character.\")\n    parser.add_argument('args', nargs=argparse.REMAINDER)\n\n    options = parser.parse_args()\n\n    mapping = init('/tmp/', db_url)\n    sa_session = mapping.context\n    security_agent = mapping.security_agent\n\n    add_user(sa_session, security_agent, options.user, options.password, key=options.key, username=options.username)\n"
  },
  {
    "path": "compose/galaxy-server/files/start.sh",
    "content": "#!/bin/bash\n\ncreate_user() {\n  GALAXY_PROXY_PREFIX=$(cat $GALAXY_CONFIG_DIR/GALAXY_PROXY_PREFIX.txt)\n  echo \"Waiting for Galaxy...\"\n  until [ \"$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}$GALAXY_PROXY_PREFIX)\" -eq \"200\" ] && echo Galaxy started; do\n    sleep 0.1;\n  done;\n  echo \"Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing\"\n  . $GALAXY_VIRTUAL_ENV/bin/activate\n  python /usr/local/bin/create_galaxy_user.py --user \"$GALAXY_DEFAULT_ADMIN_EMAIL\" --password \"$GALAXY_DEFAULT_ADMIN_PASSWORD\" \\\n  -c \"$GALAXY_CONFIG_FILE\" --username \"$GALAXY_DEFAULT_ADMIN_USER\" --key \"$GALAXY_DEFAULT_ADMIN_KEY\"\n  deactivate\n}\n\n# start copy lib/tools. Looks very hacky.\ntools_dir=\"/galaxy/lib/galaxy/tools/\"\nexp_dir=\"/export$tools_dir\"\nmkdir -p $exp_dir\nchown \"$GALAXY_USER:$GALAXY_USER\" $exp_dir\ncp -rf $tools_dir/* $exp_dir\n# end copy lib/tools.\n\n# First start?? Check if something exists that indicates that environment is not new.. Config file? Something in DB maybe??\n\necho \"Initialization: Check if files already exist, export otherwise.\"\n\n# Create initial $GALAXY_ROOT_DIR in $EXPORT_DIR if not already existent\nmkdir -p \"$EXPORT_DIR/$GALAXY_ROOT_DIR\"\n\ndeclare -A exports=( [\"$GALAXY_STATIC_DIR\"]=\"$EXPORT_DIR/$GALAXY_STATIC_DIR\" \\\n                     [\"$GALAXY_CONFIG_TOOL_PATH\"]=\"$EXPORT_DIR/$GALAXY_CONFIG_TOOL_PATH\" \\\n                     [\"$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR\"]=\"$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR\" \\\n                     [\"$GALAXY_CONFIG_TOOL_DATA_PATH\"]=\"$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DATA_PATH\" \\\n                     [\"$GALAXY_VIRTUAL_ENV\"]=\"$EXPORT_DIR/$GALAXY_VIRTUAL_ENV\" )\n\n# shellcheck disable=SC2143,SC2086,SC2010\nfor galaxy_dir in \"${!exports[@]}\"; do\n  exp_dir=${exports[$galaxy_dir]}\n  if [ ! 
-d  $exp_dir ] || [ -z \"$(ls -A $exp_dir)\" ]; then\n    echo \"Exporting $galaxy_dir to $exp_dir\"\n    mkdir $exp_dir\n    chown \"$GALAXY_USER:$GALAXY_USER\" $exp_dir\n    cp -rpf $galaxy_dir/* $exp_dir\n  fi\n  rm -rf $galaxy_dir\n  ln -v -s $exp_dir $galaxy_dir\n  chown -h \"$GALAXY_USER:$GALAXY_USER\" $galaxy_dir\ndone\n\n# Export galaxy_config seperately (special treatment because of plugins-dir)\n# shellcheck disable=SC2143,SC2086,SC2010\nif [ ! -d  \"$EXPORT_DIR/$GALAXY_CONFIG_DIR\" ] || [ -z \"$(ls -p $EXPORT_DIR/$GALAXY_CONFIG_DIR | grep -v /)\" ]; then\n  # Move config to $EXPORT_DIR and create symlink\n  mkdir \"$EXPORT_DIR/$GALAXY_CONFIG_DIR\"\n  chown \"$GALAXY_USER:$GALAXY_USER\" \"$EXPORT_DIR/$GALAXY_CONFIG_DIR\"\n  cp -rpf $GALAXY_CONFIG_DIR/* $EXPORT_DIR/$GALAXY_CONFIG_DIR\n  cp -rpf $GALAXY_CONFIG_DIR/plugins/* $EXPORT_DIR/$GALAXY_CONFIG_DIR/plugins\nfi\nrm -rf \"$GALAXY_CONFIG_DIR\"\nln -v -s \"$EXPORT_DIR/$GALAXY_CONFIG_DIR\" \"$GALAXY_CONFIG_DIR\"\nchown -h \"$GALAXY_USER:$GALAXY_USER\" \"$GALAXY_CONFIG_DIR\"\n\n# Export database-folder (used for job files etc)\nrm -rf \"$GALAXY_DATABASE_PATH\"\nmkdir -p \"$EXPORT_DIR/$GALAXY_DATABASE_PATH\"\nchown \"$GALAXY_USER:$GALAXY_USER\" \"$EXPORT_DIR/$GALAXY_DATABASE_PATH\"\nln -v -s \"$EXPORT_DIR/$GALAXY_DATABASE_PATH\" \"$GALAXY_DATABASE_PATH\"\nchown -h \"$GALAXY_USER:$GALAXY_USER\" \"$GALAXY_DATABASE_PATH\"\n\n# Try to guess if we are running under --privileged mode\nif mount | grep \"/proc/kcore\"; then\n  PRIVILEGED=false\nelse\n  PRIVILEGED=true\n  echo \"Privileged mode detected\"\n  chmod 666 /var/run/docker.sock\nfi\n\nif $PRIVILEGED; then\n  echo \"Mounting CVMFS\"\n  chmod 666 /dev/fuse\n  mkdir /cvmfs/data.galaxyproject.org\n  mount -t cvmfs data.galaxyproject.org /cvmfs/data.galaxyproject.org\n  mkdir /cvmfs/singularity.galaxyproject.org\n  mount -t cvmfs singularity.galaxyproject.org /cvmfs/singularity.galaxyproject.org\nfi\n\necho \"Finished initialization\"\n\necho \"Waiting for 
RabbitMQ...\"\nuntil nc -z -w 2 rabbitmq 5672 && echo RabbitMQ started; do\n  sleep 1;\ndone;\n\necho \"Waiting for Postgres...\"\nuntil nc -z -w 2 postgres 5432 && echo Postgres started; do\n  sleep 1;\ndone;\n\nif [ \"$SKIP_LOCKING\" != \"true\" ]; then\n  echo \"Waiting for Galaxy configurator to finish and release lock\"\n  until [ ! -f \"$GALAXY_CONFIG_DIR/configurator.lock\" ] && echo Lock released; do\n    sleep 0.1;\n  done;\nfi\n\nif [ -f \"/htcondor_config/galaxy.conf\" ]; then\n  echo \"HTCondor config file found\"\n\n  cp -f \"/htcondor_config/galaxy.conf\" /etc/condor/condor_config.local\n  condor_store_cred -p \"$HTCONDOR_POOL_PASSWORD\" -f /var/lib/condor/pool_password\n  echo \"Starting HTCondor..\"\n  /usr/sbin/condor_master -b\nfi\n\nif [ -f /etc/munge/munge.key ]; then\n  echo \"Munge key found\"\n  echo \"Starting Munge..\"\n  /etc/init.d/munge start\nfi\n\n# In case the user wants the default admin to be created, do so.\nif [[ -n $GALAXY_DEFAULT_ADMIN_USER ]]; then\n  # Run in background and wait for Galaxy having finished starting up\n  create_user &\nfi\n\n# Ensure proper permission (the configurator might have changed them \"by mistake\")\nchown -RL \"$GALAXY_USER:$GALAXY_GROUP\" \"$GALAXY_CONFIG_DIR\"\n\necho \"Starting Galaxy now..\"\ncd \"$GALAXY_ROOT_DIR\" || { echo \"Error: Could not change to $GALAXY_ROOT_DIR\"; exit 1; }\nsudo -E -H -u $GALAXY_USER \"$GALAXY_VIRTUAL_ENV/bin/galaxy\" --config-file \"$GALAXY_CONFIG_FILE\"\n"
  },
  {
    "path": "compose/galaxy-slurm/Dockerfile",
    "content": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as galaxy_dependencies\n\nARG GALAXY_RELEASE=release_24.1\nARG GALAXY_REPO=https://github.com/galaxyproject/galaxy\n\nENV GALAXY_ROOT_DIR=/galaxy\n\n# Download Galaxy source, but only keep necessary dependencies\nRUN mkdir \"${GALAXY_ROOT_DIR}\" \\\n    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \\\n    && cd $GALAXY_ROOT_DIR \\\n    && ls . | grep -v \"lib\" | xargs rm -rf \\\n    && cd $GALAXY_ROOT_DIR/lib \\\n    && ls . | grep -v \"galaxy\\|galaxy_ext\" | xargs rm -rf \\\n    && cd $GALAXY_ROOT_DIR/lib/galaxy \\\n    && ls . | grep -v \"__init__.py\\|datatypes\\|exceptions\\|files\\|metadata\\|model\\|util\\|security\" | xargs rm -rf\n\n\nFROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-container-base:$IMAGE_TAG as final\n\nENV GALAXY_USER=galaxy \\\n    GALAXY_GROUP=galaxy \\\n    GALAXY_UID=1450 \\\n    GALAXY_GID=1450 \\\n    GALAXY_HOME=/home/galaxy \\\n    GALAXY_ROOT_DIR=/galaxy\n\nRUN groupadd -r $GALAXY_USER -g $GALAXY_GID \\\n    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c \"Galaxy user\" --shell /bin/bash $GALAXY_USER \\\n    && mkdir $GALAXY_HOME \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME\n\n# Install Slurm\nENV SLURM_USER=galaxy \\\n    SLURM_UID=1450 \\\n    SLURM_GID=1450 \\\n    MUNGE_USER=munge \\\n    MUNGE_UID=1200 \\\n    MUNGE_GID=1200\n\nRUN groupadd -r $MUNGE_USER -g $MUNGE_GID \\\n    && useradd -u $MUNGE_UID -r -g $MUNGE_USER $MUNGE_USER \\\n    && apt update \\\n    && apt install --no-install-recommends gosu munge python3 python3-dev slurm-wlm -y \\\n    && rm -rf /var/lib/apt/lists/* && rm -rf /var/cache/* && find / -name '*.pyc' -delete\n\n# Copy Galaxy dependencies\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=galaxy_dependencies $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR\n# Make Python3 
standard\nRUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10\n\nCOPY start.sh /usr/bin/start.sh\n\nENTRYPOINT [ \"/usr/bin/start.sh\" ]\n"
  },
  {
    "path": "compose/galaxy-slurm/start.sh",
    "content": "#!/bin/bash\n\n# Inspired by: https://github.com/giovtorres/slurm-docker-cluster\n\nsleep 10 # ToDo: Use locking or so to be sure we really have the newest version\necho \"Waiting for Slurm config\"\nuntil [ -f /etc/slurm/slurm.conf ] && echo Config found; do\n  sleep 0.5;\ndone;\n\nif [ \"$1\" = \"slurmctld\" ]; then\n  if [ ! -f /etc/munge/munge.key ]; then\n    chown -R \"$MUNGE_USER\":\"$MUNGE_USER\" /etc/munge\n    gosu \"$MUNGE_USER\" /usr/sbin/mungekey\n  fi\n  echo \"Starting Munge..\"\n  /etc/init.d/munge start\n\n  echo \"Starting Slurmctld\"\n  exec /usr/sbin/slurmctld -D\nfi\n\nif [ \"$1\" = \"slurmd\" ]; then\n  echo \"Waiting for munge.key\"\n  until [ -f /etc/munge/munge.key ] && echo munge.key found; do\n    sleep 0.5;\n  done;\n  sleep 1\n\n  echo \"Starting Munge..\"\n  /etc/init.d/munge start\n\n  echo \"Starting Slurmd\"\n  exec /usr/sbin/slurmd -D\nfi\n\nexec \"$@\"\n"
  },
  {
    "path": "compose/galaxy-slurm-node-discovery/Dockerfile",
    "content": "FROM alpine:3.17\n\nRUN apk add curl jq\n\nCOPY run.sh /usr/bin/run.sh\n\nENTRYPOINT /usr/bin/run.sh\n"
  },
  {
    "path": "compose/galaxy-slurm-node-discovery/run.sh",
    "content": "#!/bin/sh\n\n# This script is used to replace the container name of a slurm node\n# with its correct hostname. This is needed, as a hostname can not\n# include '_', which is the case for docker-compose.\nsleep 5\necho \"Waiting for Galaxy configurator to finish and release lock\"\nuntil [ ! -f /etc/slurm/configurator.lock ] && echo Lock released; do\n  sleep 0.1;\ndone;\n\ngrep < /etc/slurm/slurm.conf \"NodeName=\" | while read -r line; do\n  node=$(echo \"$line\" | sed \"s/NodeName=\\(.*\\) \\(NodeAddr.*\\)/\\1/\")\n  node_hostname=$(curl -s --unix-socket /var/run/docker.sock -XGET \\\n                       -H \"Content-Type: application/json\" http://v1.40/containers/json \\\n                       -G --data-urlencode \"filters={\\\"name\\\":[\\\"$node\\\"]}\" \\\n                  | jq -r '.[0] | .[\"Id\"]' | head -c 12)\n  sed -i \"s/$node/$node_hostname/g\" /etc/slurm/slurm.conf\ndone\n\nsleep infinity\n"
  },
  {
    "path": "compose/pulsar/Dockerfile",
    "content": "ARG DOCKER_REGISTRY=quay.io\nARG DOCKER_REGISTRY_USERNAME=bgruening\nARG IMAGE_TAG=latest\n\nFROM buildpack-deps:22.04 as build_pulsar\n\nARG PULSAR_RELEASE=0.15.6\nARG PULSAR_REPO=https://github.com/galaxyproject/pulsar\n\nENV PULSAR_ROOT=/pulsar\nENV PULSAR_VIRTUALENV=$PULSAR_ROOT/.venv\n\nRUN apt update \\\n    && apt install --no-install-recommends curl python3 python3-dev python3-pip python3-setuptools python3-venv -y\n\nRUN mkdir /tmp/pulsar \\\n    && curl -L -s $PULSAR_REPO/archive/$PULSAR_RELEASE.tar.gz | tar xzf - --strip-components=1 -C /tmp/pulsar \\\n    && mkdir $PULSAR_ROOT \\\n    && pip3 install wheel \\\n    && python3 -m venv $PULSAR_VIRTUALENV \\\n    && . $PULSAR_VIRTUALENV/bin/activate \\\n    && pip3 install drmaa kombu pastescript pastedeploy pycurl uwsgi pydantic \"aiohttp==3.10.9\" \\\n    && cd /tmp/pulsar \\\n    && python3 /tmp/pulsar/setup.py install\n\n\n# --- Final image ---\nFROM $DOCKER_REGISTRY/$DOCKER_REGISTRY_USERNAME/galaxy-cluster-base:$IMAGE_TAG as final\nCOPY files/common_cleanup.sh /usr/bin/common_cleanup.sh\n\nENV PULSAR_ROOT=/pulsar\nENV PULSAR_VIRTUALENV=$PULSAR_ROOT/.venv \\\n    PULSAR_CONFIG_DIR=$PULSAR_ROOT/config \\\n    PULSAR_TOOL_DEPENDENCY_DIR=$PULSAR_ROOT/dependencies\n\nRUN apt update \\\n    && apt install --no-install-recommends ca-certificates curl libxml2-dev python3 -y \\\n    && /usr/bin/common_cleanup.sh\n\nCOPY --from=build_pulsar /pulsar /pulsar\n\nCOPY docker-entrypoint.sh /docker-entrypoint.sh\n\nENTRYPOINT [\"/docker-entrypoint.sh\"]\n"
  },
  {
    "path": "compose/pulsar/docker-entrypoint.sh",
    "content": "#!/bin/bash\n\nif [ -z \"$PULSAR_SKIP_CONFIG_LOCK\" ]; then\n  sleep 10\n  echo \"Waiting for Galaxy configurator to finish and release lock\"\n  until [ ! -f \"$PULSAR_CONFIG_DIR/configurator.lock\" ] && echo Lock released; do\n    sleep 0.1;\n  done;\nfi\n\n# Try to guess if we are running under --privileged mode\nif mount | grep \"/proc/kcore\"; then\n  PRIVILEGED=false\nelse\n  PRIVILEGED=true\n  echo \"Privileged mode detected\"\n  chmod 666 /var/run/docker.sock\nfi\n\nif $PRIVILEGED; then\n  echo \"Mounting CVMFS\"\n  chmod 666 /dev/fuse\n  mkdir /cvmfs/data.galaxyproject.org\n  mount -t cvmfs data.galaxyproject.org /cvmfs/data.galaxyproject.org\n  mkdir /cvmfs/singularity.galaxyproject.org\n  mount -t cvmfs singularity.galaxyproject.org /cvmfs/singularity.galaxyproject.org\nfi\n\ncd \"$PULSAR_ROOT\" ||exit 1\n\n. \"$PULSAR_VIRTUALENV/bin/activate\"\n\npulsar --mode \"${PULSAR_MODE:-paster}\"\n"
  },
  {
    "path": "compose/pulsar/files/common_cleanup.sh",
    "content": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduces the container size\n# at the cost of the startup time of your application\nfind / -name '*.pyc' -delete\n\nfind / -name '*.log' -delete\nfind / -name '.cache' -delete\nrm -rf /var/lib/apt/lists/*\nrm -rf /var/cache/*\n\n# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles\ntruncate -s 0 /var/log/*log || true\ntruncate -s 0 /var/log/**/*log || true\n"
  },
  {
    "path": "compose/tests/docker-compose.test.bioblend.yml",
    "content": "services:\n  galaxy-bioblend-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-bioblend-test:${IMAGE_TAG:-latest}\n    build: tests/galaxy-bioblend-test\n    environment:\n      - GALAXY_VERSION=${GALAXY_VERSION:-release_24.1} # TODO: Change to GALAXY_RELEASE\n      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}\n      - EXTRA_SKIP_TESTS_BIOBLEND=${EXTRA_SKIP_TESTS_BIOBLEND:-}\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/tests/docker-compose.test.selenium.yml",
    "content": "services:\n  galaxy-selenium-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-selenium-test:${IMAGE_TAG:-latest}\n    build: tests/galaxy-selenium-test\n    environment:\n      - TESTS=${TESTS:-navigates_galaxy.py,login.py}\n      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}\n      - SE_ENABLE_TRACING=false\n      - SE_SESSION_REQUEST_TIMEOUT=1800\n    volumes:\n      - ${EXPORT_DIR:-./../export}/galaxy/database:/galaxy/database\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/tests/docker-compose.test.workflows.yml",
    "content": "services:\n  galaxy-workflow-test:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy-workflow-test:${IMAGE_TAG:-latest}\n    build: tests/galaxy-workflow-test\n    environment:\n      - GALAXY_URL=http://nginx${GALAXY_PROXY_PREFIX:-}\n      - WORKFLOWS=${WORKFLOWS:-training/sequence-analysis/quality-control/quality_control.ga,sklearn/ard/ard.ga,example1/wf3-shed-tools.ga}\n    volumes:\n      - ${EXPORT_DIR:-./../export}/galaxy/database:/galaxy/database\n    networks:\n      - galaxy\n"
  },
  {
    "path": "compose/tests/docker-compose.test.yml",
    "content": "services:\n  galaxy-configurator:\n    environment:\n      - GALAXY_CONFIG_CLEANUP_JOB=never\n      - NGINX_PROXY_READ_TIMEOUT=3600\n      - DONT_EXIT=true\n  # Terminates the container after $TIMEOUT minutes\n  # which results in the whole setup terminating if --exit-code-from\n  # is set (see CI)\n  timeout:\n    image: alpine:3.17\n    environment:\n      - TIMEOUT=${TIMEOUT:-120}\n    command: sh -c \"echo \\\"Setting timeout to $$TIMEOUT minutes\\\" && sleep $$((( $$TIMEOUT * 60 ))) && echo \\\"Timeout after $$TIMEOUT minutes!\\\" && exit 1\"\n"
  },
  {
    "path": "compose/tests/galaxy-bioblend-test/Dockerfile",
    "content": "FROM alpine:3.17 as build\n\nENV BIOBLEND_VERSION=1.3.0\n\nADD \"https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND_VERSION.zip\" /src/bioblend.zip\nRUN apk update && apk add curl python3-dev unzip \\\n    && python3 -m ensurepip --upgrade \\\n    && pip3 install pep8 tox \"aiohttp==3.10.9\" \\\n    && cd /src \\\n    && unzip bioblend.zip && rm bioblend.zip \\\n    && mv \"bioblend-$BIOBLEND_VERSION\" bioblend \\\n    && cd bioblend \\\n    && python3 setup.py install\n\nWORKDIR /src/bioblend\n\nRUN tox -e py310 --notest\n\nCOPY ./run.sh /src/bioblend/run.sh\n\nENTRYPOINT ./run.sh\n"
  },
  {
    "path": "compose/tests/galaxy-bioblend-test/run.sh",
    "content": "#!/bin/sh\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\\?key\\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})\" -eq \"200\" ] && echo Galaxy started; do\n  sleep 1;\ndone;\n\nexport BIOBLEND_GALAXY_URL=${GALAXY_URL:-http://nginx}\nexport BIOBLEND_GALAXY_API_KEY=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey}\nexport BIOBLEND_TEST_JOB_TIMEOUT=${BIOBLEND_TEST_JOB_TIMEOUT:-240}\n\n# default skip tests\nDEFAULT_SKIP_TESTS=\"not test_rerun_and_remap and not test_create_quota and not test_get_quotas and not test_delete_undelete_quota and not test_update_quota and not test_update_non_default_quota and not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_existing_history and not test_new_history and not test_params and not test_tool_dependency_install and not test_download_history and not test_export_and_download and not test_cancel_invocation and not test_run_step_actions and not test_extract_workflow_from_history\"\n\nEXTRA_SKIP_TESTS_BIOBLEND=${EXTRA_SKIP_TESTS_BIOBLEND:-\"\"}\n\n# Combine default skip tests with extra skip tests, if provided\nSKIP_TESTS=\"$DEFAULT_SKIP_TESTS\"\n[ -n \"$EXTRA_SKIP_TESTS_BIOBLEND\" ] && SKIP_TESTS=\"$SKIP_TESTS and $EXTRA_SKIP_TESTS_BIOBLEND\"\n\ntox -e py310 -- -k \"$SKIP_TESTS\"\n"
  },
  {
    "path": "compose/tests/galaxy-selenium-test/Dockerfile",
    "content": "FROM selenium/standalone-chrome:4.25.0\n\nARG GALAXY_RELEASE=release_24.1\nARG GALAXY_REPO=https://github.com/galaxyproject/galaxy\n\nENV GALAXY_ROOT_DIR=/galaxy\nENV GALAXY_PYTHON=/usr/bin/python3\n\nUSER root\nRUN apt update && apt install --no-install-recommends python3-dev python3-pip libpq-dev 2to3 -y && rm -rf /var/lib/apt/lists/* \\\n    && mkdir \"${GALAXY_ROOT_DIR}\" \\\n    && chown seluser \"${GALAXY_ROOT_DIR}\"\n\nUSER seluser\nRUN mkdir -p $GALAXY_ROOT_DIR && \\\n    curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \\\n    && cd \"${GALAXY_ROOT_DIR}\" \\\n    && ./scripts/common_startup.sh --skip-client-build --dev-wheels\n\nCOPY run.sh /usr/bin/run.sh\n\nCMD \"/usr/bin/run.sh\"\n"
  },
  {
    "path": "compose/tests/galaxy-selenium-test/run.sh",
    "content": "#!/bin/bash\nset -e # Stop script, if a test fails\n\nsupervisord &\n\nsleep 5\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\\?key\\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})\" -eq \"200\" ] && echo Galaxy started; do\n  sleep 1;\ndone;\n\nexport GALAXY_TEST_SELENIUM_REMOTE=1\nexport GALAXY_TEST_SELENIUM_REMOTE_HOST=localhost\nexport GALAXY_TEST_SELENIUM_REMOTE_PORT=4444\nexport GALAXY_TEST_EXTERNAL_FROM_SELENIUM=${GALAXY_URL:-http://nginx}\nexport GALAXY_TEST_EXTERNAL=${GALAXY_URL:-http://nginx}\nexport GALAXY_CONFIG_BOOTSTRAP_ADMIN_API_KEY=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey}\n\n\nfor test in $(echo \"$TESTS\" | sed \"s/,/ /g\"); do\n  echo \"Running test $test\"\n  ./galaxy/run_tests.sh --skip-common-startup -selenium \"/galaxy/lib/galaxy_test/selenium/test_$test\"\ndone\n"
  },
  {
    "path": "compose/tests/galaxy-workflow-test/Dockerfile",
    "content": "FROM alpine:3.17\n\nENV TEST_REPO=${TEST_REPO:-https://github.com/jyotipm29/workflow-testing} \\\n    TEST_RELEASE=${TEST_RELEASE:-24.1}\n\nRUN apk add --no-cache bash python3 py3-pip curl \\\n    && apk add --no-cache --virtual build-dep gcc make libc-dev xz-dev bzip2-dev hdf5-dev musl-dev linux-headers python3-dev \\\n    && pip3 install planemo \\\n    && mkdir /src && cd /src \\\n    && curl -L -s $TEST_REPO/archive/$TEST_RELEASE.tar.gz | tar xzf - --strip-components=1 \\\n    && apk del build-dep\n\nADD ./run.sh /usr/bin/run.sh\n\nWORKDIR /src\n\nENTRYPOINT /usr/bin/run.sh\n"
  },
  {
    "path": "compose/tests/galaxy-workflow-test/run.sh",
    "content": "#!/bin/bash\nset -e # Stop script, if a test fails\n\necho \"Waiting for Galaxy...\"\nuntil [ \"$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}/api/users/current\\?key\\=${GALAXY_DEFAULT_ADMIN_KEY:-fakekey})\" -eq \"200\" ] && echo Galaxy started; do\n  sleep 1;\ndone;\n\nfor workflow in $(echo $WORKFLOWS | sed \"s/,/ /g\")\ndo\n  echo \"Running test $workflow\"\n  planemo $PLANEMO_OPTIONS test \\\n    --galaxy_url \"${GALAXY_URL:-nginx}\" \\\n    --galaxy_admin_key \"${GALAXY_USER_KEY:-fakekey}\" \\\n    --shed_install \\\n    --engine external_galaxy \\\n    --test_output ${GALAXY_ROOT_DIR:-/galaxy}/database/tool_test_output.html \\\n    --test_output_json ${GALAXY_ROOT_DIR:-/galaxy}/database/tool_test_output.json \\\n    \"$workflow\";\ndone\n"
  },
  {
    "path": "cvmfs/Dockerfile",
    "content": "FROM ubuntu:24.04\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update \\\n    && apt-get install -y --no-install-recommends \\\n        ansible \\\n        ca-certificates \\\n        curl \\\n        dirmngr \\\n        fuse3 \\\n        git \\\n        gpg \\\n        python3 \\\n        python3-apt \\\n        python3-venv \\\n    && rm -rf /var/lib/apt/lists/*\n\nCOPY ansible/ /ansible/\n\nRUN ansible-galaxy install -r /ansible/requirements.yml -p /ansible/roles \\\n    && rm -rf /root/.ansible\n\nCOPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh\nRUN chmod 0755 /usr/local/bin/docker-entrypoint.sh\n\nENTRYPOINT [\"/usr/local/bin/docker-entrypoint.sh\"]\nCMD [\"bash\", \"-lc\", \"tail -F /var/log/autofs.log /var/log/cvmfs.log 2>/dev/null\"]\n"
  },
  {
    "path": "cvmfs/README.md",
    "content": "# CVMFS sidecar for Galaxy\n\nThis container provides a full CVMFS client (no cvmfsexec) and is intended to be used as an optional sidecar for the\nGalaxy container in `galaxy/docker-compose.yaml`.\n\n## What it does\n\n- Installs and configures the CVMFS client using the `galaxyproject.cvmfs` Ansible role.\n- Enables the Galaxy CVMFS repositories (including `data.galaxyproject.org` and\n  `singularity.galaxyproject.org`).\n- Starts autofs and warms the mount points so the CVMFS mounts are shared to the Galaxy container.\n\n## Build\n\nFrom the repository root:\n\n```bash\ndocker build -t galaxy-cvmfs ./cvmfs\n```\n\n## Usage with docker-compose\n\nThe `galaxy/docker-compose.yaml` file contains an optional `cvmfs` service (profile: `cvmfs`).\nStart both containers with:\n\n```bash\ncd galaxy\nCVMFS_MOUNT_DIR=/cvmfs EXPORT_DIR=./export docker compose --profile cvmfs up\n```\n\nNotes:\n- The sidecar runs privileged so the CVMFS mount can be propagated to the host.\n- The `/cvmfs` mount is shared between the sidecar and the Galaxy container.\n- The CVMFS cache is stored in `${EXPORT_DIR}/cvmfs-cache` to keep it persistent.\n\n## Basic check\n\nOnce running, verify the mount from the Galaxy container:\n\n```bash\ndocker exec -it galaxy-server ls /cvmfs/data.galaxyproject.org/byhand\n```\n\nIf the directory lists, CVMFS is mounted.\n\n## Environment variables\n\n- `CVMFS_REPOSITORIES`: Space- or comma-separated list of repositories to warm up.\n  Default: `data.galaxyproject.org singularity.galaxyproject.org`\n- `CVMFS_CACHE_BASE`: Cache directory inside the sidecar. Default: `/var/lib/cvmfs`\n"
  },
  {
    "path": "cvmfs/ansible/playbook.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: true\n  vars:\n    cvmfs_role: client\n    galaxy_cvmfs_repos_enabled: true\n    cvmfs_http_proxies:\n      - DIRECT\n    cvmfs_cache_base: /var/lib/cvmfs\n    cvmfs_quota_limit: 4000\n    cvmfs_packages:\n      client:\n        - cvmfs\n        - autofs\n  roles:\n    - role: galaxyproject.cvmfs\n"
  },
  {
    "path": "cvmfs/ansible/requirements.yml",
    "content": "---\nroles:\n  - name: galaxyproject.cvmfs\n    src: https://github.com/galaxyproject/ansible-cvmfs\n    version: main\n"
  },
  {
    "path": "cvmfs/docker-entrypoint.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nrepos=\"${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}\"\nrepos=\"${repos//,/ }\"\n\nmkdir -p /cvmfs\nmkdir -p \"${CVMFS_CACHE_BASE:-/var/lib/cvmfs}\"\ntouch /var/log/autofs.log /var/log/cvmfs.log\n\nif [[ ! -f \"${CVMFS_CACHE_BASE:-/var/lib/cvmfs}/.configured\" ]]; then\n    ansible-playbook /ansible/playbook.yml\n    touch \"${CVMFS_CACHE_BASE:-/var/lib/cvmfs}/.configured\"\nfi\n\nif command -v service >/dev/null 2>&1; then\n    service autofs start || true\nelse\n    autofs -f || true\nfi\n\nfor repo in $repos; do\n    mkdir -p \"/cvmfs/$repo\"\n    ls \"/cvmfs/$repo\" >/dev/null 2>&1 || true\ndone\n\nexec \"$@\"\n"
  },
  {
    "path": "docs/README.md",
    "content": "Documentation\n=============\n\nThe documentation is automatically generated when the main [`README.md`](https://github.com/bgruening/docker-galaxy/blob/main/README.md) is changed on the `main` branch.\n\nFor information, this automatic generation uses a [Python script](src/generate_docs.py) to transform the markdown in the `README.md` into the HTML files.\nThis generation is automatically launched by a [GitHub Action Workflow](https://github.com/bgruening/docker-galaxy/actions/workflows/update-site.yml).\n\nSo, if you see any error in the [online documentation](http://bgruening.github.io/docker-galaxy), you can first check the `README.md`. If the error does not come from the `README.md`, you can either file an issue or check the [Python](src/generate_docs.py) script used to generate the HTML files.\n"
  },
  {
    "path": "docs/Running_jobs_outside_of_the_container.md",
    "content": "Using an external Slurm cluster\n-------------------------------\n\nIt is often convenient to configure Galaxy to use a high-performance cluster for running jobs. To do so, two files are required:\n\n 1. munge.key\n 2. slurm.conf\n\nThese files from the cluster must be copied to the `/export` mount point (i.e., `/data/galaxy-data` on the host if using below command) accessible to Galaxy before starting the container. This must be done regardless of which Slurm daemons are running within Docker. At start, symbolic links will be created to these files to `/etc` within the container, allowing the various Slurm functions to communicate properly with your cluster. In such cases, there's no reason to run `slurmctld`, the Slurm controller daemon, from within Docker, so specify `-e \"NONUSE=slurmctld\"`. Unless you would like to also use Slurm (rather than the local job runner) to run jobs within the Docker container, then alternatively specify `-e \"NONUSE=slurmctld,slurmd\"`.\n\nImportantly, Slurm relies on a shared filesystem between the Docker container and the execution nodes. To allow things to function correctly, each of the execution nodes will need `/export` and `/galaxy` directories to point to the appropriate places. Suppose you ran the following command to start the Docker image:\n\n  ```sh\n  docker run -d -e \"NONUSE=slurmd,slurmctld\" -p 80:80 -v /data/galaxy-data:/export bgruening/galaxy-stable\n  ```\n\nYou would then need the following symbolic links on each of the nodes:\n\n 1. `/export`  → `/data/galaxy-data`\n 2. `/galaxy`  → `/data/galaxy-data/galaxy`\n\nA brief note is in order regarding the version of Slurm installed. This Docker image uses Ubuntu 14.04 as its base image. The version of Slurm in the Ubuntu 14.04 repository is 2.6.5 and that is what is installed in this image. 
If your cluster is using an incompatible version of Slurm then you will likely need to modify this Docker image.\n\nThe following is an example for how to specify a destination in `job_conf.xml` that uses a custom partition (\"work\", rather than \"debug\") and 4 cores rather than 1:\n\n```xml\n    <destination id=\"slurm4threads\" runner=\"slurm\">\n        <param id=\"embed_metadata_in_job\">False</param>\n        <param id=\"nativeSpecification\">-p work -n 4</param>\n    </destination>\n```\n\nThe usage of `-n` can be confusing. Note that it will specify the number of cores, not the number of tasks (i.e., it's not equivalent to `srun -n 4`).\n\nTips for Running Jobs Outside the Container\n---------------------------------------------\n\nIn its default state Galaxy assumes both the Galaxy source code and\nvarious temporary files are available on shared file systems across the\ncluster, and uses the Galaxy source code to calculate metadata about the\nfiles that have been produced.\nWhen using Condor or SLURM (as described above) to run jobs outside\nof the Docker container one can disable the metadata generation on the cluster,\nor synchronize the files required for generating these.\n\nThe ``embed_metadata_in_job`` option on job destinations in `job_conf.xml`\nforces Galaxy to collect metadata inside the container instead of on the\ncluster:\n\n```xml\n    <param id=\"embed_metadata_in_job\">False</param>\n```\n\nThis has performance implications and may not scale as well as performing\nthese calculations on the remote cluster - but this should not be a problem\nfor most Galaxy instances.\n\nAdditionally, many framework tools depend on Galaxy's Python virtual\nenvironment being available. This should be created outside of the container\non a shared filesystem available to your cluster using the instructions\n[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#managing-dependencies-manually). 
Job destinations\ncan then source these virtual environments using the instructions outlined\n[here](https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/framework_dependencies.rst#galaxy-job-handlers). In other words, by adding\na line such as this to each job destination:\n\n```xml\n    <env file=\"/path/to/shared/galaxy/venv\" />\n```\n\nA Hands-on example of running SLURM on an external cluster container\n--------------------------------------------------------------------\n\nIn the [/test/slurm](../test/slurm/) folder you will find a Dockerfile\nthat can be used to build a SLURM docker image and to test the integration\nof docker galaxy with SLURM.\n\nTo build the image, go to the [/test/slurm](../test/slurm/) folder and type:\n```sh\ndocker build -t slurm .\n```\nAs explained above, to connect galaxy with the SLURM cluster, the slurm.conf\nand munge.key files are needed. These files will be automatically generated by the\ndocker slurm container and placed into the /export folder.\n\nTo make them available to the galaxy container, we start the slurm container\nwith a host directory (`/data/galaxy-data`) mounted to `/export`.\n(If there is a real cluster available, this would be a network share):\n```sh\ndocker run -d -v /data/galaxy-data:/export \\\n           --name slurm \\\n           --hostname slurm \\\n           slurm\n```\nWe are also using the `--hostname slurm`, which allows the galaxy container\nto reach the slurm container using the `slurm` hostname.\n\nYou should see a `slurm.conf` and `munge.key` file in the export folder.\nWe can now start and connect galaxy to the slurm cluster:\n```sh\ndocker run -d -e \"NONUSE=slurmd,slurmctld\" \\\n           --name galaxy-slurm-test \\\n           --link slurm \\\n           -p 80:80 \\\n           -v /data/galaxy-data:/export \\\n           bgruening/galaxy-stable\n```\nNote the --link slurm, this will allow the galaxy container to talk to the slurm container.\nOn a real network this would not 
be necessary.\nAfter a moment, we can enter the docker container and submit a simple job using the srun utility:\n```\ndocker exec galaxy-slurm-test srun hostname\n```\nThis should return the hostname of the slurm container, slurm.\nBut we still need to instruct galaxy on how to interface with slurm.\nWe therefore need to adjust the job_conf.xml file.\nA sample job_conf.xml is in [/test/slurm/job_conf.xml](../test/slurm/job_conf.xml).\nWe can copy this file to /data/galaxy-data/galaxy/config:\n```\ncp job_conf.xml /data/galaxy-data/galaxy/config\n```\nWe restart galaxy inside the container\n```sh\ndocker exec galaxy-slurm-test galaxyctl restart\n```\n\nWe should now be able to submit galaxy jobs through the slurm container.\nTo verify this you can install the printenv tools from the toolshed\n(do not forget to restart galaxy after installing tools!)\nand look at its output.\n\nBonus points\n------------\n\nIn the [job_conf.xml](../test/slurm/job_conf.xml) we are disabling metadata generation\non the cluster, since this requires a set of galaxy's dependencies.\nWe can install these on the cluster, since the docker image copies galaxy's requirements.txt\nand the galaxy's lib folder to /export.\nWe enter the slurm container and install these dependencies:\n\n```sh\ndocker exec -it slurm bash\n```\nInside the container we switch to the galaxy user, source the virtualenv, upgrade pip and install\nthe required dependencies:\n```sh\nsource /galaxy/.venv/bin/activate && pip install --upgrade pip\npip install -r /galaxy/requirements.txt --index-url https://wheels.galaxyproject.org/simple\n```\nNow quit the slurm container, edit the job_conf.xml and set\n```\n<param id=\"embed_metadata_in_job\">True</param>\n```\nand finally restart galaxy:\n```\ndocker exec galaxy-slurm-test galaxyctl restart\n```\n"
  },
  {
    "path": "docs/css/landing_page.css",
    "content": "@font-face {\n  font-family: 'Noto Sans';\n  font-weight: 400;\n  font-style: normal;\n  src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot');\n  src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot?#iefix') format('embedded-opentype'),\n       local('Noto Sans'),\n       local('Noto-Sans-regular'),\n       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff2') format('woff2'),\n       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff') format('woff'),\n       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.ttf') format('truetype'),\n       url('../fonts/Noto-Sans-regular/Noto-Sans-regular.svg#NotoSans') format('svg');\n}\n\n@font-face {\n  font-family: 'Noto Sans';\n  font-weight: 700;\n  font-style: normal;\n  src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot');\n  src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot?#iefix') format('embedded-opentype'),\n       local('Noto Sans Bold'),\n       local('Noto-Sans-700'),\n       url('../fonts/Noto-Sans-700/Noto-Sans-700.woff2') format('woff2'),\n       url('../fonts/Noto-Sans-700/Noto-Sans-700.woff') format('woff'),\n       url('../fonts/Noto-Sans-700/Noto-Sans-700.ttf') format('truetype'),\n       url('../fonts/Noto-Sans-700/Noto-Sans-700.svg#NotoSans') format('svg');\n}\n\n@font-face {\n  font-family: 'Noto Sans';\n  font-weight: 400;\n  font-style: italic;\n  src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot');\n  src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot?#iefix') format('embedded-opentype'),\n       local('Noto Sans Italic'),\n       local('Noto-Sans-italic'),\n       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff2') format('woff2'),\n       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff') format('woff'),\n       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.ttf') format('truetype'),\n       url('../fonts/Noto-Sans-italic/Noto-Sans-italic.svg#NotoSans') format('svg');\n}\n\n@font-face {\n  font-family: 'Noto Sans';\n  font-weight: 700;\n  
font-style: italic;\n  src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot');\n  src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot?#iefix') format('embedded-opentype'),\n       local('Noto Sans Bold Italic'),\n       local('Noto-Sans-700italic'),\n       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2') format('woff2'),\n       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff') format('woff'),\n       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf') format('truetype'),\n       url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg#NotoSans') format('svg');\n}\n\nbody {\n  background-color: #fff;\n  padding:50px;\n  font: 14px/1.5 \"Noto Sans\", \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  color:#727272;\n  font-weight:400;\n}\n\nh1, h2, h3, h4, h5, h6 {\n  color:#222;\n  margin:0 0 20px;\n}\n\np, ul, ol, table, pre, dl {\n  margin:0 0 20px;\n}\n\nh1, h2, h3 {\n  line-height:1.1;\n}\n\nh1 {\n  font-size:28px;\n}\n\nh2 {\n  color:#393939;\n}\n\nh3, h4, h5, h6 {\n  color:#494949;\n}\n\na {\n  color:#39c;\n  text-decoration:none;\n}\n\na:hover {\n  color:#069;\n}\n\na small {\n  font-size:11px;\n  color:#777;\n  margin-top:-0.3em;\n  display:block;\n}\n\na:hover small {\n  color:#777;\n}\n\n.wrapper {\n  width:860px;\n  margin:0 auto;\n}\n\nblockquote {\n  border-left:1px solid #e5e5e5;\n  margin:0;\n  padding:0 0 0 20px;\n  font-style:italic;\n}\n\ncode, pre {\n  font-family:Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace;\n  color:#333;\n  font-size:12px;\n}\n\npre {\n  padding:8px 15px;\n  background: #f8f8f8;\n  border-radius:5px;\n  border:1px solid #e5e5e5;\n  overflow-x: auto;\n}\n\ntable {\n  width:100%;\n  border-collapse:collapse;\n}\n\nth, td {\n  text-align:left;\n  padding:5px 10px;\n  border-bottom:1px solid #e5e5e5;\n}\n\ndt {\n  color:#444;\n  font-weight:700;\n}\n\nth {\n  color:#444;\n}\n\nimg {\n  
max-width:100%;\n}\n\nheader {\n  width:270px;\n  float:left;\n  position:fixed;\n  -webkit-font-smoothing:subpixel-antialiased;\n}\n\nheader ul.box {\n  list-style:none;\n  height:40px;\n  padding:0;\n  background: #f4f4f4;\n  border-radius:5px;\n  border:1px solid #e0e0e0;\n  width:270px;\n}\n\nheader li.box {\n  width:89px;\n  float:left;\n  border-right:1px solid #e0e0e0;\n  height:40px;\n}\n\nheader li.box:first-child a {\n  border-radius:5px 0 0 5px;\n}\n\nheader li.box:last-child a {\n  border-radius:0 5px 5px 0;\n}\n\nheader ul.box a {\n  line-height:1;\n  font-size:11px;\n  color:#999;\n  display:block;\n  text-align:center;\n  padding-top:6px;\n  height:34px;\n}\n\nheader ul.box a:hover {\n  color:#999;\n}\n\nheader ul.box a:active {\n  background-color:#f0f0f0;\n}\n\nstrong {\n  color:#222;\n  font-weight:700;\n}\n\nheader ul.box li + li + li {\n  border-right:none;\n  width:89px;\n}\n\nheader ul.box a strong {\n  font-size:14px;\n  display:block;\n  color:#222;\n}\n\nsection {\n  width:500px;\n  float:right;\n  padding-bottom:50px;\n}\n\n.bold {\n  font-weight:bold;\n}\n\nsmall {\n  font-size:11px;\n}\n\nhr {\n  border:0;\n  background:#e5e5e5;\n  height:1px;\n  margin:0 0 20px;\n}\n\nfooter {\n  width:270px;\n  float:left;\n  position:fixed;\n  bottom:50px;\n  -webkit-font-smoothing:subpixel-antialiased;\n}\n\n@media print, screen and (max-width: 960px) {\n\n  div.wrapper {\n    width:auto;\n    margin:0;\n  }\n\n  header, section, footer {\n    float:none;\n    position:static;\n    width:auto;\n  }\n\n  header {\n    padding-right:320px;\n  }\n\n  section {\n    border:1px solid #e5e5e5;\n    border-width:1px 0;\n    padding:20px 0;\n    margin:0 0 20px;\n  }\n\n  header a small {\n    display:inline;\n  }\n\n  header ul {\n    position:absolute;\n    right:50px;\n    top:52px;\n  }\n}\n\n@media print, screen and (max-width: 720px) {\n  body {\n    word-wrap:break-word;\n  }\n\n  header {\n    padding:0;\n  }\n\n  header ul, header p.view {\n    
position:static;\n  }\n\n  pre, code {\n    word-wrap:normal;\n  }\n}\n\n@media print, screen and (max-width: 480px) {\n  body {\n    padding:15px;\n  }\n\n  header ul {\n    width:99%;\n  }\n\n  header li, header ul li + li + li {\n    width:33%;\n  }\n}\n\n@media print {\n  body {\n    padding:0.4in;\n    font-size:12pt;\n    color:#444;\n  }\n}\n\n\n/*\nThe MIT License (MIT)\n\nCopyright (c) 2016 GitHub, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n*/\n\n.pl-c /* comment */ {\n  color: #969896;\n}\n\n.pl-c1 /* constant, variable.other.constant, support, meta.property-name, support.constant, support.variable, meta.module-reference, markup.raw, meta.diff.header */,\n.pl-s .pl-v /* string variable */ {\n  color: #0086b3;\n}\n\n.pl-e /* entity */,\n.pl-en /* entity.name */ {\n  color: #795da3;\n}\n\n.pl-smi /* variable.parameter.function, storage.modifier.package, storage.modifier.import, storage.type.java, variable.other */,\n.pl-s .pl-s1 /* string source */ {\n  color: #333;\n}\n\n.pl-ent /* entity.name.tag */ {\n  color: #63a35c;\n}\n\n.pl-k /* keyword, storage, storage.type */ {\n  color: #a71d5d;\n}\n\n.pl-s /* string */,\n.pl-pds /* punctuation.definition.string, string.regexp.character-class */,\n.pl-s .pl-pse .pl-s1 /* string punctuation.section.embedded source */,\n.pl-sr /* string.regexp */,\n.pl-sr .pl-cce /* string.regexp constant.character.escape */,\n.pl-sr .pl-sre /* string.regexp source.ruby.embedded */,\n.pl-sr .pl-sra /* string.regexp string.regexp.arbitrary-repitition */ {\n  color: #183691;\n}\n\n.pl-v /* variable */ {\n  color: #ed6a43;\n}\n\n.pl-id /* invalid.deprecated */ {\n  color: #b52a1d;\n}\n\n.pl-ii /* invalid.illegal */ {\n  color: #f8f8f8;\n  background-color: #b52a1d;\n}\n\n.pl-sr .pl-cce /* string.regexp constant.character.escape */ {\n  font-weight: bold;\n  color: #63a35c;\n}\n\n.pl-ml /* markup.list */ {\n  color: #693a17;\n}\n\n.pl-mh /* markup.heading */,\n.pl-mh .pl-en /* markup.heading entity.name */,\n.pl-ms /* meta.separator */ {\n  font-weight: bold;\n  color: #1d3e81;\n}\n\n.pl-mq /* markup.quote */ {\n  color: #008080;\n}\n\n.pl-mi /* markup.italic */ {\n  font-style: italic;\n  color: #333;\n}\n\n.pl-mb /* 
markup.bold */ {\n  font-weight: bold;\n  color: #333;\n}\n\n.pl-md /* markup.deleted, meta.diff.header.from-file */ {\n  color: #bd2c00;\n  background-color: #ffecec;\n}\n\n.pl-mi1 /* markup.inserted, meta.diff.header.to-file */ {\n  color: #55a532;\n  background-color: #eaffea;\n}\n\n.pl-mdr /* meta.diff.range */ {\n  font-weight: bold;\n  color: #795da3;\n}\n\n.pl-mo /* meta.output */ {\n  color: #1d3e81;\n}\n\n.remove-all-styles {\n  all: revert;\n}\n"
  },
  {
    "path": "docs/js/landing_page.js",
    "content": "var metas = document.getElementsByTagName('meta');\nvar i;\nif (navigator.userAgent.match(/iPhone/i)) {\n  for (i=0; i<metas.length; i++) {\n    if (metas[i].name == \"viewport\") {\n      metas[i].content = \"width=device-width, minimum-scale=1.0, maximum-scale=1.0\";\n    }\n  }\n  document.addEventListener(\"gesturestart\", gestureStart, false);\n}\nfunction gestureStart() {\n  for (i=0; i<metas.length; i++) {\n    if (metas[i].name == \"viewport\") {\n      metas[i].content = \"width=device-width, minimum-scale=0.25, maximum-scale=1.6\";\n    }\n  }\n}"
  },
  {
    "path": "docs/src/generate_docs.py",
    "content": "import os\n\nimport pycmarkgfm\nfrom bs4 import BeautifulSoup\n\n\ndef extract_html_structure(html_content):\n    html_structure = {}\n    section_order = []\n    section_id = \"header\"\n    section_name = \"Header\"\n    section_content = BeautifulSoup(\"\", \"html.parser\")\n    soup = BeautifulSoup(html_content, \"html.parser\")\n\n    for section in [tag for tag in soup.children if isinstance(tag, str) is False]:\n        if section.name == \"h1\":\n            if section_id:\n                html_structure[section_id] = {\n                    \"name\": section_name,\n                    \"content\": section_content,\n                }\n                section_order.append(section_id)\n\n            if \"Galaxy Docker Image\" in section.text:\n                section_id = \"index\"\n                section_name = \"Global description\"\n            else:\n                anchor = section.find(\"a\", {\"name\": True})\n                section_id = (\n                    anchor[\"name\"].replace(\"user-content-\", \"\").lower()\n                    if anchor\n                    else None\n                )\n                section_name = (\n                    section.get_text(strip=True).replace(\"[toc]\", \"\")\n                    if anchor\n                    else None\n                )\n\n            section_content = BeautifulSoup(f\"<h1>{section_name}</h1>\\n\", \"html.parser\")\n        else:\n            if section.name == \"a\" and section.get(\"href\") == \"#toc\":\n                continue\n            section_content.append(section)\n\n    if section_id:\n        html_structure[section_id] = {\n            \"name\": section_name,\n            \"content\": section_content,\n        }\n        section_order.append(section_id)\n\n    header = BeautifulSoup(features=\"html.parser\")\n    header.append(header.new_tag(\"p\", **{\"class\": \"bold\"}, string=\"Table of content\"))\n    ul_tag = header.new_tag(\"ul\")\n    
header.append(ul_tag)\n    for section in section_order:\n        if section == \"header\" or \"toc\" in section:\n            continue\n        li_tag = header.new_tag(\"li\")\n        a_tag = header.new_tag(\"a\", href=f\"{section}.html\")\n        a_tag.string = html_structure[section][\"name\"]\n        li_tag.append(a_tag)\n        ul_tag.append(li_tag)\n\n    for section in html_structure:\n        if section == \"header\" or \"toc\" in section:\n            continue\n        output_filepath = os.path.join(\"docs\", f\"{section}.html\")\n        page_content = html_structure[section][\"content\"]\n        soup = BeautifulSoup(\"<html></html>\", \"html.parser\")\n        html = soup.html\n\n        head = soup.new_tag(\"head\")\n        html.append(head)\n\n        meta_charset = soup.new_tag(\"meta\", charset=\"utf-8\")\n        head.append(meta_charset)\n        meta_compat = soup.new_tag(\n            \"meta\", **{\"http-equiv\": \"X-UA-Compatible\", \"content\": \"chrome=1\"}\n        )\n        head.append(meta_compat)\n        title = soup.new_tag(\"title\")\n        title.string = \"Galaxy Docker Image by bgruening\"\n        head.append(title)\n        link = soup.new_tag(\"link\", rel=\"stylesheet\", href=\"css/landing_page.css\")\n        head.append(link)\n\n        body = soup.new_tag(\"body\")\n        html.append(body)\n\n        wrapper = soup.new_tag(\"div\", **{\"class\": \"wrapper\"})\n        body.append(wrapper)\n\n        header_tag = soup.new_tag(\"header\")\n        wrapper.append(header_tag)\n\n        h1 = soup.new_tag(\"h1\")\n        h1.string = \"Galaxy Docker Image\"\n        header_tag.append(h1)\n        p = soup.new_tag(\"p\")\n        p.string = \"Docker Images tracking the stable Galaxy releases\"\n        header_tag.append(p)\n        header_tag.append(BeautifulSoup(str(header), \"html.parser\"))\n        p_view = soup.new_tag(\"p\", **{\"class\": \"view\"})\n        a_view = soup.new_tag(\"a\", 
href=\"https://github.com/bgruening/docker-galaxy\")\n        a_view.string = \"View the Project on GitHub \"\n        small_view = soup.new_tag(\"small\")\n        small_view.string = \"bgruening/docker-galaxy\"\n        a_view.append(small_view)\n        p_view.append(a_view)\n        header_tag.append(p_view)\n\n        ul_box = soup.new_tag(\"ul\", **{\"class\": \"box\"})\n        li_zip = soup.new_tag(\"li\", **{\"class\": \"box\"})\n        a_zip = soup.new_tag(\n            \"a\", href=\"https://github.com/bgruening/docker-galaxy/zipball/master\"\n        )\n        a_zip.string = \"Download \"\n        strong_zip = soup.new_tag(\"strong\")\n        strong_zip.string = \"ZIP File\"\n        a_zip.append(strong_zip)\n        li_zip.append(a_zip)\n        ul_box.append(li_zip)\n\n        li_tar = soup.new_tag(\"li\", **{\"class\": \"box\"})\n        a_tar = soup.new_tag(\n            \"a\", href=\"https://github.com/bgruening/docker-galaxy/tarball/master\"\n        )\n        a_tar.string = \"Download \"\n        strong_tar = soup.new_tag(\"strong\")\n        strong_tar.string = \"TAR Ball\"\n        a_tar.append(strong_tar)\n        li_tar.append(a_tar)\n        ul_box.append(li_tar)\n\n        li_github = soup.new_tag(\"li\", **{\"class\": \"box\"})\n        a_github = soup.new_tag(\"a\", href=\"https://github.com/bgruening/docker-galaxy\")\n        a_github.string = \"View On \"\n        strong_github = soup.new_tag(\"strong\")\n        strong_github.string = \"GitHub\"\n        a_github.append(strong_github)\n        li_github.append(a_github)\n        ul_box.append(li_github)\n\n        header_tag.append(ul_box)\n\n        section = soup.new_tag(\"section\")\n        section.append(page_content)\n        wrapper.append(section)\n\n        footer = soup.new_tag(\"footer\")\n        p1 = soup.new_tag(\"p\")\n        p1.append(\n            BeautifulSoup(\n                'This project is maintained by <a 
href=\"https://github.com/bgruening\">bgruening</a>',\n                \"html.parser\",\n            )\n        )\n        footer.append(p1)\n        p2 = soup.new_tag(\"p\")\n        p2.append(\n            BeautifulSoup(\n                '<small>Hosted on GitHub Pages &mdash; Theme by <a href=\"https://github.com/orderedlist\">orderedlist</a></small>',\n                \"html.parser\",\n            )\n        )\n        footer.append(p2)\n        wrapper.append(footer)\n\n        script = soup.new_tag(\"script\", src=\"js/landing_page.js\")\n        wrapper.append(script)\n\n        with open(output_filepath, \"w\") as output_file:\n            output_file.write(soup.prettify())\n\n\nif __name__ == \"__main__\":\n    with open(\"README.md\", \"r\") as f:\n        doc = f.read()\n    html_content = pycmarkgfm.gfm_to_html(doc, options=pycmarkgfm.options.unsafe)\n    extract_html_structure(html_content)\n"
  },
  {
    "path": "docs/src/requirements.txt",
    "content": "pycmarkgfm\nbeautifulsoup4\n"
  },
  {
    "path": "galaxy/Dockerfile",
    "content": "# Galaxy - Stable\n#\n# VERSION       Galaxy in Docker\n\n# TODO\n#\n# * README: only Docker next to Docker is supported\n# * NodeJS is getting globally installed via the playbook, this is not needed anymore isn't it?\n# * the playbooks are not cleaning anything up\n#\n\nFROM buildpack-deps:24.04 AS build_base\n\nENV GALAXY_ROOT_DIR=/galaxy \\\n    GALAXY_VIRTUAL_ENV=/galaxy_venv \\\n    GALAXY_HOME=/home/galaxy \\\n    GALAXY_CONDA_PREFIX=/tool_deps/_conda \\\n    MINIFORGE_VERSION=25.11.0-1\n\n\n# Install miniforge and then virtualenv from conda\nFROM build_base AS build_miniforge\n\nADD ./bashrc $GALAXY_HOME/.bashrc\n\nRUN curl -s -L https://github.com/conda-forge/miniforge/releases/download/$MINIFORGE_VERSION/Miniforge3-$MINIFORGE_VERSION-Linux-x86_64.sh > ~/miniforge.sh \\\n    && /bin/bash ~/miniforge.sh -b -p $GALAXY_CONDA_PREFIX/ \\\n    && rm ~/miniforge.sh \\\n    && ln -s $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh /etc/profile.d/conda.sh \\\n    && echo \". $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh\" >> $GALAXY_HOME/.bashrc \\\n    && echo \"conda activate base\" >> $GALAXY_HOME/.bashrc \\\n    && export PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH \\\n    && conda config --add channels bioconda \\\n    && conda install -y virtualenv pip ephemeris \"galaxy-tool-util>=24.1\" \\\n    && conda clean --packages -t -i \\\n    && cp -r ~/.conda $GALAXY_HOME && cp ~/.condarc $GALAXY_HOME \\\n    && find $GALAXY_CONDA_PREFIX -name '*.pyc' -delete\n\n\nFROM build_base AS build_galaxy\n\nARG GALAXY_RELEASE=release_26.0\nARG GALAXY_REPO=https://github.com/galaxyproject/galaxy\n\nENV NODE_OPTIONS=--max-old-space-size=4096 \\\n    UV_INSTALL_DIR=/usr/local/bin \\\n    GALAXY_WHEELS_INDEX_URL=\"https://wheels.galaxyproject.org/simple\"\n\nRUN curl -LsSf https://astral.sh/uv/install.sh | sh\n\nCOPY --from=build_miniforge /tool_deps /tool_deps\n\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    --mount=type=cache,target=/root/.npm \\\n    
--mount=type=cache,target=/root/.cache/yarn \\\n    mkdir $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV \\\n    # download latest stable release of Galaxy.\n    && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \\\n    && uv venv --seed $GALAXY_VIRTUAL_ENV \\\n    # Install galaxy client\n    && cd $GALAXY_ROOT_DIR && NPM_CONFIG_CACHE=/root/.npm YARN_CACHE_FOLDER=/root/.cache/yarn ./scripts/common_startup.sh \\\n    && uv pip install --python $GALAXY_VIRTUAL_ENV/bin/python \"weasyprint>=61.2\" watchdog \\\n    --index-strategy unsafe-best-match --extra-index-url ${GALAXY_WHEELS_INDEX_URL} \\\n    # cleanup\n    && find config \\( -name 'node_modules' -o -name '.cache' -o -name '.parcel-cache' \\) -type d -prune -exec rm -rf '{}' + \\\n    && find $GALAXY_ROOT_DIR -name '*.pyc' -delete && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete \\\n    && rm -rf $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/\n\n# This is needed for gridengine to work with galaxy\n# https://github.com/galaxyproject/galaxy/issues/10425\nRUN cd / \\\n    && curl -L -o jemalloc-5.3.0.tar.gz https://github.com/jemalloc/jemalloc/archive/5.3.0.tar.gz \\\n    && tar -xvzf jemalloc-5.3.0.tar.gz \\\n    && cd jemalloc-5.3.0 \\\n    && ./autogen.sh && ./configure --disable-initial-exec-tls \\\n    && make -j 4 && make install\n\n\n# TEMPORARY SLURM-DRMAA SOURCE BUILD\n# - Slurm 24.11 is required for Ubuntu 24.04 in this image, but the natefoo\n#   slurm-drmaa PPA only ships binaries built against Ubuntu's Slurm 23.11.\n# - That mismatch breaks DRMAA at runtime (libslurm ABI/plugin version errors).\n# - Until a 24.11-compatible slurm-drmaa package exists, we build it here from\n#   source in a dedicated stage and only copy the runtime library into the\n#   final image.\nFROM build_base AS build_slurm_drmaa\n\nARG SLURM_DRMAA_VERSION=1.1.5\n\nRUN apt-get -qq update \\\n    && apt-get install -y --no-install-recommends \\\n        
ca-certificates \\\n        git \\\n        autoconf \\\n        automake \\\n        bison \\\n        flex \\\n        gperf \\\n        ragel \\\n        libtool \\\n        pkg-config \\\n        software-properties-common \\\n        dirmngr \\\n        gpg \\\n        gpg-agent \\\n    && add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 \\\n    && apt-get -qq update \\\n    && apt-get install -y --no-install-recommends libslurm-dev slurm-wlm \\\n    && git clone --branch \"$SLURM_DRMAA_VERSION\" --depth 1 --recurse-submodules --shallow-submodules \\\n        https://github.com/natefoo/slurm-drmaa.git /tmp/slurm-drmaa \\\n    && cd /tmp/slurm-drmaa \\\n    && ./autogen.sh \\\n    && ./configure --prefix=/usr/local \\\n    && make -j\"$(nproc)\" \\\n    && make install \\\n    && mkdir -p /out \\\n    && cp -a /usr/local/lib/libdrmaa.so* /out/ \\\n    && rm -rf /var/lib/apt/lists/* /tmp/slurm-drmaa\n\nFROM ubuntu:24.04 AS galaxy_cluster_base\n\nENV GALAXY_ROOT_DIR=/galaxy \\\n    GALAXY_VIRTUAL_ENV=/galaxy_venv \\\n    GALAXY_LOGS_DIR=/home/galaxy/logs \\\n    GALAXY_CONFIG_DIR=/etc/galaxy \\\n    GALAXY_USER=galaxy \\\n    GALAXY_UID=1450 \\\n    GALAXY_GID=1450 \\\n    GALAXY_HOME=/home/galaxy \\\n    GALAXY_CONDA_PREFIX=/tool_deps/_conda \\\n    EXPORT_DIR=/export \\\n    DEBIAN_FRONTEND=noninteractive \\\n    PG_VERSION=15\n\nENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml \\\n    GALAXY_CONFIG_JOB_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_conf.xml \\\n    GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_metrics_conf.yml \\\n    GALAXY_CONFIG_TUS_UPLOAD_STORE=/tmp/tus_upload_store \\\n    GALAXY_CONFIG_INTERACTIVETOOLS_MAP=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/interactivetools_map.sqlite \\\n    GRAVITY_CONFIG_FILE=$GALAXY_CONFIG_DIR/gravity.yml \\\n    GALAXY_POSTGRES_UID=1550 \\\n    GALAXY_POSTGRES_GID=1550 \\\n    # Define the default postgresql database path\n    PG_DATA_DIR_DEFAULT=/var/lib/postgresql/$PG_VERSION/main/ \\\n    
PG_CONF_DIR_DEFAULT=/etc/postgresql/$PG_VERSION/main/ \\\n    PG_DATA_DIR_HOST=$EXPORT_DIR/postgresql/$PG_VERSION/main/\n\nENV UV_INSTALL_DIR=/usr/local/bin\n\nADD ./common_cleanup.sh /usr/bin/common_cleanup.sh\n\nRUN echo \"force-unsafe-io\" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \\\n    && echo \"Acquire::http {No-Cache=True;};\" > /etc/apt/apt.conf.d/no-cache \\\n    && echo 'APT::Install-Recommends \"0\";' > /etc/apt/apt.conf.d/99no-install-recommends \\\n    && apt-get -qq update && apt-get install -y locales curl \\\n    && locale-gen en_US.UTF-8 && dpkg-reconfigure locales \\\n    && apt-get autoremove -y && apt-get clean \\\n    && chmod 755 /usr/bin/common_cleanup.sh \\\n    && /usr/bin/common_cleanup.sh\n\nADD ansible/ /ansible/\n\n# Install ansible and other dependencies\nRUN apt-get -qq update \\\n    && apt install -y software-properties-common dirmngr gpg gpg-agent \\\n    && add-apt-repository ppa:ansible/ansible \\\n    # Use the Ubuntu HPC PPA for Slurm 24.11 so libslurm and plugins stay in sync on 24.04.\n    && add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 \\\n    && apt-get -qq update \\\n    && apt install -y sudo ca-certificates nano git gridengine-common gridengine-drmaa1.0 libswitch-perl nodejs npm singularity-container \\\n    && apt install -y ansible slurm-wlm libslurm42t64 \\\n    # Make python3 standard\n    && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \\\n    && apt purge -y software-properties-common systemd && apt-get autoremove -y && apt-get clean \\\n    # Install ansible roles\n    && ansible-galaxy install -r /ansible/requirements.yml -p /ansible/roles \\\n    && npm install -g @galaxyproject/gx-it-proxy@latest \\\n    && apt-get purge -y npm \\\n    && apt-get autoremove -y \\\n    && groupadd -r $GALAXY_USER -g $GALAXY_GID \\\n    && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -m -c \"Galaxy user\" --shell /bin/bash $GALAXY_USER \\\n    # Create the postgres user before apt-get 
does (with the configured UID/GID) to facilitate sharing $EXPORT_DIR/postgresql with non-Linux hosts\n    && groupadd -r postgres -g $GALAXY_POSTGRES_GID \\\n    && adduser --system --quiet --home /var/lib/postgresql --no-create-home --shell /bin/bash --gecos \"\" --uid $GALAXY_POSTGRES_UID --gid $GALAXY_POSTGRES_GID postgres \\\n    && mkdir -p $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV $GALAXY_CONFIG_DIR $GALAXY_CONFIG_DIR/web $GALAXY_LOGS_DIR $EXPORT_DIR $EXPORT_DIR/container_cache/singularity/mulled \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_ROOT_DIR $GALAXY_VIRTUAL_ENV $GALAXY_CONFIG_DIR $GALAXY_LOGS_DIR $EXPORT_DIR \\\n    && /usr/bin/common_cleanup.sh\n\nCOPY --from=build_slurm_drmaa /out/ /usr/lib/slurm-drmaa/lib/\n\nRUN ln -sf /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \\\n    && ldconfig\n\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /tool_deps /tool_deps\n\nRUN curl -LsSf https://astral.sh/uv/install.sh | sh\n\n# Install necessary components and dependencies for running Galaxy\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    uv venv --seed $GALAXY_VIRTUAL_ENV \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV \\\n    && ansible-playbook /ansible/provision.yml \\\n    --extra-vars galaxy_server_dir=$GALAXY_ROOT_DIR \\\n    --extra-vars galaxy_venv_dir=$GALAXY_VIRTUAL_ENV \\\n    --extra-vars galaxy_logs_dir=$GALAXY_LOGS_DIR \\\n    --extra-vars galaxy_user_name=$GALAXY_USER \\\n    --extra-vars galaxy_config_file=$GALAXY_CONFIG_FILE \\\n    --extra-vars galaxy_config_dir=$GALAXY_CONFIG_DIR \\\n    --extra-vars gravity_config_file=$GRAVITY_CONFIG_FILE \\\n    --extra-vars galaxy_job_conf_path=$GALAXY_CONFIG_JOB_CONFIG_FILE \\\n    --extra-vars galaxy_job_metrics_conf_path=$GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE \\\n    --extra-vars postgresql_version=$PG_VERSION \\\n    --extra-vars supervisor_postgres_config_path=$PG_CONF_DIR_DEFAULT/postgresql.conf \\\n    --extra-vars 
redis_venv_dir=$GALAXY_VIRTUAL_ENV \\\n    --extra-vars redis_venv_user=$GALAXY_USER \\\n    --extra-vars galaxy_user_name=$GALAXY_USER \\\n    --extra-vars proftpd_sql_db=galaxy@galaxy \\\n    --extra-vars proftpd_sql_user=$GALAXY_USER \\\n    --extra-vars proftpd_sql_password=$GALAXY_USER \\\n    --extra-vars galaxy_ftp_upload_dir=$EXPORT_DIR/ftp \\\n    --extra-vars tus_upload_store_path=$GALAXY_CONFIG_TUS_UPLOAD_STORE \\\n    --extra-vars gx_it_proxy_sessions_path=$GALAXY_CONFIG_INTERACTIVETOOLS_MAP \\\n    # Install flower separately as systemd tasks (tagged with 'service') have to be skipped\n    && PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook /ansible/flower.yml --skip-tags service \\\n    --extra-vars flower_venv_dir=$GALAXY_VIRTUAL_ENV \\\n    --extra-vars flower_db_file=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/flower.db \\\n    --extra-vars flower_user=$GALAXY_USER \\\n    --extra-vars flower_group=$GALAXY_USER \\\n    --extra-vars flower_venv_user=$GALAXY_USER \\\n    --extra-vars flower_venv_group=$GALAXY_USER \\\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV \\\n    && apt purge -y software-properties-common dirmngr gpg gpg-agent && apt-get autoremove -y && apt-get clean \\\n    && /usr/bin/common_cleanup.sh\n\nFROM galaxy_cluster_base AS final\n\nLABEL maintainer=\"Björn A. 
Grüning <bjoern.gruening@gmail.com>\"\n\nENV GALAXY_CONFIG_MANAGED_CONFIG_DIR=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/config \\\n    GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_CONFIG_DIR/tool_conf.xml \\\n    GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH=$GALAXY_CONFIG_DIR/tool_data_table_conf.xml \\\n    GALAXY_CONFIG_WATCH_TOOL_DATA_DIR=True \\\n    GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE=$GALAXY_CONFIG_DIR/container_resolvers_conf.yml \\\n    GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=$EXPORT_DIR/tool_deps \\\n    GALAXY_CONFIG_TOOL_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/tools \\\n    GALAXY_DEFAULT_ADMIN_USER=admin \\\n    GALAXY_DEFAULT_ADMIN_EMAIL=admin@example.org \\\n    GALAXY_DEFAULT_ADMIN_PASSWORD=password \\\n    GALAXY_DEFAULT_ADMIN_KEY=fakekey \\\n    GALAXY_DESTINATIONS_DEFAULT=slurm_cluster \\\n    GALAXY_RUNNERS_ENABLE_SLURM=True \\\n    GALAXY_RUNNERS_ENABLE_CONDOR=False \\\n    GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:galaxy@localhost:5432/galaxy?client_encoding=utf8 \\\n    GALAXY_CONFIG_ADMIN_USERS=admin@example.org \\\n    GALAXY_CONFIG_BOOTSTRAP_ADMIN_API_KEY=HSNiugRFvgT574F43jZ7N9F3 \\\n    GALAXY_CONFIG_BRAND=\"Galaxy Docker Build\" \\\n    GALAXY_CONFIG_STATIC_ENABLED=False \\\n    GALAXY_CONFIG_FILE_SOURCE_TEMPLATES=$GALAXY_CONFIG_DIR/file_source_templates.yml \\\n    GALAXY_CONFIG_VAULT_CONFIG_FILE=$GALAXY_CONFIG_DIR/vault_conf.yml \\\n    GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE=$GALAXY_CONFIG_DIR/tool_conf_interactive.xml \\\n    # The following ENV var can be used to set the number of gunicorn workers\n    GUNICORN_WORKERS=2 \\\n    # The following ENV var can be used to set the number of celery workers\n    CELERY_WORKERS=2 \\\n    # Set HTTPS to use a self-signed certificate (or your own certificate in $EXPORT_DIR/{server.key,server.crt})\n    USE_HTTPS=False \\\n    # Set USE_HTTPS_LETSENCRYPT and GALAXY_DOMAIN to a domain that is reachable to get a letsencrypt certificate\n    USE_HTTPS_LETSENCRYPT=False \\\n    
GALAXY_DOMAIN=localhost \\\n    # Set the number of Galaxy handlers\n    GALAXY_HANDLER_NUMPROCS=2 \\\n    # Setting a standard encoding. This can get important for things like the unix sort tool.\n    LC_ALL=en_US.UTF-8 \\\n    LANG=en_US.UTF-8\n\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy $GALAXY_ROOT_DIR $GALAXY_ROOT_DIR\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_galaxy $GALAXY_VIRTUAL_ENV $GALAXY_VIRTUAL_ENV\nCOPY --chown=root:root --from=build_galaxy /usr/local/lib/libjemalloc.so.2 /usr/local/lib/libjemalloc.so.2\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge $GALAXY_HOME $GALAXY_HOME\nCOPY --chown=$GALAXY_USER:$GALAXY_USER --from=build_miniforge /etc/profile.d/conda.sh /etc/profile.d/conda.sh\n\nADD --chown=$GALAXY_USER:$GALAXY_USER ./sample_tool_list.yaml $GALAXY_HOME/ephemeris/sample_tool_list.yaml\n\n# Activate Interactive Tools during runtime\nADD --chown=$GALAXY_USER:$GALAXY_USER ./tool_conf_interactive.xml.sample $GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE\n\nRUN mkdir -p $GALAXY_CONFIG_TUS_UPLOAD_STORE \\\n    && ln -s /tool_deps/ $GALAXY_CONFIG_TOOL_DEPENDENCY_DIR \\\n    && chown $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_TOOL_DEPENDENCY_DIR $GALAXY_CONFIG_TUS_UPLOAD_STORE \\\n    # Configure Galaxy to use the Tool Shed\n    && cp $GALAXY_HOME/.bashrc ~/ \\\n    && su $GALAXY_USER -c \"cp $GALAXY_ROOT_DIR/config/galaxy.yml.sample $GALAXY_CONFIG_FILE\" \\\n    && su $GALAXY_USER -c \"cp $GALAXY_ROOT_DIR/config/tool_conf.xml.sample $GALAXY_CONFIG_TOOL_CONFIG_FILE\" \\\n    && ansible-playbook /ansible/galaxy_job_conf.yml \\\n    --extra-vars galaxy_server_dir=$GALAXY_ROOT_DIR \\\n    --extra-vars galaxy_config_dir=$GALAXY_CONFIG_DIR \\\n    --extra-vars galaxy_config_file=$GALAXY_CONFIG_FILE \\\n    --extra-vars galaxy_job_conf_path=$GALAXY_CONFIG_JOB_CONFIG_FILE \\\n    --extra-vars galaxy_container_resolvers_conf_path=$GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE \\\n    --extra-vars galaxy_user_name=$GALAXY_USER 
\\\n    && curl -o $GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH \\\n    -L https://raw.githubusercontent.com/galaxyproject/usegalaxy-playbook/8adb1f82c94fe95b09df2a2816440ce2420b7d39/env/main/files/galaxy/config/tool_data_table_conf.xml \\\n    && chown $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH \\\n    # Ensure Galaxy uses the jemalloc we built (gridengine compatibility: #10425).\n    && mv /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2.orig \\\n    && ln -s /usr/local/lib/libjemalloc.so.2 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2\n\n# Install optional Galaxy dependencies for the default config during build.\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    optional_deps_file=\"$(mktemp)\" \\\n    && PYTHONPATH=$GALAXY_ROOT_DIR/lib $GALAXY_VIRTUAL_ENV/bin/python \\\n        -c \"from galaxy.dependencies import optional; print('\\n'.join(optional('/etc/galaxy/galaxy.yml')))\" \\\n        > \"$optional_deps_file\" \\\n    && if [ -s \"$optional_deps_file\" ]; then \\\n        /usr/local/bin/uv pip install \\\n            --python \"$GALAXY_VIRTUAL_ENV/bin/python\" \\\n            --index-strategy unsafe-best-match \\\n            --extra-index-url \"${GALAXY_WHEELS_INDEX_URL}\" \\\n            -r \"$optional_deps_file\"; \\\n    fi \\\n    && rm -f \"$optional_deps_file\" \\\n    ###### This is needed because of a setuptools problem, remove in 26.1\n    && cd \"$GALAXY_ROOT_DIR\" \\\n    && ./scripts/common_startup.sh --skip-client-build \\\n    ###############################\n    && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV\n\n# Include all needed scripts from the host\nADD ./setup_postgresql.py /usr/local/bin/setup_postgresql.py\n\n# Configure PostgreSQL\n# 1. Remove all old configuration\n# 2. Create DB-user 'galaxy' with password 'galaxy' in database 'galaxy'\n# 3. 
Create Galaxy Admin User 'admin@example.org' with password 'admin' and API key 'admin'\n\nRUN cd / \\\n    && rm $PG_DATA_DIR_DEFAULT -rf \\\n    && python /usr/local/bin/setup_postgresql.py --dbuser galaxy --dbpassword galaxy --db-name galaxy --dbpath $PG_DATA_DIR_DEFAULT --dbversion $PG_VERSION \\\n    && service postgresql start \\\n    && service postgresql stop\n\nWORKDIR $GALAXY_ROOT_DIR\n\n# Updating genome informations from UCSC\n# RUN su $GALAXY_USER -c \"export GALAXY=$GALAXY_ROOT_DIR && sh ./cron/updateucsc.sh.sample\"\n\nENV GALAXY_CONFIG_JOB_WORKING_DIRECTORY=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/job_working_directory \\\n    GALAXY_CONFIG_FILE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/files \\\n    GALAXY_CONFIG_NEW_FILE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/tmp \\\n    GALAXY_CONFIG_TEMPLATE_CACHE_PATH=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/compiled_templates \\\n    GALAXY_CONFIG_CITATION_CACHE_DATA_DIR=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/database/citations/data \\\n    GALAXY_CONFIG_FTP_UPLOAD_DIR=$EXPORT_DIR/ftp \\\n    GALAXY_CONFIG_FTP_UPLOAD_SITE=example.org \\\n    GALAXY_CONFIG_USE_PBKDF2=True \\\n    GALAXY_CONFIG_NGINX_X_ACCEL_REDIRECT_BASE=/_x_accel_redirect \\\n    GALAXY_CONFIG_DYNAMIC_PROXY_MANAGE=False \\\n    GALAXY_CONFIG_VISUALIZATION_PLUGINS_DIRECTORY=config/plugins/visualizations \\\n    GALAXY_CONFIG_TRUST_JUPYTER_NOTEBOOK_CONVERSION=True \\\n    GALAXY_CONFIG_SANITIZE_ALL_HTML=False \\\n    GALAXY_CONFIG_WELCOME_URL=$GALAXY_CONFIG_DIR/web/welcome.html \\\n    GALAXY_CONFIG_OVERRIDE_DEBUG=False \\\n    GALAXY_CONFIG_ENABLE_QUOTAS=True \\\n    GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://$GALAXY_DOMAIN \\\n    GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=True \\\n    GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda \\\n    DRMAA_LIBRARY_PATH=/usr/lib/slurm-drmaa/lib/libdrmaa.so\n\n# Container Style\nADD --chown=$GALAXY_USER:$GALAXY_USER GalaxyDocker.png $GALAXY_CONFIG_DIR/web/welcome_image.png\nADD 
--chown=$GALAXY_USER:$GALAXY_USER welcome.html $GALAXY_CONFIG_DIR/web/welcome.html\n\n\n# Activate additional Tool Sheds\n# Activate the Test Tool Shed during runtime, useful for testing repositories.\nADD --chown=$GALAXY_USER:$GALAXY_USER ./tool_sheds_conf.xml $GALAXY_HOME/tool_sheds_conf.xml\n\n# Script that enables easier downstream installation of tools (e.g. for different Galaxy Docker flavours)\nADD install_tools_wrapper.sh /usr/bin/install-tools\nRUN chmod +x /usr/bin/install-tools && \\\n    cd /usr/bin/ && curl https://git.embl.de/grp-gbcs/galaxy-dir-sync/raw/master/src/galaxy-dir-sync.py > galaxy-dir-sync.py && \\\n    chmod +x galaxy-dir-sync.py\n\n# use https://github.com/krallin/tini/ as tiny but valid init and PID 1\nADD https://github.com/krallin/tini/releases/download/v0.18.0/tini /sbin/tini\nADD --chown=$GALAXY_USER:$GALAXY_USER ./run.sh $GALAXY_ROOT_DIR/run.sh\nRUN chmod +x /sbin/tini \\\n    && chmod 755 ./run.sh $GALAXY_ROOT_DIR/run.sh\n\n# This needs to happen here and not above, otherwise the Galaxy start\n# (without running the startup.sh script) will crash because integrated_tool_panel.xml could not be found.\nENV GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG=$EXPORT_DIR/${GALAXY_ROOT_DIR#/}/integrated_tool_panel.xml\n\n# Expose port 80, 443 (webserver), 21 (FTP server), 4002 (Proxy), 9002 (supvisord web app)\nEXPOSE 21\nEXPOSE 80\nEXPOSE 443\nEXPOSE 4002\nEXPOSE 9002\n\n# Mark folders as imported from the host.\nVOLUME [\"/export/\", \"/data/\", \"/var/lib/docker\"]\n\nADD startup.sh /usr/bin/startup\nADD startup2.sh /usr/bin/startup2\nENV SUPERVISOR_POSTGRES_AUTOSTART=False \\\n    SUPERVISOR_MANAGE_POSTGRES=True \\\n    SUPERVISOR_MANAGE_CRON=True \\\n    SUPERVISOR_MANAGE_PROFTP=True \\\n    SUPERVISOR_MANAGE_CONDOR=True \\\n    SUPERVISOR_MANAGE_SLURM= \\\n    SUPERVISOR_MANAGE_RABBITMQ=True \\\n    SUPERVISOR_MANAGE_REDIS=True \\\n    SUPERVISOR_MANAGE_FLOWER=True \\\n    GRAVITY_MANAGE_CELERY=True \\\n    GRAVITY_MANAGE_GX_IT_PROXY=True 
\\\n    GRAVITY_MANAGE_TUSD=True \\\n    HOST_DOCKER_LEGACY= \\\n    STARTUP_EXPORT_USER_FILES=True \\\n    LOAD_GALAXY_CONDITIONAL_DEPENDENCIES=True\n\nENTRYPOINT [\"/sbin/tini\", \"--\"]\n\n# Autostart script that is invoked during container start\nCMD [\"/usr/bin/startup\"]\n"
  },
  {
    "path": "galaxy/ansible/condor.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    htcondor_version: 25.x\n    htcondor_keyring_path: /etc/apt/keyrings/htcondor.asc\n    htcondor_repo_list_path: /etc/apt/sources.list.d/htcondor.list\n    htcondor_role_submit: true\n    htcondor_password: changeme\n    htcondor_domain: '{{ galaxy_user_name }}'\n    htcondor_server: localhost\n    htcondor_firewall_condor: false\n    htcondor_firewall_nfs: false\n\n  pre_tasks:\n    # This pre-task addresses the systemd service that is installed by the role, \n    # which cannot function inside the container. \n    # Therefore, we use an sysvinit script to manage HTCondor during the playbook execution.\n    # The init script will be removed later in the post-tasks, as we will use \n    # supervisor to manage HTCondor.\n    - name: Create HTCondor init script\n      copy:\n        dest: /etc/init.d/condor\n        mode: '0755'\n        content: |\n          #!/bin/sh\n          HTCONDOR_DIR=\"/usr/sbin\"\n          case \"$1\" in\n              start)\n                  echo \"Starting HTCondor...\"\n                  $HTCONDOR_DIR/condor_master\n                  ;;\n              stop)\n                  echo \"Stopping HTCondor...\"\n                  killall -r '.*condor.*'\n                  ;;\n              restart)\n                  echo \"Restarting HTCondor...\"\n                  $0 stop\n                  $0 start\n                  ;;\n          esac\n          exit 0\n\n    - name: Register HTCondor init script\n      command: update-rc.d condor defaults\n\n  tasks:\n    - name: Install HTCondor repository prerequisites\n      apt:\n        name:\n          - curl\n          - gnupg\n          - apt-transport-https\n        state: present\n        update_cache: true\n\n    - name: Ensure APT keyring directory exists\n      file:\n        path: /etc/apt/keyrings\n        state: directory\n        mode: \"0755\"\n\n    - name: Add HTCondor signing key\n      
get_url:\n        url: \"https://htcss-downloads.chtc.wisc.edu/repo/keys/HTCondor-{{ htcondor_version }}-Key\"\n        dest: \"{{ htcondor_keyring_path }}\"\n        mode: \"0644\"\n\n    - name: Configure HTCondor apt repository\n      copy:\n        dest: \"{{ htcondor_repo_list_path }}\"\n        content: |\n          # HTCondor repository for the {{ htcondor_version }} feature versions\n          deb [signed-by={{ htcondor_keyring_path }}] https://htcss-downloads.chtc.wisc.edu/repo/ubuntu/{{ htcondor_version }} {{ ansible_distribution_release }} main\n          deb-src [signed-by={{ htcondor_keyring_path }}] https://htcss-downloads.chtc.wisc.edu/repo/ubuntu/{{ htcondor_version }} {{ ansible_distribution_release }} main\n\n    - name: Install HTCondor\n      apt:\n        name: condor\n        state: present\n        update_cache: true\n\n    - name: Create log files for HTCondor\n      file:\n        path: \"/var/log/condor/{{ item }}\"\n        state: touch\n        owner: condor\n      loop:\n        - StartLog\n        - StarterLog\n        - CollectorLog\n        - NegotiatorLog\n\n    - name: Configure HTCondor\n      lineinfile:\n        path: /etc/condor/condor_config.local\n        create: yes\n        line: \"{{ item }}\"\n      loop:\n        - 'DISCARD_SESSION_KEYRING_ON_STARTUP=False'\n        - 'TRUST_UID_DOMAIN=true'\n\n  # Remove the init script and systemd\n  post_tasks:\n    - name: Stop HTCondor service\n      command: /etc/init.d/condor stop\n\n    - name: Remove HTCondor init script\n      file:\n        path: /etc/init.d/condor\n        state: absent\n\n    - name: Remove HTCondor init script registration\n      command: update-rc.d -f condor remove\n    \n    - name: Purge systemd and perform cleanup\n      shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean\n"
  },
  {
    "path": "galaxy/ansible/cvmfs_client.yml",
    "content": "# Setup of the CernVM-File system (CVMFS) and configure so that the reference\n# data hosted by Galaxy on usegalaxy.org is available to the remote target.\n- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install CernVM apt key\n      apt_key:\n        url: https://cvmrepo.web.cern.ch/cvmrepo/apt/cernvm.gpg\n\n    # Install & setup CermVM-FS\n    - name: Configure CernVM apt repository\n      apt_repository:\n        filename: \"cernvm.list\"\n        mode: 422\n        repo: deb [allow-insecure=true] https://cvmrepo.web.cern.ch/cvmrepo/apt/ {{ ansible_distribution_release }}-prod main\n\n    - name: Install CernVM-FS client (apt)\n      apt:\n        name: ['cvmfs', 'cvmfs-config', 'autofs']\n        state: \"{{ galaxy_apt_package_state }}\"\n        update_cache: yes\n\n    - name: Make CernVM-FS key directories\n      file:\n        state: directory\n        path: \"{{ item }}\"\n        owner: \"root\"\n        group: \"root\"\n        mode: \"0755\"\n      loop: \"{{ cvmfs_keys | map(attribute='path') | map('dirname') | unique }}\"\n\n    - name: Install CernVM-FS keys\n      copy:\n        content: \"{{ item.key }}\"\n        dest: \"{{ item.path }}\"\n        owner: \"root\"\n        group: \"root\"\n        mode: \"0444\"\n      with_items: \"{{ cvmfs_keys }}\"\n\n    - name: Perform AutoFS and FUSE configuration for CernVM-FS\n      command: cvmfs_config setup\n\n    - name: Configure CernVM-FS config repository\n      block:\n\n        - name: Create config repo config\n          copy:\n            content: |\n              CVMFS_SERVER_URL=\"{{ cvmfs_config_repo.urls | join(';') }}\"\n              CVMFS_PUBLIC_KEY=\"{{ cvmfs_config_repo.key.path }}\"\n            dest: \"/etc/cvmfs/config.d/{{ cvmfs_config_repo.repository.repository }}.conf\"\n            owner: \"root\"\n            group: \"root\"\n            mode: \"0444\"\n\n        - name: Set config repo defaults\n          copy:\n            
content: |\n              CVMFS_CONFIG_REPOSITORY=\"{{ cvmfs_config_repo.repository.repository }}\"\n              CVMFS_DEFAULT_DOMAIN=\"{{ cvmfs_config_repo.domain }}\"\n              CVMFS_USE_GEOAPI=\"{{ cvmfs_config_repo.use_geoapi | default('yes') }}\"\n            dest: \"/etc/cvmfs/default.d/80-galaxyproject-cvmfs.conf\"\n            owner: \"root\"\n            group: \"root\"\n            mode: \"0444\"\n\n    - name: Configure CernVM-FS global client settings\n      copy:\n        content: |\n          CVMFS_HTTP_PROXY=\"{{ cvmfs_http_proxies | default(['DIRECT']) | join(';') }}\"\n          CVMFS_QUOTA_LIMIT=\"{{ cvmfs_quota_limit | default('4000') }}\"\n          CVMFS_CACHE_BASE=\"{{ cvmfs_cache_base | default('/var/lib/cvmfs') }}\"\n          CVMFS_USE_GEOAPI=\"{{ cvmfs_use_geoapi | default('yes') }}\"\n        dest: \"/etc/cvmfs/default.local\"\n        owner: \"root\"\n        group: \"root\"\n        mode: \"0644\"\n"
  },
  {
    "path": "galaxy/ansible/docker.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    docker_install_compose: false\n    docker_install_compose_plugin: false\n    docker_users:\n      - \"{{ galaxy_user_name }}\"\n    docker_service_manage: false\n  roles:\n    - role: geerlingguy.docker\n  tasks:\n    - name: Purge systemd and perform cleanup\n      shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean\n"
  },
  {
    "path": "galaxy/ansible/files/413.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages -->\n\n    <meta charset=\"utf-8\" />\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n\n    <title>Request Too Large! | 413 - Payload Too Large</title>\n\n    <style type=\"text/css\">/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,footer,header,nav,section{display:block}h1{font-size:2em;margin:.67em 0}figcaption,figure,main{display:block}figure{margin:1em 40px}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent;-webkit-text-decoration-skip:objects}a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:inherit}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}dfn{font-style:italic}mark{background-color:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}img{border-style:none}svg:not(:root){overflow:hidden}button,input,optgroup,select,textarea{font-family:sans-serif;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html 
[type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{display:inline-block;vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details,menu{display:block}summary{display:list-item}canvas{display:inline-block}template{display:none}[hidden]{display:none}/*! 
Simple HttpErrorPages | MIT X11 License | https://github.com/AndiDittrich/HttpErrorPages */body,html{width:100%;height:100%;background-color:#21232a}body{color:#fff;text-align:center;text-shadow:0 2px 4px rgba(0,0,0,.5);padding:0;min-height:100%;-webkit-box-shadow:inset 0 0 100px rgba(0,0,0,.8);box-shadow:inset 0 0 100px rgba(0,0,0,.8);display:table;font-family:\"Open Sans\",Arial,sans-serif}h1{font-family:inherit;font-weight:500;line-height:1.1;color:inherit;font-size:36px}h1 small{font-size:68%;font-weight:400;line-height:1;color:#777}a{text-decoration:none;color:#fff;font-size:inherit;border-bottom:dotted 1px #707070}.lead{color:silver;font-size:21px;line-height:1.4}.cover{display:table-cell;vertical-align:middle;padding:0 20px}footer{position:fixed;width:100%;height:40px;left:0;bottom:0;color:#a0a0a0;font-size:14px}</style>\n</head>\n<body>\n    <div class=\"cover\">\n        <h1>Request Too Large <small>Error 413</small></h1>\n        <p class=\"lead\">The request payload you tried to send is too large.<br />\n            Please try again with a smaller request.\n        </p>\n    </div>\n    <footer>\n        <p>If the problem isn't resolved, you can try restarting the container, restarting the host, or get some help from the\n            <a href=\"https://gitter.im/galaxyproject/Lobby\">community.</a> :)\n        </p>\n    </footer>\n</body>\n</html>\n"
  },
  {
    "path": "galaxy/ansible/files/500.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages -->\n\n    <meta charset=\"utf-8\" />\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n    \n    <title>Galaxy is down! | 500 - Webservice currently unavailable</title>\n\n    <style type=\"text/css\">/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,footer,header,nav,section{display:block}h1{font-size:2em;margin:.67em 0}figcaption,figure,main{display:block}figure{margin:1em 40px}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent;-webkit-text-decoration-skip:objects}a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:inherit}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}dfn{font-style:italic}mark{background-color:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}img{border-style:none}svg:not(:root){overflow:hidden}button,input,optgroup,select,textarea{font-family:sans-serif;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html 
[type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{display:inline-block;vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details,menu{display:block}summary{display:list-item}canvas{display:inline-block}template{display:none}[hidden]{display:none}/*! 
Simple HttpErrorPages | MIT X11 License | https://github.com/AndiDittrich/HttpErrorPages */body,html{width:100%;height:100%;background-color:#21232a}body{color:#fff;text-align:center;text-shadow:0 2px 4px rgba(0,0,0,.5);padding:0;min-height:100%;-webkit-box-shadow:inset 0 0 100px rgba(0,0,0,.8);box-shadow:inset 0 0 100px rgba(0,0,0,.8);display:table;font-family:\"Open Sans\",Arial,sans-serif}h1{font-family:inherit;font-weight:500;line-height:1.1;color:inherit;font-size:36px}h1 small{font-size:68%;font-weight:400;line-height:1;color:#777}a{text-decoration:none;color:#fff;font-size:inherit;border-bottom:dotted 1px #707070}.lead{color:silver;font-size:21px;line-height:1.4}.cover{display:table-cell;vertical-align:middle;padding:0 20px}footer{position:fixed;width:100%;height:40px;left:0;bottom:0;color:#a0a0a0;font-size:14px}</style>\n</head>\n<body>\n    <div class=\"cover\">\n        <h1>Webservice currently unavailable <small>Error 500</small></h1>\n        <p class=\"lead\">An unexpected condition was encountered.<br />\n        </p>\n    </div>\n    <footer>\n        <p>If the problem isn't resolved after a few minutes, you can try restarting the container, restarting the host, or get some help from the\n            <a href=\"https://gitter.im/galaxyproject/Lobby\">community.</a> :)\n        </p>\n    </footer>\n</body>\n</html>\n\n"
  },
  {
    "path": "galaxy/ansible/files/502.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages -->\n\n    <meta charset=\"utf-8\" />\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n\n    <title>Galaxy is down! | 502 - Webservice currently unavailable</title>\n\n    <style type=\"text/css\">/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,footer,header,nav,section{display:block}h1{font-size:2em;margin:.67em 0}figcaption,figure,main{display:block}figure{margin:1em 40px}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent;-webkit-text-decoration-skip:objects}a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:inherit}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}dfn{font-style:italic}mark{background-color:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}img{border-style:none}svg:not(:root){overflow:hidden}button,input,optgroup,select,textarea{font-family:sans-serif;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html 
[type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{display:inline-block;vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details,menu{display:block}summary{display:list-item}canvas{display:inline-block}template{display:none}[hidden]{display:none}/*! 
Simple HttpErrorPages | MIT X11 License | https://github.com/AndiDittrich/HttpErrorPages */body,html{width:100%;height:100%;background-color:#21232a}body{color:#fff;text-align:center;text-shadow:0 2px 4px rgba(0,0,0,.5);padding:0;min-height:100%;-webkit-box-shadow:inset 0 0 100px rgba(0,0,0,.8);box-shadow:inset 0 0 100px rgba(0,0,0,.8);display:table;font-family:\"Open Sans\",Arial,sans-serif}h1{font-family:inherit;font-weight:500;line-height:1.1;color:inherit;font-size:36px}h1 small{font-size:68%;font-weight:400;line-height:1;color:#777}a{text-decoration:none;color:#fff;font-size:inherit;border-bottom:dotted 1px #707070}.lead{color:silver;font-size:21px;line-height:1.4}.cover{display:table-cell;vertical-align:middle;padding:0 20px}footer{position:fixed;width:100%;height:40px;left:0;bottom:0;color:#a0a0a0;font-size:14px}</style>\n</head>\n<body>\n    <div class=\"cover\">\n        <h1>Webservice currently unavailable <small>Error 502</small></h1>\n        <p class=\"lead\">Most likely, the Galaxy is booting.<br />\n            Please don't panic. Relax, get some tea or ice-cream (depending on the season), and try again a few seconds later.\n        </p>\n    </div>\n    <footer>\n        <p>If the problem isn't resolved after a few minutes, you can try restarting the container, restarting the host, or get some help from the\n            <a href=\"https://gitter.im/galaxyproject/Lobby\">community.</a> :)\n        </p>\n    </footer>\n</body>\n</html>\n"
  },
  {
    "path": "galaxy/ansible/files/nginx_sample.crt",
    "content": "-----BEGIN CERTIFICATE-----\nMIIE7TCCAtWgAwIBAgIUHBIplAOVmxyIRH51KvXuSWydCj8wDQYJKoZIhvcNAQEL\nBQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI0MTEwNjIzMDU1NVoXDTM0MTEw\nNDIzMDU1NVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0BAQEF\nAAOCAg8AMIICCgKCAgEA1kSpfexOnDQvNDwSg/4Cjv13+41VF2RgJdpk0n1iBz92\nGKEl7SEh+nhUFinn+CKv2EaNQ7Nv5/+BNoPbBvS8Gm7ZtGt+cFXqRy4ka5It68sq\nbwZadmAGwksJbvcizs5D6XS3BPIB2FrxvBbhzOj+oDYxC3HItIgYwV0+Gv/GBDCi\nF4+b9dO//gfR1ywqsvGczaaMBkbhuOZ2WZph9nFEcdHgNEzLn/HJsYZv0crrFjCL\no7+FWsYIdM1wNP2bkpPzRFpB1ujxfl9xxH4pTc06sHIKivnoVMs5VvsqdWtosJi/\ns84ALuofPDKuGN2JaTA8e0MVnC2ZxOcDFUtR/WvN8rkUMcNnP8nKJGlFuDRbVMCq\nMrzxglAUeOCc4sjOZxvSlCGa44xGUCvpiVvE9pVO0pNTXQGlNlC9DWnhO4VkOZf7\nrnx2a9u707g3GbyfjJfMmZIn3jZTpbOe+6JVgtgrxATa9g8aYKWVOe5HzJywgTkC\nktNWcqZr4Kx4lKTM4so4dbsZe3pwHBi+XeInUzwEhFcbcP3UUZXpRmzOX7vIkd6+\nVVR1WwaWwOAEUx872chHLqabrFMWeQLa64vUeG/A82ltFD03anzTBVr7dEkxczIS\n2Ljt5Bzc/5hhbev8s/0CCLhQy89EucnIs1Ow+Fwdc9Ue6Qn/OvVjw65hJRruxrUC\nAwEAAaM3MDUwFAYDVR0RBA0wC4IJbG9jYWxob3N0MB0GA1UdDgQWBBTorI9k0nHV\n1OA4uCA6/ciE3gdgRDANBgkqhkiG9w0BAQsFAAOCAgEAWYmTrc/h4kSv4W12JrTP\nmzMF5qGRUK5YBTxN656Pf9wENEMbZmWbCFu5e+Ewe+z3BCE/oZZFpHEx/mdT2ARO\nYbIXpw0THzrfRtwVRSlUDl7O2zt6DrT4AxIJzBLpf8TSWNSMIQwI/Hv1wewwClM9\nD7YM+S2WkwSu6jByXVfLCByjXsLLN25X16q+t7tnio6IMA2gB8XdnstMRzQmwQti\ndfki3mCSehuhDV6Ylj4Ln/JGkyJ648MzvGnl2J8l4FArR9E9Rzx0XEPbMiqZmYXq\njPtBErrO4thXkgqr3TjLuxqt3RjG7cmmlsGY3oMtscs8QeNDhhCxoyWpz7ECmCpE\n43DtUMfLLf77XbDUSvakPAJ2ZdWl44+JgOS7v7CxiV6DFzRi5ZXfmx+KdZ3YakkV\nB6pvGHmxqy2uNAk2WX632BFa0OvGnYFF68x8flYQUOXVgC3B5/xMxWfy5JLC/jwE\nDFvLMZinxLiJIx9Bbn1PCPXNIk7waDK2Y1YYVj61tKupFEh470C1Rra/C+RP08Be\n5zJi0OZumpfCa+Hz4UDv9Cm6tTrrG/xwpicQidfPzkSublgzzW0Zvx/s6C+5Rqzm\nPLZw6l33bEQstPcJSxUmSNlaNwsUJxEGGNnAwfQq6vPTp0YJZ+xo0BUOeFpt7tdC\npygM9OEmi1vuRknJr3vD+aM=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "galaxy/ansible/files/nginx_sample.key",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKQIBAAKCAgEA1kSpfexOnDQvNDwSg/4Cjv13+41VF2RgJdpk0n1iBz92GKEl\n7SEh+nhUFinn+CKv2EaNQ7Nv5/+BNoPbBvS8Gm7ZtGt+cFXqRy4ka5It68sqbwZa\ndmAGwksJbvcizs5D6XS3BPIB2FrxvBbhzOj+oDYxC3HItIgYwV0+Gv/GBDCiF4+b\n9dO//gfR1ywqsvGczaaMBkbhuOZ2WZph9nFEcdHgNEzLn/HJsYZv0crrFjCLo7+F\nWsYIdM1wNP2bkpPzRFpB1ujxfl9xxH4pTc06sHIKivnoVMs5VvsqdWtosJi/s84A\nLuofPDKuGN2JaTA8e0MVnC2ZxOcDFUtR/WvN8rkUMcNnP8nKJGlFuDRbVMCqMrzx\nglAUeOCc4sjOZxvSlCGa44xGUCvpiVvE9pVO0pNTXQGlNlC9DWnhO4VkOZf7rnx2\na9u707g3GbyfjJfMmZIn3jZTpbOe+6JVgtgrxATa9g8aYKWVOe5HzJywgTkCktNW\ncqZr4Kx4lKTM4so4dbsZe3pwHBi+XeInUzwEhFcbcP3UUZXpRmzOX7vIkd6+VVR1\nWwaWwOAEUx872chHLqabrFMWeQLa64vUeG/A82ltFD03anzTBVr7dEkxczIS2Ljt\n5Bzc/5hhbev8s/0CCLhQy89EucnIs1Ow+Fwdc9Ue6Qn/OvVjw65hJRruxrUCAwEA\nAQKCAf8gSZuhrHbI4ElDmmH/c/j38/ceP1B3i0DRg5GbW2nGb424cjPYd6bVPqaP\nt1tmvLVh6wPD9j8wg8NMeFF9d/cqN0TS/+ogHMRcsqUmkuCGugjf9Pcm/6Rl9cq5\nAGReqc+25kmnDVaF8wA+VUtwvH+UQasGohDtLJdG47FVh2gTbEw1tAHaPlzCfnkA\nKvhbRi2ZBwx4GGkEjSuEVQ/xdiJP2KXG34ZxEDWi2Rcw9Jf7tHw4WiK3Tw2tW7sQ\nSlwSVzthUgEkPHJ19yD/gfDqKUXDg6Mn09YVVnNI6Lm5Jgw7DM4W/g0t60Dp+tTC\nZLIv24OihQO+7yloi6MSgj3dQvmudbrGIaUQ3WuSVIeRyzlEBQRC6TfsIMNEIH7E\nGogW+uopWWMhdnZkesGzYRIMkOpkOiFd4gZ6zw7++IOB7SaiR7JkJnTVmFip4oDf\ntFXxxOOtZDlTpRzfjZggNzPm7Kyh4KFdKD27OdHwxpkl14GALtELncFCWBq6zt8g\n32HtFhMKTkjCjZ+oAe4X3PAZ2vISsKk7sEAebumy6E1xUAfDrfcXNN0rBz2jaMro\nSsdUeOsaaOQ3sB5KMyR04GvBdHsCI0rCEWJKtjxzbRkRryAftxXp1pOzDZg69xZO\nCFrNW+FrRLu19k6ZNuoToq7p7oSqwMSzj3BScD2EW+QL1PpJAoIBAQDkwyjL8/cL\n1gG52hodMjPUBXEQbQW6GDMvmt0rHUcX7tbpvfvidZuF35qPujVFsRIa/gQSH6sB\nVvIrarsioaqkNaRLBl1EmnF2JpGH1SJWw5yOt7FUpC95sqwWk5vkXdOxL3XCzLVy\npIVbGKoy2bYe0lcYH804ggnTuGQYR7EkjSvv7/FpBZY2uDTzf3X2w5YzfaBQOuSF\nrzOMj7php61z/jA0POaVq6yr3mnZ0Hxk1aJfyX+a5mvKBGfChceIpcdzpNBuSpbS\nZNKF4YBNmZ8ESd0qjUnBmtPg4h0gpOXR3wa86xHiboMoYyhor6eCo1yS33qxv6B+\nXn1GzLGBkWJHAoIBAQDvx7YR91sAb/uwEHQB/nciXrynXEfjWC1fnDNiJMgF89Z9\ndUgEKPPeJNX7S8TrHNZryHBnLj4m8G3pWrIyJkTEvWZwr3jJKsl3x3ZTgS1LhkPZ\nKvXph6xlkpx+4BcDHFQX4B/mTOnsYZyIb0C71
LMZ/vNE6J3WN5uVFtI7ShZDf6SJ\nGUKCBDckYXDaEAJ0WqK3b7Eu7wBhYjWq6EPTZxtudVMJkHvaLVJAPBUCgswtyE2o\nETXoTExG4388XrPCQ/qosuUNPDm9pSMjTiWtDz0VQg+clJNdjTu7EfhqD2zx/sHM\n1KVJvoZUowrL5UdErKRJGoRvtev9ZIJl48m8a3EjAoIBAQDdaUKoPEXFL+nVvxH9\nZiShtm6bTln3pwqLreEYpKq1sFZUP6x2oAvaA/Tt3XVIMbzrYSYBgKMbldKoURI0\nz7KAYubUMqG9D5p3l5bNmG02+vchbwt0d8D3kgZbh5yf6GxHFz9sPoP0JOZpqDK7\nKtrJdB4V3FndsobeY56FnYYHcZewEFVgp6ae6aVec+Rx5RYQWiv62zVpaoyDJG1p\nrUgFd2WiebtX66QhaRCcX9y2H8ub2EPoYdK74Y2nyaG5UXL9K+0Mgqb9ldXo/LwY\n33H4TaGBWOSlPTyLcW5ttQw3GBzGZuKVfQ723Ro0UKbZm1GzWhe/yFAHX17zUpUP\nae5rAoIBAQDQ7SMn5G/WobycXLm4QxFrUUDwUugQn/RpKqFbEtF23lA2YMqvVT+o\ngFAy9oJOmoH6yFuojBJ7u2MJwY0jRVUGWEG6TirgnfeN9q6TdCsTc5oKz/QV17HP\njz/tDTT/8N8VLqSc9secwDC0cLvm7h5guFUf5dAhp7JY5dmo75UWm1GyY+AfiazC\ndmTunKSG3bKKQzgPvRCHyhsZH+h5e43bYT9JRiukn3jbn35vAakG+1Eu8FAYaOLN\nocxrvdjDnJf8BmSuc5ucMxe624zYjj6bF0SjGpKNIVK6XZ4mS+qRsXkMEP00lF5X\nwPjXUKAYppU/XWuoKsvFrp4wSZquIrAhAoIBAQCWE7gx9/Ao7hT5fq0ho+TVV7Cp\n5gnDlNVASexjOrVS8OuQntrKNbYrvcgdCivyxVXxi3HFoydHPg9U4VchrADj9qPL\n9BH3/BCZJPttGYVIfPThJCnj6kM11BuOSweG/nEU03N36XsxR4748RiRf+MyL2kX\nKxMEwt+RZvH5beqMLrbLuQ8ey7QNJx/XK3wYhl90q8eJgEFHaKz/LBB/Fwerj5mW\nf+5jNCbDg+ey7gUmYh8pHqc1KlI8orvRmYP2m2OJFXIHVY+O0U5k/lKXBndM8TpQ\njzCf/N7e2g/7lkZKQhaMrfBsU1s9Ib2p7JuaOyeOKdN4qKURj8xtb3m6fymK\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "galaxy/ansible/files/production_b2drop.yml",
    "content": "- id: b2drop\n  version: 0\n  name: B2DROP\n  description: |\n    B2DROP is a Nextcloud service to sync and share your research data.\n  variables:\n    username:\n      label: Username / Account Name\n      type: string\n      help: |\n        The username or account name to use to connect to B2DROP. This is not your email address but the name shown in the URL of your profile page.\n    writable:\n      label: Writable?\n      type: boolean\n      default: false\n      help: Allow Galaxy to write data to B2DROP.\n  secrets:\n    password:\n      label: Password\n      help: |\n        The password to use to connect to B2DROP.\n  configuration:\n    type: webdav\n    url: 'https://b2drop.bsc.es'\n    root: '/remote.php/dav/files/{{ variables.username }}'\n    login: '{{ variables.username }}'\n    writable: '{{ variables.writable }}'\n    password: '{{ secrets.password }}'\n"
  },
  {
    "path": "galaxy/ansible/flower.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    flower_python_package_version: 1.2.0\n    flower_custom_logging: false\n    flower_conf_dir: \"{{ flower_conf_path | dirname }}\"\n    flower_ui_users: []\n  roles:\n    - role: usegalaxy_eu.flower\n  tasks:\n    - name: Add url prefix to flower config\n      lineinfile:\n        path: \"{{ flower_conf_path }}\"\n        line: 'url_prefix = \"{{ flower_url_prefix }}\"'\n"
  },
  {
    "path": "galaxy/ansible/galaxy_file_source_templates.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install fs.webdavfs for Galaxy's file source plugins\n      pip:\n        name: \"fs.webdavfs\"\n        extra_args: \"--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple\"\n        virtualenv: \"{{ galaxy_venv_dir }}\"\n      environment:\n        PYTHONPATH: null\n        VIRTUAL_ENV: \"{{ galaxy_venv_dir }}\"\n      become_user: \"{{ galaxy_user_name }}\"\n\n    - name: \"Setup user configurable file source templates, also called BYOD\"\n      template: src=file_source_templates.yml.j2 dest={{ galaxy_file_source_templates_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n\n    - name: \"Copy B2Drop file source template\"\n      ansible.builtin.copy:\n        src: \"./files/production_b2drop.yml\"\n        dest: \"{{ galaxy_config_dir }}/production_b2drop.yml\"\n        owner: \"{{ galaxy_user_name }}\"\n        group: \"{{ galaxy_user_name }}\"\n        mode: '0644'\n\n"
  },
  {
    "path": "galaxy/ansible/galaxy_job_conf.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Ensure dynamic handler assignment method is configured\"\n      lineinfile:\n        path: \"{{ galaxy_config_file }}\"\n        regexp: '^job_handler_assignment_method:'\n        line: 'job_handler_assignment_method: db-skip-locked'\n        insertafter: EOF\n        create: true\n        owner: \"{{ galaxy_user_name }}\"\n        group: \"{{ galaxy_user_name }}\"\n        mode: \"0644\"\n      when: galaxy_dynamic_handlers | bool\n\n    - name: \"Install Galaxy job conf\"\n      template: src=job_conf.xml.j2 dest={{ galaxy_job_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n\n    - name: \"Install Galaxy container resolution configuration\"\n      template: src=container_resolvers_conf.yml.j2 dest={{ galaxy_container_resolvers_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n      when: galaxy_container_resolution | bool\n"
  },
  {
    "path": "galaxy/ansible/galaxy_job_metrics.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Setup job metrics\"\n      template: src=job_metrics_conf.yml.j2 dest={{ galaxy_job_metrics_conf_path }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n"
  },
  {
    "path": "galaxy/ansible/galaxy_object_store_templates.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Setup user configurable object store templates, also called BYOS\"\n      template: src=object_store_templates.yml.j2 dest={{ galaxy_object_store_templates_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n"
  },
  {
    "path": "galaxy/ansible/galaxy_scripts.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install galaxy user creation script.\"\n      template: src=create_galaxy_user.py.j2 dest=/usr/local/bin/create_galaxy_user.py mode=a+x\n\n    - name: \"Install galaxy check database script.\"\n      template: src=check_database.py.j2 dest=/usr/local/bin/check_database.py mode=a+x\n\n    - name: \"Install export user files script.\"\n      template: src=export_user_files.py.j2 dest=/usr/local/bin/export_user_files.py mode=a+x\n\n    - name: \"Install add_tool_shed script.\"\n      template: src=add_tool_shed.py.j2 dest=/usr/local/bin/add-tool-shed mode=a+x\n      \n    - name: \"Install startup lite script.\"\n      template: src=startup_lite.sh.j2 dest=/usr/bin/startup_lite mode=a+x\n\n    - name: \"Install cgroupfs_mount.sh for startup script.\"\n      template: src=cgroupfs_mount.sh.j2 dest=/root/cgroupfs_mount.sh mode=a+x\n\n    - name: \"Install update_yaml_value script.\"\n      template: src=update_yaml_value.py.j2 dest=/usr/local/bin/update_yaml_value mode=a+x\n"
  },
  {
    "path": "galaxy/ansible/galaxy_vault_config.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n# You should change this key in production. You can generate Fernet keys with:\n#from cryptography.fernet import Fernet\n#Fernet.generate_key().decode('utf-8')\n  vars:\n    galaxy_vault_encryption_keys:\n      - pwiL08wXlpkBm-_Dr75aw1_uOPVA3HET1y7xrpynhKU=\n  tasks:\n    - name: \"Configure Galaxy vault\"\n      template: src=vault_conf.yml.j2 dest={{ galaxy_vault_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n"
  },
  {
    "path": "galaxy/ansible/gravity.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install gravity for galaxy\"\n      pip: \n        name: gravity\n        version: 1.0.6\n        virtualenv: \"{{ galaxy_venv_dir }}\"\n        virtualenv_command: \"{{ pip_virtualenv_command | default( 'virtualenv' ) }}\"\n        extra_args: \"--index-url https://wheels.galaxyproject.org/ --extra-index-url https://pypi.python.org/simple\"\n      become: True\n      become_user: \"{{ galaxy_user_name }}\"\n\n    - name: Deploy galaxyctl wrapper script\n      copy:\n        content: |\n          #!/usr/bin/env sh\n          export GRAVITY_CONFIG_FILE={{ gravity_config_file }}\n          export GRAVITY_STATE_DIR={{ gravity_state_dir }}\n          exec sudo -E -H -u $GALAXY_USER {{ galaxy_venv_dir }}/bin/galaxyctl \"$@\"\n        dest: \"/usr/local/bin/galaxyctl\"\n        mode: \"0755\"\n      become: True\n      become_user: root\n\n    - name: \"Install Gravity conf\"\n      template: src=gravity.yml.j2 dest={{ gravity_config_file }} owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n"
  },
  {
    "path": "galaxy/ansible/group_vars/all.yml",
    "content": "use_pbkdf2: true\npostgresql_version: 15\ngalaxy_apt_package_state: present\n\n# The storage backend to use for docker-in-docker.\n# overlay2 on parent docker cannot be combined with overlay2 in child docker\ndocker_storage_backend: overlay2\ndocker_legacy: false\n\ngalaxy_nginx: true\ngalaxy_postgres: true\ngalaxy_proftpd: true\ngalaxy_slurm: true\ngalaxy_condor: true\ngalaxy_pbs: false\ngalaxy_k8s_jobs: false\ngalaxy_supervisor: true\ngalaxy_job_metrics: true\ngalaxy_file_source_templates: true\ngalaxy_object_store_templates: true\ngalaxy_vault_config: true\ngalaxy_scripts: true\ngalaxy_domain: \"localhost\"  # This is used by letsencrypt and Interactive Tools, set it to the domain name under which galaxy can be reached\ngalaxy_startup: true\ngalaxy_rabbitmq: true\ngalaxy_redis: true\ngalaxy_flower: true\ngalaxy_tusd: true\ngalaxy_cvmfs_client: true\ngalaxy_job_conf: true\ngalaxy_gravity: true\ngalaxy_docker: true\n\ngalaxy_db_port: \"5432\"\ngalaxy_database_connection: \"postgres://{{ galaxy_user_name }}@localhost:{{ galaxy_db_port }}/galaxy\"\n\n# Default destination for Galaxy jobs in generated job_conf.xml - can\n# tweak this to allow for a different default for Docker-enabled tools.\ngalaxy_destination_default: slurm_cluster\ngalaxy_destination_docker_default: \"{{ galaxy_destination_default }}\"\ngalaxy_destination_singularity_default: \"{{ galaxy_destination_default }}\"\n\n# set the FQDN for the pbs server, only used when galaxy_pbs: true\npbs_server_name: pbsqueue\n\n# Only used when galaxy_slurm: true, sets slurm ntask in job_conf.xml.\n# Will be overwritten if NATIVE_SPEC environmental variable is set.\n# In the default setting controls the value of GALAXY_SLOTS.\n# Use ansible_processor_cores: \"{{ ansible_processor_vcpus  }}\" to set this to the number of\n# threads per core * processor count * cores per processor\ngalaxy_slurm_ntask: 1\n\ngalaxy_gcc_available: false\n\n# Follow job_conf attributes set if galaxy_k8s_jobs is 
true.\ngalaxy_k8s_jobs_use_service_account: true\ngalaxy_k8s_jobs_persistent_volume_claims: galaxy-web-claim0:/export\ngalaxy_k8s_jobs_namespace: default\ngalaxy_k8s_jobs_supplemental_group_id: 0\ngalaxy_k8s_jobs_fs_group_id: 0\ngalaxy_k8s_jobs_pull_policy: IfNotPresent\n\n# Point at the existing Galaxy configuration.\ngalaxy_server_dir: \"/galaxy\"\ngalaxy_config_dir: \"{{ galaxy_server_dir }}/config\"\ngalaxy_job_conf_path: \"{{ galaxy_config_dir }}/job_conf.xml\"\ngalaxy_container_resolvers_conf_path: \"{{ galaxy_config_dir }}/container_resolvers_conf.yml\"\ngalaxy_job_metrics_conf_path: \"{{ galaxy_config_dir }}/job_metrics_conf.yml\"\ngalaxy_file_source_templates_config_file: \"{{ galaxy_config_dir }}/file_source_templates.yml\"\ngalaxy_object_store_templates_config_file: \"{{ galaxy_config_dir }}/object_store_templates.yml\"\ngalaxy_vault_config_file: \"{{ galaxy_config_dir }}/vault_conf.yml\"\ngalaxy_user_name: \"galaxy\"\ngalaxy_home_dir: \"/home/{{ galaxy_user_name }}\"\ngalaxy_source_shellrc: false\ngalaxy_user_shellrc: \"{{ galaxy_home_dir }}/.bashrc\"\ngalaxy_logs_dir: \"{{ galaxy_home_dir }}/\"\ngalaxy_venv_dir: \"/galaxy_venv\"\n\ngalaxy_config_file: \"{{ galaxy_config_dir }}/galaxy.yml\"\ngalaxy_toolshed_config_file: \"{{ galaxy_config_dir }}/tool_shed.yml\"\ngalaxy_tool_data_table_config_file: \"{{ galaxy_config_dir }}/tool_data_table_conf.xml\"\n\ngalaxy_toolshed_port: \"9009\"\n\n# Docker defaults\ngalaxy_docker_enabled: false\ngalaxy_docker_sudo: false\ngalaxy_docker_default_image: 'busybox:ubuntu-14.04'\ngalaxy_docker_volumes_from: \"\"\ngalaxy_docker_volumes : \"$defaults\"\ngalaxy_docker_net: \"bridge\"\ngalaxy_docker_auto_rm: true\ngalaxy_docker_set_user: \"\"\n\n# Singularity defaults\ngalaxy_singularity_enabled: false\ngalaxy_singularity_sudo: false\n# ToDo create default image\ngalaxy_singularity_default_image: ''\ngalaxy_singularity_volumes_from: \"\"\n# rw directories are not considered if the parent is ro\ngalaxy_singularity_volumes : 
\"$defaults{{ ',/cvmfs:/cvmfs' if galaxy_cvmfs_client | bool else '' }}\"\n\ngalaxy_container_resolution: true\ncontainer_resolution_explicit: true\ncontainer_resolution_mulled: true\ncontainer_resolution_cached_mulled: \"{{ container_resolution_mulled }}\"\ncontainer_resolution_build_mulled: \"{{ container_resolution_mulled }}\"\ncontainer_resolution_mulled_namespace: biocontainers\n\n# Gravity configuration.\ngravity_config_file: \"{{ galaxy_config_dir }}/gravity.yml\"\ngravity_state_dir: \"{{ galaxy_server_dir }}/database/gravity\"\ngravity_process_manager: \"supervisor\"\ngravity_manage_celery: true\ngravity_manage_tusd: true\ngravity_manage_gx_it_proxy: true\n\n# Gunicorn configuration.\ngalaxy_gunicorn: true\ngunicorn_port: \"4001\"\ngunicorn_workers: 2\n\n# Handler configuration.\ngalaxy_dynamic_handlers: true\ngalaxy_handler_processes: 2\n\n# Celery configuration.\ngalaxy_celery: true\ngalaxy_celery_beat: true\ncelery_workers: 2\n\n# gx_it_proxy configuration.\ngalaxy_gx_it_proxy: true\ngx_it_proxy_port: \"4002\"\ngx_it_proxy_version: '>=0.0.6'\ngx_it_proxy_sessions_path: \"{{ galaxy_server_dir }}/database/interactivetools_map.sqlite\"\n\n# Tusd configuration.\ntusd_port: \"1080\"\ntusd_path: \"/usr/local/sbin/tusd\"\ntus_upload_store_path: \"/tmp/tus_upload_store\"\ntusd_base_path: \"{{ nginx_tusd_location }}\"\n\ngalaxy_job_metrics_core: true\ngalaxy_job_metrics_env: false\ngalaxy_job_metrics_cpuinfo: true\ngalaxy_job_metrics_meminfo: true\ngalaxy_job_metrics_uname: true\ngalaxy_job_metrics_hostname: false\ngalaxy_job_metrics_cgroup: false\n# TODO: configure collectl, individual env files\n# TODO: alternative to configure metrics all at once using yml datastructure.\n\ngalaxy_it_fetch_jupyter: false\ngalaxy_it_jupyter_image: quay.io/bgruening/docker-jupyter-notebook:2021-03-05\ngalaxy_it_fetch_rstudio: false\ngalaxy_it_rstudio_image: quay.io/galaxy/docker-rstudio-notebook:23.1\ngalaxy_it_fetch_ethercalc: false\ngalaxy_it_ethercalc_image: 
shiltemann/ethercalc-galaxy-ie:17.05\ngalaxy_it_fetch_phinch: false\ngalaxy_it_phinch_image: shiltemann/docker-phinch-galaxy:16.04\ngalaxy_it_fetch_neo: false\ngalaxy_it_neo_image: quay.io/sanbi-sa/neo_ie:3.1.9\n\n# Nginx configuration.\nnginx_conf_dir: /etc/nginx\nnginx_conf_file: \"{{ nginx_conf_dir }}/nginx.conf\"\n\n# Use nginx_*_location variables to control serving apps at subdirectories.\n# If galaxy should be served at subdirectory (e.g. example.com/galaxy) set nginx_galaxy_location: /galaxy\n# If all apps should be served on a common subdirectory, use nginx_prefix_location: /your_common_dir\nnginx_prefix_location: \"\"\nnginx_galaxy_location: \"{{ nginx_prefix_location }}\"\nnginx_rabbitmq_management_location: \"{{ nginx_prefix_location }}/rabbitmq\"\nnginx_flower_location: \"{{ nginx_prefix_location }}/flower\"\nnginx_tusd_location: \"{{ nginx_prefix_location }}/api/upload/resumable_upload\"\nnginx_planemo_web_location: \"{{ nginx_prefix_location }}/planemo\"\nnginx_ide_location: \"{{ nginx_prefix_location }}/ide\"\nnginx_welcome_location: \"{{ nginx_prefix_location }}/etc/galaxy/web\"\nnginx_welcome_path: \"/etc/galaxy/web\"\n\n# Synchronize error handling with ansible-galaxy role.\ngalaxy_errordocs_dir: \"/var/www/galaxy_errordocs\"\n\n#web security\nnginx_use_passwords: false\nnginx_htpasswds:\n  - \"admin:WiBKbsJTSQ8dc\"\nnginx_use_remote_header: true\n\n# Additional configurations to be appended to nginx config\nnginx_additional_config : []\n\nnginx_proxy_gunicorn: true\nnginx_proxy_rabbitmq_management: true\nnginx_proxy_flower: true\nnginx_proxy_interactive_tools: true\n\n# Certbot Configuration.\ncertbot_auto_renew_hour: \"{{ 23 |random(seed=inventory_hostname)  }}\"\ncertbot_auto_renew_minute: \"{{ 59 |random(seed=inventory_hostname)  }}\"\ncertbot_auth_method: --webroot\ncertbot_install_method: virtualenv\ncertbot_auto_renew: yes\ncertbot_auto_renew_user: root\ncertbot_environment: production\ncertbot_well_known_root: \"{{ nginx_conf_dir 
}}/_well-known_root\"\ncertbot_share_key_users:\n  - \"{{ galaxy_user_name }}\"\ncertbot_post_renewal: |\n    supervisorctl restart nginx || true\ncertbot_agree_tos: --agree-tos\n\n## Proftp Configuration.\nproftpd_conf_path:  /etc/proftpd/proftpd.conf\nproftpd_sql_db: galaxy@galaxy\nproftpd_sql_user: galaxy\nproftpd_sql_password: galaxy\nproftpd_welcome: \"Public Galaxy FTP\"\ngalaxy_ftp_upload_dir: /export/ftp\nproftpd_ftp_port: 21\nproftpd_passive_port_low: 30000\nproftpd_passive_port_high: 40000\nproftpd_sftp_port: 22\n# Set masquerade to true if host is NAT'ed.\nproftpd_nat_masquerade: false\n# proftpd_masquerade_address refers to the ip that clients use to establish an ftp connection.\n# Can be a command that returns an IP or an IP address and applies only if proftpd_nat_masquerade is true.\n# ec2metadata --public-ipv4 returns the public ip for amazon's ec2 service.\nproftpd_masquerade_address: \"`ec2metadata --public-ipv4`\"\n\n## RabbitMQ Configuration.\nrabbitmq_port: \"5672\"\nrabbitmq_management_port: \"15672\"\nrabbitmq_admin_username: admin\nrabbitmq_admin_password: admin\nrabbitmq_galaxy_vhost: galaxy\nrabbitmq_galaxy_username: galaxy\nrabbitmq_galaxy_password: galaxy\nrabbitmq_flower_username: flower\nrabbitmq_flower_password: flower\ngalaxy_amqp_internal_connection: \"pyamqp://{{ rabbitmq_galaxy_username }}:{{ rabbitmq_galaxy_password }}@localhost:{{ rabbitmq_port }}/{{ rabbitmq_galaxy_vhost }}\"\n\n## Flower Configuration.\nflower_conf_path: /etc/flower/flowerconfig.py\nflower_bind_address: 0.0.0.0\nflower_port: \"5555\"\nflower_broker_api: \"http://{{ rabbitmq_flower_username }}:{{ rabbitmq_flower_password }}@localhost:{{ rabbitmq_management_port }}/api/\"  # URL of broker (RabbitMQ Management) API\nflower_broker_url: \"amqp://{{ rabbitmq_flower_username }}:{{ rabbitmq_flower_password }}@localhost:{{ rabbitmq_port }}/{{ rabbitmq_galaxy_vhost }}\"  # AMQP URL for Flower to connect to broker (RabbitMQ)\nflower_persistent: true\nflower_db_file: \"{{ 
galaxy_server_dir }}/database/flower.db\"\nflower_app_name: galaxy.celery\nflower_log: \"{{ galaxy_logs_dir }}/flower.log\"\nflower_url_prefix: \"{{ nginx_flower_location }}\"\nflower_venv_dir: \"{{ galaxy_venv_dir }}\"\nflower_user: \"{{ galaxy_user_name }}\"\nflower_group: \"{{ galaxy_user_name }}\"\nflower_venv_user: \"{{ galaxy_user_name }}\"\nflower_venv_group: \"{{ galaxy_user_name }}\"\n\n## Supervisor Configuration.\nsupervisor_conf_path: \"/etc/supervisor/conf.d/galaxy.conf\"\nsupervisor_webserver: true\nsupervisor_webserver_port: \"0.0.0.0:9002\"\nsupervisor_webserver_username: null\nsupervisor_webserver_password: changeme\n\nsupervisor_manage_cron: true\nsupervisor_manage_autofs: true\nsupervisor_manage_slurm: false\nsupervisor_manage_condor: true\nsupervisor_manage_postgres: true\nsupervisor_manage_proftp: true\nsupervisor_manage_nginx: true\nsupervisor_manage_toolshed: false\nsupervisor_manage_docker: true\nsupervisor_manage_rabbitmq: true\nsupervisor_manage_redis: true\nsupervisor_manage_flower: true\n\nsupervisor_cron_autostart: false\nsupervisor_autofs_autostart: true\nsupervisor_slurm_autostart: true\nsupervisor_condor_autostart: false\nsupervisor_postgres_autostart: false\nsupervisor_proftpd_autostart: false\nsupervisor_docker_autostart: false\nsupervisor_docker_autorestart: true\nsupervisor_rabbitmq_autostart: false\nsupervisor_redis_autostart: false\nsupervisor_flower_autostart: false\n\nsupervisor_slurm_config_dir: \"/home/galaxy\"\nsupervisor_postgres_config_path: \"/etc/postgresql/{{ postgresql_version }}/main/postgresql.conf\"\nsupervisor_postgres_database_path: \"/export/postgresql/{{ postgresql_version }}/main\"\nsupervisor_postgres_options: \"-D {{ supervisor_postgres_database_path }} -c \\\"config_file={{ supervisor_postgres_config_path }}\\\"\"\n\nsupervisor_galaxy_startsecs: 20\n# had to increase retries to ensure the postgres database is available,\n# wasn't needed in the past.\nsupervisor_galaxy_startretries: 15\n\n## CVMFS 
Configuration.\ncvmfs_config_repo:\n  domain: galaxyproject.org\n  key:\n    path: /etc/cvmfs/keys/galaxyproject.org/cvmfs-config.galaxyproject.org.pub\n    key: |\n      -----BEGIN PUBLIC KEY-----\n      MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuJZTWTY3/dBfspFKifv8\n      TWuuT2Zzoo1cAskKpKu5gsUAyDFbZfYBEy91qbLPC3TuUm2zdPNsjCQbbq1Liufk\n      uNPZJ8Ubn5PR6kndwrdD13NVHZpXVml1+ooTSF5CL3x/KUkYiyRz94sAr9trVoSx\n      THW2buV7ADUYivX7ofCvBu5T6YngbPZNIxDB4mh7cEal/UDtxV683A/5RL4wIYvt\n      S5SVemmu6Yb8GkGwLGmMVLYXutuaHdMFyKzWm+qFlG5JRz4okUWERvtJ2QAJPOzL\n      mAG1ceyBFowj/r3iJTa+Jcif2uAmZxg+cHkZG5KzATykF82UH1ojUzREMMDcPJi2\n      dQIDAQAB\n      -----END PUBLIC KEY-----\n  urls:\n    - http://cvmfs1-psu0.galaxyproject.org/cvmfs/@fqrn@\n    - http://cvmfs1-iu0.galaxyproject.org/cvmfs/@fqrn@\n    - http://cvmfs1-tacc0.galaxyproject.org/cvmfs/@fqrn@\n    - http://cvmfs1-ufr0.galaxyproject.eu/cvmfs/@fqrn@\n    - http://cvmfs1-mel0.gvl.org.au/cvmfs/@fqrn@\n  repository:\n    repository: cvmfs-config.galaxyproject.org\n    stratum0: cvmfs0-psu0.galaxyproject.org\n    owner: \"root\"\n    server_options: []\n    client_options: []\n\ncvmfs_keys:\n  - path: /etc/cvmfs/keys/galaxyproject.org/cvmfs-config.galaxyproject.org.pub\n    key: |\n      -----BEGIN PUBLIC KEY-----\n      MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuJZTWTY3/dBfspFKifv8\n      TWuuT2Zzoo1cAskKpKu5gsUAyDFbZfYBEy91qbLPC3TuUm2zdPNsjCQbbq1Liufk\n      uNPZJ8Ubn5PR6kndwrdD13NVHZpXVml1+ooTSF5CL3x/KUkYiyRz94sAr9trVoSx\n      THW2buV7ADUYivX7ofCvBu5T6YngbPZNIxDB4mh7cEal/UDtxV683A/5RL4wIYvt\n      S5SVemmu6Yb8GkGwLGmMVLYXutuaHdMFyKzWm+qFlG5JRz4okUWERvtJ2QAJPOzL\n      mAG1ceyBFowj/r3iJTa+Jcif2uAmZxg+cHkZG5KzATykF82UH1ojUzREMMDcPJi2\n      dQIDAQAB\n      -----END PUBLIC KEY-----\n  - path: /etc/cvmfs/keys/galaxyproject.org/data.galaxyproject.org.pub\n    key: |\n      -----BEGIN PUBLIC KEY-----\n      MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5LHQuKWzcX5iBbCGsXGt\n      
6CRi9+a9cKZG4UlX/lJukEJ+3dSxVDWJs88PSdLk+E25494oU56hB8YeVq+W8AQE\n      3LWx2K2ruRjEAI2o8sRgs/IbafjZ7cBuERzqj3Tn5qUIBFoKUMWMSIiWTQe2Sfnj\n      GzfDoswr5TTk7aH/FIXUjLnLGGCOzPtUC244IhHARzu86bWYxQJUw0/kZl5wVGcH\n      maSgr39h1xPst0Vx1keJ95AH0wqxPbCcyBGtF1L6HQlLidmoIDqcCQpLsGJJEoOs\n      NVNhhcb66OJHah5ppI1N3cZehdaKyr1XcF9eedwLFTvuiwTn6qMmttT/tHX7rcxT\n      owIDAQAB\n      -----END PUBLIC KEY-----\n"
  },
  {
    "path": "galaxy/ansible/k8s.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: \"Install secure urllib3 for galaxy - better SSL verification with pykube\"\n      # See also https://github.com/kelproject/pykube/issues/29\n      pip: name=urllib3[secure] virtualenv={{ galaxy_venv_dir }} virtualenv_command=\"{{ pip_virtualenv_command | default( 'virtualenv' ) }}\"\n      when: galaxy_gcc_available | bool\n      become: True\n      become_user: \"{{ galaxy_user_name }}\"\n\n    - name: \"Install ipaddress for galaxy - better SSL verification with pykube\"\n      # See also https://github.com/kelproject/pykube/issues/29\n      pip: name=ipaddress virtualenv={{ galaxy_venv_dir }} virtualenv_command=\"{{ pip_virtualenv_command | default( 'virtualenv' ) }}\"\n      become: True\n      become_user: \"{{ galaxy_user_name }}\"\n\n    - name: \"Install pykube for galaxy\"\n      pip: name=pykube-ng version=\"21.3.0\" virtualenv={{ galaxy_venv_dir }} virtualenv_command=\"{{ pip_virtualenv_command | default( 'virtualenv' ) }}\"\n      become: True\n      become_user: \"{{ galaxy_user_name }}\"\n"
  },
  {
    "path": "galaxy/ansible/nginx.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    # Default container config: avoid DH param generation and OCSP stapling\n    # errors in ephemeral/self-signed setups. Override in production.\n    nginx_conf_ssl_protocols:\n      - TLSv1.2\n      - TLSv1.3\n    nginx_conf_ssl_dhparam: false\n    nginx_conf_ssl_stapling: \"off\"\n    nginx_conf_ssl_stapling_verify: \"off\"\n    nginx_conf_http:\n      client_max_body_size: 50g\n      proxy_buffers: 8 16k\n      proxy_buffer_size: 16k\n      underscores_in_headers: \"on\"\n      keepalive_timeout: 600\n      proxy_read_timeout: 300\n      server_names_hash_bucket_size: 128\n      # gzip: \"on\" # This is enabled by default in Ubuntu, and the duplicate directive will cause a crash.\n      gzip_proxied: \"any\"\n      gzip_static: \"on\"\n      gzip_vary: \"on\"\n      gzip_min_length: 128\n      gzip_comp_level: 6\n      gzip_types: |\n        text/plain\n        text/css\n        text/xml\n        text/javascript\n        application/javascript\n        application/x-javascript\n        application/json\n        application/xml\n        application/xml+rss\n        application/xhtml+xml\n        application/x-font-ttf\n        application/x-font-opentype\n        image/png\n        image/svg+xml\n        image/x-icon\n    nginx_extra_configs:\n      - galaxy_common.conf\n      - interactive_tools_common.conf\n      - flower_auth.conf\n      - delegated_uploads.conf\n    nginx_servers:\n      - galaxy_http\n      - interactive_tools_http\n    galaxy_errordocs:\n      - { code: '502', src: '502.html' }\n      - { code: '413', src: '413.html' }\n      - { code: '500', src: '500.html' }\n  \n  # this is required as we re-run this playbook during startup (while setting up ssl or proxy prefix)\n  pre_tasks:\n    - name: Ensure 'daemon off' is absent from nginx.conf\n      lineinfile:\n        path: '{{ nginx_conf_file }}'\n        regexp: '^(\\s*daemon\\s+off\\s*;)'\n        state: 
absent\n\n    - name: Ensure nginx sites-available and sites-enabled directories are empty and recreated\n      file:\n        state: \"{{ item.state }}\"\n        path: \"{{ nginx_conf_dir }}/{{ item.dir }}\"\n        owner: root\n        group: root\n        mode: '0755'\n      with_items:\n        - { dir: 'sites-available', state: 'absent' }\n        - { dir: 'sites-available', state: 'directory' }\n        - { dir: 'sites-enabled', state: 'absent' }\n        - { dir: 'sites-enabled', state: 'directory' }\n  \n  roles:\n    - role: galaxyproject.nginx\n\n  tasks:\n    - name: Ensure nginx is run by galaxy user\n      lineinfile:\n        path: '{{ nginx_conf_file }}'\n        regexp: '^(\\s*user\\s+.*)'\n        line: 'user {{ galaxy_user_name }};'\n        state: present\n  \n    - name: Place htpasswd file\n      template:\n        src: \"nginx/htpasswd.j2\"\n        dest: \"{{ nginx_conf_dir }}/htpasswd\"\n        owner: \"{{ galaxy_user_name }}\"\n        group: \"{{ galaxy_user_name }}\"\n        mode: \"0600\"\n    \n    - name: Create directories for error docs\n      file:\n        path: \"{{ galaxy_errordocs_dir }}/{{ item.code }}\"\n        state: directory\n      loop: \"{{ galaxy_errordocs }}\"\n\n    - name: Copy error docs into their respective directories\n      copy:\n        src: \"{{ item.src }}\"\n        dest: \"{{ galaxy_errordocs_dir }}/{{ item.code }}/index.html\"\n        mode: '0755'\n      loop: \"{{ galaxy_errordocs }}\"\n\n  post_tasks:\n    - name: Ensure 'daemon off' in nginx.conf as we use supervisor to manage nginx\n      lineinfile:\n        path: '{{ nginx_conf_file }}'\n        regexp: '^(\\s*daemon\\s+.*)'\n        line: 'daemon off;'\n        state: present\n    \n    - name: Stop and disable nginx.\n      service: name=nginx state=stopped enabled=no\n"
  },
  {
    "path": "galaxy/ansible/pbs.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install PBS/torque system packages\n      apt: \n        state: \"{{ galaxy_apt_package_state }}\"\n        name: \"{{ packages }}\"\n      vars:\n        packages:\n        - torque-client\n        - pbs-drmaa-dev\n\n    # If job_conf.xml is installed before running galaxyprojectdotorg.galaxy, this would already be installed.\n    - name: Fetch DRMAA wheel for Galaxy\n      pip:\n        name: \"drmaa\"\n        extra_args: \"--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple\"\n        virtualenv: \"{{ galaxy_venv_dir }}\"\n      environment:\n        PYTHOPATH: null\n        VIRTUAL_ENV: \"{{ galaxy_venv_dir }}\"\n      become_user: \"{{ galaxy_user_name }}\"\n\n    - name: \"Set PBS/torque server name\"\n      lineinfile: dest=/etc/torque/server_name line={{ pbs_server_name }} state=present create=yes\n"
  },
  {
    "path": "galaxy/ansible/postgresql.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    postgresql_backup_local_dir: /export/postgresql_backup/\n    postgresql_version: 15\n    postgresql_flavor: pgdg\n    postgresql_conf:\n      listen_addresses: \"'*'\"\n      hba_file: \"'/etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf'\"\n      ident_file: \"'/etc/postgresql/{{ postgresql_version }}/main/pg_ident.conf'\"\n    postgresql_pg_hba_conf:\n      - host all all 0.0.0.0/0 md5\n  roles:\n    - role: galaxyproject.postgresql\n"
  },
  {
    "path": "galaxy/ansible/proftpd.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    proftpd_galaxy_auth: yes\n    galaxy_user:\n      name: \"{{ galaxy_user_name }}\"\n    proftpd_galaxy_modules:\n      - mod_sql.c\n      - mod_sql_passwd.c\n      - mod_sql_postgres.c\n      - mod_sftp.c\n      - mod_sftp_pam.c\n      - mod_sftp_sql.c\n    proftpd_create_ftp_upload_dir: yes\n    proftpd_options:\n      - User: \"{{ galaxy_user_name }}\"\n      - Group: \"{{ galaxy_user_name }}\"\n    proftpd_global_options:\n      - PassivePorts: \"{{ proftpd_passive_port_low }} {{ proftpd_passive_port_high }}\"\n    proftpd_display_connect: \"{{ proftpd_welcome }}\"\n    base_ssh_host_keys_dir: /etc/proftpd/ssh_host_keys\n    proftpd_virtualhosts:\n      - id: sftp\n        address: 0.0.0.0\n        options:\n          - Port: \"{{ proftpd_sftp_port}}\"\n          - SFTPEngine: on\n          - SFTPPAMEngine: off\n          - CreateHome: on dirmode 700\n          - SFTPHostKey: \"{{ base_ssh_host_keys_dir }}/rsa\"\n          - SFTPHostKey: \"{{ base_ssh_host_keys_dir }}/dsa\"\n          - SFTPCompression: delayed\n          - SQLEngine: on\n          - SQLPasswordEngine: on\n          - SQLLogFile: /var/log/proftpd/sql.log\n          - SQLBackend: postgres\n          - SQLAuthenticate: users\n          - SQLConnectInfo: \"{{ proftpd_sql_db }} {{ proftpd_sql_user }} {{ proftpd_sql_password }}\"\n          - SQLAuthTypes: PBKDF2 SHA1\n          - SQLPasswordPBKDF2: sql:/GetPBKDF2Params\n          - SQLPasswordEncoding: base64\n          - SQLUserInfo: custom:/LookupGalaxyUser\n          - SQLPasswordUserSalt: sql:/GetUserSalt\n          - SQLNamedQuery: GetPBKDF2Params  SELECT \"(CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN UPPER(split_part(password, '$', 2)) ELSE 'SHA256' END), (CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 3) ELSE '10000' END), 24 FROM galaxy_user WHERE email='%U'\"\n          - SQLNamedQuery: GetUserSalt      
SELECT \"(CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 4) END) FROM galaxy_user WHERE email='%U'\"\n          - SQLNamedQuery: LookupGalaxyUser SELECT \"email, (CASE WHEN split_part(password, '$', 1) = 'PBKDF2' THEN split_part(password, '$', 5) ELSE encode(decode(password, 'hex'), 'base64') END),'{{ galaxy_user_name }}','{{ galaxy_user_name }}','{{ galaxy_ftp_upload_dir }}/%U','/bin/bash' FROM galaxy_user WHERE email='%U'\"\n\n  # Required for sftp server\n  pre_tasks:\n    - name: Install ProFTPD module packages\n      apt:\n        name:\n          - proftpd-mod-crypto\n          - proftpd-mod-pgsql\n        state: present\n        update_cache: true\n\n    - name: Install OpenSSH client package\n      apt: pkg=openssh-client\n\n    - name: Create ssh host keys directory\n      file: path=\"{{ base_ssh_host_keys_dir }}\" state=directory\n\n    - name: Generate new SSH keys (rsa)\n      shell: ssh-keygen -b 2048 -t rsa -f \"{{ base_ssh_host_keys_dir }}/rsa\" -N \"\"\n      args:\n        creates: \"{{ base_ssh_host_keys_dir }}/rsa\"\n\n    - name: Generate new SSH keys (dsa)\n      shell: ssh-keygen -b 1024 -t dsa -f \"{{ base_ssh_host_keys_dir }}/dsa\" -N \"\"\n      args:\n        creates: \"{{ base_ssh_host_keys_dir }}/dsa\"\n\n  roles:\n    - role: galaxyproject.proftpd\n"
  },
  {
    "path": "galaxy/ansible/provision.yml",
    "content": "---\n- import_playbook: gravity.yml\n  when: galaxy_gravity | bool\n  tags: galaxy_gravity\n\n- import_playbook: postgresql.yml\n  when: galaxy_postgres | bool\n  tags: galaxy_postgres\n\n- import_playbook: nginx.yml\n  when: galaxy_nginx | bool\n  tags: galaxy_nginx\n\n- import_playbook: proftpd.yml\n  when: galaxy_proftpd | bool\n  tags: galaxy_proftpd\n\n- import_playbook: slurm.yml\n  when: galaxy_slurm | bool\n  tags: galaxy_slurm\n\n- import_playbook: condor.yml\n  when: galaxy_condor | bool\n  tags: galaxy_condor\n\n- import_playbook: pbs.yml\n  when: galaxy_pbs | bool\n  tags: galaxy_pbs\n\n- import_playbook: k8s.yml\n  when: galaxy_k8s_jobs | bool\n  tags: galaxy_k8s_jobs\n\n- import_playbook: cvmfs_client.yml\n  when: galaxy_cvmfs_client | bool\n  tags: galaxy_cvmfs_client\n\n- import_playbook: rabbitmq.yml\n  when: galaxy_rabbitmq | bool\n  tags: galaxy_rabbitmq\n\n- import_playbook: redis.yml\n  when: galaxy_redis | bool\n  tags: galaxy_redis\n\n# - import_playbook: flower.yml\n#   when: galaxy_flower | bool\n#   tags: galaxy_flower\n\n- import_playbook: tusd.yml\n  when: galaxy_tusd | bool\n  tags: galaxy_tusd\n\n- import_playbook: docker.yml\n  when: galaxy_docker | bool\n  tags: galaxy_docker\n\n- import_playbook: supervisor.yml\n  when: galaxy_supervisor | bool\n  tags: galaxy_supervisor\n\n- import_playbook: galaxy_scripts.yml\n  when: galaxy_scripts | bool\n  tags: galaxy_scripts\n\n- import_playbook: galaxy_job_conf.yml\n  when: galaxy_job_conf | bool\n  tags: galaxy_job_conf\n\n- import_playbook: galaxy_job_metrics.yml\n  when: galaxy_job_metrics | bool\n  tags: galaxy_job_metrics\n\n- import_playbook: galaxy_file_source_templates.yml\n  when: galaxy_file_source_templates | bool\n  tags: galaxy_file_source_templates\n\n- import_playbook: galaxy_object_store_templates.yml\n  when: galaxy_object_store_templates | bool\n  tags: galaxy_object_store_templates\n\n- import_playbook: galaxy_vault_config.yml\n  when: galaxy_vault_config | 
bool\n  tags: galaxy_vault_config\n"
  },
  {
    "path": "galaxy/ansible/rabbitmq.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    rabbitmq_keyring_path: /usr/share/keyrings/com.rabbitmq.team.gpg\n    rabbitmq_repo_list_path: /etc/apt/sources.list.d/rabbitmq.list\n    rabbitmq_version: 4.2.2-1\n    rabbitmq_erlang_packages:\n      - erlang-base\n      - erlang-asn1\n      - erlang-crypto\n      - erlang-eldap\n      - erlang-ftp\n      - erlang-inets\n      - erlang-mnesia\n      - erlang-os-mon\n      - erlang-parsetools\n      - erlang-public-key\n      - erlang-runtime-tools\n      - erlang-snmp\n      - erlang-ssl\n      - erlang-syntax-tools\n      - erlang-tftp\n      - erlang-tools\n      - erlang-xmerl\n  tasks:\n    - name: Install RabbitMQ repository prerequisites\n      apt:\n        name:\n          - curl\n          - gnupg\n          - apt-transport-https\n        state: present\n        update_cache: true\n\n    - name: Add RabbitMQ signing key\n      shell: |\n        curl -1sLf \"https://keys.openpgp.org/vks/v1/by-fingerprint/0A9AF2115F4687BD29803A206B73A36E6026DFCA\" | gpg --dearmor -o {{ rabbitmq_keyring_path }}\n      args:\n        creates: \"{{ rabbitmq_keyring_path }}\"\n\n    - name: Configure RabbitMQ apt repositories\n      copy:\n        dest: \"{{ rabbitmq_repo_list_path }}\"\n        content: |\n          ## Modern Erlang/OTP releases\n          deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb1.rabbitmq.com/rabbitmq-erlang/ubuntu/noble noble main\n          deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb2.rabbitmq.com/rabbitmq-erlang/ubuntu/noble noble main\n\n          ## Latest RabbitMQ releases\n          deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb1.rabbitmq.com/rabbitmq-server/ubuntu/noble noble main\n          deb [arch=amd64 signed-by={{ rabbitmq_keyring_path }}] https://deb2.rabbitmq.com/rabbitmq-server/ubuntu/noble noble main\n\n    - name: Install Erlang packages\n      apt:\n        name: \"{{ 
rabbitmq_erlang_packages }}\"\n        state: present\n        update_cache: true\n\n    - name: Install RabbitMQ server\n      apt:\n        name: \"rabbitmq-server={{ rabbitmq_version }}\"\n        state: present\n        update_cache: true\n\n    - name: Enable rabbitmq management plugin\n      rabbitmq_plugin:\n        name: rabbitmq_management\n        broker_state: offline\n        state: enabled\n\n    - name: Copy startup script for rabbitmq\n      template: src=rabbitmq.sh.j2 dest=/usr/local/bin/rabbitmq.sh\n\n    - name: Install rabbitmq users configuration script\n      template: src=configure_rabbitmq_users.yml.j2 dest=/usr/local/bin/configure_rabbitmq_users.yml\n    \n    - name: Purge systemd and perform cleanup\n      shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean\n"
  },
  {
    "path": "galaxy/ansible/redis.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  roles:\n    - role: geerlingguy.redis\n  tasks:\n    - name: Set daemonize as no in redis config\n      lineinfile:\n        path: /etc/redis/redis.conf\n        regexp: '^daemonize'\n        line: 'daemonize no'\n        state: \"{{ galaxy_apt_package_state }}\"\n    - name: Install redis python package for galaxy\n      pip:\n        name: \"redis\"\n        virtualenv: \"{{ redis_venv_dir }}\"\n        virtualenv_command: \"{{ pip_virtualenv_command | default( 'virtualenv' ) }}\"\n        extra_args: --index-url https://wheels.galaxyproject.org/simple --extra-index-url https://pypi.python.org/simple\n      become: True\n      become_user: \"{{ redis_venv_user }}\"\n"
  },
  {
    "path": "galaxy/ansible/requirements.yml",
    "content": "---\nroles:\n  - name: galaxyproject.postgresql\n    version: 1.1.8\n  - name: geerlingguy.docker\n    version: 7.9.0\n  - name: usegalaxy_eu.flower\n    version: 2.1.1\n  - name: grycap.htcondor\n    src: https://github.com/usegalaxy-eu/ansible-htcondor-grycap\n    version: fe15ce1569e93a9d1030350c42d1af79e8c3e905\n  - name: galaxyproject.proftpd\n    version: 0.3.3\n  - name: geerlingguy.rabbitmq\n    src: https://github.com/geerlingguy/ansible-role-rabbitmq\n    version: 3.0.0\n  - name: geerlingguy.redis\n    version: 1.9.1\n  - name: galaxyproject.repos\n    version: 0.0.3\n  - name: galaxyproject.slurm\n    version: 1.0.5\n  - name: galaxyproject.tusd\n    src: https://github.com/galaxyproject/ansible-role-tusd\n    version: e009b498a7989d8002c6a5d104176295d63e9fae\n  - name: galaxyproject.nginx\n    version: 1.0.0\n  - name: usegalaxy_eu.certbot\n    version: 0.1.13\n  - name: galaxyproject.self_signed_certs\n    version: 0.0.4\n"
  },
  {
    "path": "galaxy/ansible/slurm.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    slurm_roles: ['controller', 'exec']\n    slurm_config:\n      SlurmctldHost: localhost\n      SlurmUser: '{{ galaxy_user_name }}'\n      SelectType: select/cons_tres\n      SelectTypeParameters: CR_Core_Memory\n      StateSaveLocation: /tmp/slurm\n      ReturnToService: 1\n  roles:\n    - role: galaxyproject.slurm\n    # - role: galaxyproject.repos\n  tasks:\n    - name: Ensure slurm-drmaa library path exists\n      file:\n        path: /usr/lib/slurm-drmaa/lib\n        state: directory\n\n    - name: Ensure slurm-drmaa symlink exists\n      file:\n        src: /usr/lib/slurm-drmaa/lib/libdrmaa.so.1\n        dest: /usr/lib/slurm-drmaa/lib/libdrmaa.so\n        state: link\n        force: true\n\n    - name: Setup tmp area for slurm\n      file: path=/tmp/slurm state=directory owner={{ galaxy_user_name }} group={{ galaxy_user_name }}\n\n    - name: Add script to update slurm configuration file\n      template: src=configure_slurm.py.j2 dest=/usr/sbin/configure_slurm.py mode=0755\n\n    - name: Setup Munge permissions and folder\n      file: path={{ item }} state=directory owner=root group=root recurse=yes\n      with_items:\n        - /var/run/munge\n        - /var/lib/munge\n        - /var/log/munge\n        - /var/run/munge\n        - /etc/munge\n\n    # If job_conf.xml is installed before running galaxyprojectdotorg.galaxy, this would already be installed.\n    - name: Fetch DRMAA wheel for Galaxy\n      pip:\n        name: \"drmaa\"\n        extra_args: \"--index-url https://wheels.galaxyproject.org/simple/ --extra-index-url https://pypi.python.org/simple\"\n        virtualenv: \"{{ galaxy_venv_dir }}\"\n      environment:\n        PYTHOPATH: null\n        VIRTUAL_ENV: \"{{ galaxy_venv_dir }}\"\n      become_user: \"{{ galaxy_user_name }}\"\n"
  },
  {
    "path": "galaxy/ansible/supervisor.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  tasks:\n    - name: Install supervisor package\n      apt: \n        state: \"{{ galaxy_apt_package_state }}\"\n        name: supervisor\n\n    - name: Install cron\n      apt: \n        state: \"{{ galaxy_apt_package_state }}\"\n        name: cron\n      when: supervisor_manage_cron | bool\n\n    - name: Create Galaxy configuration file\n      template: src=supervisor.conf.j2 dest={{ supervisor_conf_path }}\n\n    - name: Stop supervisor\n      service: name=supervisor state=stopped\n\n    - name: Stop and remove munge.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - munge\n      when: supervisor_manage_slurm | bool\n\n    - name: Stop and remove slurm.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - slurmd\n        - slurmctld\n      when: supervisor_manage_slurm | bool\n\n    - name: Stop and remove postgresql.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - postgresql\n      when: supervisor_manage_postgres | bool \n\n    - name: Stop and remove proftpd.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - proftpd\n      when: supervisor_manage_proftp | bool\n\n    - name: Stop and remove nginx.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - nginx\n      when: supervisor_manage_nginx | bool\n\n    - name: Stop and remove rabbitmq.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - rabbitmq-server\n      when: supervisor_manage_rabbitmq | bool\n\n    - name: Stop and remove redis.\n      service: name={{ item }} state=stopped enabled=no\n      with_items:\n        - redis-server\n      when: supervisor_manage_redis | bool\n\n    - name: Purge systemd and perform cleanup\n      shell: apt purge -y systemd && apt-get autoremove -y && apt-get clean\n\n    - name: 
Start supervisor\n      service: name=supervisor state=started\n"
  },
  {
    "path": "galaxy/ansible/templates/add_tool_shed.py.j2",
    "content": "#!/usr/bin/env python\n\nimport os\nimport argparse\nimport xml.etree.ElementTree as ET\n\nTOOL_SHEDS_XML = os.path.join(os.environ['GALAXY_ROOT_DIR'], \"config/tool_sheds_conf.xml\")\nTOOL_SHEDS_XML_SAMPLE = TOOL_SHEDS_XML + '.sample'\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Add new Tool Shed to Galaxy.')\n    parser.add_argument('-n', '--name', help='Tool Shed name that is displayed in the admin menue')\n    parser.add_argument('-u', '--url', help='Tool Shed URL')\n\n    args = parser.parse_args()\n\n    ts = ET.Element('tool_shed')\n    ts.set('name', args.name)\n    ts.set('url', args.url)\n\n    if os.path.exists( TOOL_SHEDS_XML ):\n        tree = ET.parse( TOOL_SHEDS_XML )\n    else:\n        tree = ET.parse( TOOL_SHEDS_XML_SAMPLE )\n    root = tree.getroot()\n    root.append( ts )\n    tree.write( TOOL_SHEDS_XML )\n"
  },
  {
    "path": "galaxy/ansible/templates/cgroupfs_mount.sh.j2",
    "content": "#!/bin/sh\nset -e\n\n# Get the latest version of this script from https://github.com/moby/moby/blob/65cfcc28ab37cb75e1560e4b4738719c07c6618e/hack/dind\n\n\n# DinD: a wrapper script which allows docker to be run inside a docker container.\n# Original version by Jerome Petazzoni <jerome@docker.com>\n# See the blog post: https://www.docker.com/blog/docker-can-now-run-within-docker/\n#\n# This script should be executed inside a docker container in privileged mode\n# ('docker run --privileged', introduced in docker 0.6).\n\n# Usage: dind CMD [ARG...]\n\n# apparmor sucks and Docker needs to know that it's in a container (c) @tianon\n#\n# Set the container env-var, so that AppArmor is enabled in the daemon and\n# containerd when running docker-in-docker.\n#\n# see: https://github.com/containerd/containerd/blob/787943dc1027a67f3b52631e084db0d4a6be2ccc/pkg/apparmor/apparmor_linux.go#L29-L45\n# see: https://github.com/moby/moby/commit/de191e86321f7d3136ff42ff75826b8107399497\nexport container=docker\n\n# Allow AppArmor to work inside the container;\n#\n#     aa-status\n#     apparmor filesystem is not mounted.\n#     apparmor module is loaded.\n#\n#     mount -t securityfs none /sys/kernel/security\n#\n#     aa-status\n#     apparmor module is loaded.\n#     30 profiles are loaded.\n#     30 profiles are in enforce mode.\n#       /snap/snapd/18357/usr/lib/snapd/snap-confine\n#       ...\n#\n# Note: https://0xn3va.gitbook.io/cheat-sheets/container/escaping/sensitive-mounts#sys-kernel-security\n#\n#     ## /sys/kernel/security\n#\n#     In /sys/kernel/security mounted the securityfs interface, which allows\n#     configuration of Linux Security Modules. This allows configuration of\n#     AppArmor policies, and so access to this may allow a container to disable\n#     its MAC system.\n#\n# Given that we're running privileged already, this should not be an issue.\nif [ -d /sys/kernel/security ] && ! 
mountpoint -q /sys/kernel/security; then\n\tmount -t securityfs none /sys/kernel/security || {\n\t\techo >&2 'Could not mount /sys/kernel/security.'\n\t\techo >&2 'AppArmor detection and --privileged mode might break.'\n\t}\nfi\n\n# Mount /tmp (conditionally)\nif ! mountpoint -q /tmp; then\n\tmount -t tmpfs none /tmp\nfi\n\n# cgroup v2: enable nesting\nif [ -f /sys/fs/cgroup/cgroup.controllers ]; then\n\t# move the processes from the root group to the /init group,\n\t# otherwise writing subtree_control fails with EBUSY.\n\t# An error during moving non-existent process (i.e., \"cat\") is ignored.\n\tmkdir -p /sys/fs/cgroup/init\n\txargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :\n\t# enable controllers\n\tsed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \\\n\t\t> /sys/fs/cgroup/cgroup.subtree_control\nfi\n\n# Change mount propagation to shared to make the environment more similar to a\n# modern Linux system, e.g. with SystemD as PID 1.\nmount --make-rshared /\n\nif [ $# -gt 0 ]; then\n\texec \"$@\"\nfi\n\necho >&2 'ERROR: No command specified.'\necho >&2 'You probably want to run hack/make.sh, or maybe a shell?'\n\n"
  },
  {
    "path": "galaxy/ansible/templates/check_database.py.j2",
    "content": "#!/usr/bin/env python\n\n# This script checks if the database is connected by querying an user\n\nimport sys\nsys.path.insert(1,'{{ galaxy_server_dir }}')\nsys.path.insert(1,'{{ galaxy_server_dir }}/lib')\n\nfrom galaxy.model import User\nfrom galaxy.model.mapping import init\nfrom galaxy.model.orm.scripts import get_config\nimport argparse\n\n__author__ = \"Lukas Voegtle\"\n__email__ = \"voegtlel@tf.uni-freiburg.de\"\n\nif __name__ == \"__main__\":\n    db_url = get_config(sys.argv)['db_url']\n    mapping = init('/tmp/', db_url)\n    sa_session = mapping.context\n    security_agent = mapping.security_agent\n\n    # Just query something\n    query = sa_session.query(User).filter_by(email=\"admin@example.org\")\n    query.count()\n"
  },
  {
    "path": "galaxy/ansible/templates/configure_rabbitmq_users.yml.j2",
    "content": "---\n- hosts: localhost\n  connection: local\n  become: yes\n  tasks:\n    - name: Delete 'guest' user\n      rabbitmq_user:\n        name: guest\n        state: absent\n\n    - name: Add 'admin' user\n      rabbitmq_user:\n        user: {{ rabbitmq_admin_username }}\n        password: {{ rabbitmq_admin_password }}\n        vhost: /\n        configure_priv: .*\n        read_priv: .*\n        write_priv: .*\n        tags: administrator\n        state: present\n    \n    - name: Add vhost for galaxy\n      rabbitmq_vhost:\n        vhost: {{ rabbitmq_galaxy_vhost }}\n        state: present\n\n    - name: Add 'galaxy' user\n      rabbitmq_user:\n        user: {{ rabbitmq_galaxy_username }}\n        password: {{ rabbitmq_galaxy_password }}\n        vhost: {{ rabbitmq_galaxy_vhost }}\n        configure_priv: .*\n        read_priv: .*\n        write_priv: .*\n        state: present\n\n    - name: Add 'flower' user\n      rabbitmq_user:\n        user: {{ rabbitmq_flower_username }}\n        password: {{ rabbitmq_flower_password }}\n        vhost: {{ rabbitmq_galaxy_vhost }}\n        configure_priv: .*\n        read_priv: .*\n        write_priv: .*\n        tags: administrator\n        state: present\n"
  },
  {
    "path": "galaxy/ansible/templates/configure_slurm.py.j2",
    "content": "from socket import gethostname\nfrom os import environ\nimport subprocess\nimport json\n\nCONFIG_FILE_PATH = \"/etc/slurm/slurm.conf\"\n\nENV_MAP = {\n    \"CPUs\": \"SLURM_CPUS\",\n    \"RealMemory\": \"SLURM_MEMORY\",\n    \"Boards\": \"SLURM_BOARDS\",\n    \"SocketsPerBoard\": \"SLURM_SOCKETS_PER_BOARD\",\n    \"CoresPerSocket\": \"SLURM_CORES_PER_SOCKET\",\n    \"ThreadsPerCore\": \"SLURM_THREADS_PER_CORE\",\n}\n\nFORCED_KV = {\n    \"ProctrackType\": \"proctrack/pgid\",\n    \"TaskPlugin\": \"task/none\",\n    \"JobAcctGatherType\": \"jobacct_gather/none\",\n    \"MpiDefault\": \"none\",\n}\n\ndef _as_int(value):\n    try:\n        return int(str(value).split()[0])\n    except (TypeError, ValueError):\n        return None\n\ndef _slurmd_status():\n    try:\n        output = subprocess.check_output([\"slurmd\", \"-C\"], stderr=subprocess.DEVNULL).decode(\"utf-8\")\n    except Exception:\n        return {}\n    info = {}\n    for chunk in output.split():\n        if \"=\" in chunk:\n            key, value = chunk.split(\"=\", 1)\n            info[key] = value\n    return info\n\ndef _lscpu_status():\n    try:\n        output = subprocess.check_output([\"lscpu\", \"-J\"], stderr=subprocess.DEVNULL).decode(\"utf-8\")\n        data = json.loads(output)\n    except Exception:\n        return {}\n    fields = {}\n    for entry in data.get(\"lscpu\", []):\n        field = entry.get(\"field\", \"\").strip().strip(\":\")\n        fields[field] = entry.get(\"data\")\n    cpus = _as_int(fields.get(\"CPU(s)\"))\n    sockets = _as_int(fields.get(\"Socket(s)\"))\n    cores = _as_int(fields.get(\"Core(s) per socket\"))\n    threads = _as_int(fields.get(\"Thread(s) per core\"))\n    info = {}\n    if cpus is not None:\n        info[\"CPUs\"] = str(cpus)\n    if sockets is not None:\n        info[\"SocketsPerBoard\"] = str(sockets)\n    if cores is not None:\n        info[\"CoresPerSocket\"] = str(cores)\n    if threads is not None:\n        
info[\"ThreadsPerCore\"] = str(threads)\n    info.setdefault(\"Boards\", \"1\")\n    return info\n\ndef _real_memory_mb():\n    try:\n        with open(\"/proc/meminfo\", \"r\") as handle:\n            for line in handle:\n                if line.startswith(\"MemTotal:\"):\n                    parts = line.split()\n                    if len(parts) >= 2:\n                        return int(int(parts[1]) / 1024)\n    except Exception:\n        return None\n    return None\n\ndef main():\n    dict_status = _slurmd_status()\n    for key, value in _lscpu_status().items():\n        dict_status.setdefault(key, value)\n    if \"RealMemory\" not in dict_status:\n        real_memory = _real_memory_mb()\n        if real_memory is not None:\n            dict_status[\"RealMemory\"] = str(real_memory)\n    cpus = dict_status.get('CPUs')\n    memory = dict_status.get('RealMemory')\n    mem_per_cpu = None\n    if cpus and memory:\n        mem_per_cpu = int(int(memory) / int(cpus))\n\n    # Define variables based on environment or default values\n    hostname = gethostname()\n    template_params = {\n        \"SlurmctldHost\": environ.get('SLURMCTLD_HOST', hostname),\n        \"ClusterName\": environ.get('SLURM_CLUSTER_NAME', 'cluster'),\n        \"SlurmUser\": environ.get('SLURM_USER_NAME', '{{ galaxy_user_name }}'),\n    }\n\n    # Construct NodeName and PartitionName lines\n    node_parts = [f\"NodeName={hostname}\", \"State=UNKNOWN\"]\n    for key in (\"CPUs\", \"Boards\", \"SocketsPerBoard\", \"CoresPerSocket\", \"ThreadsPerCore\", \"RealMemory\"):\n        env_key = ENV_MAP.get(key)\n        value = environ.get(env_key) if env_key else None\n        if value is None:\n            value = dict_status.get(key)\n        if value is not None:\n            node_parts.append(f\"{key}={value}\")\n    node_line = \" \".join(node_parts)\n    partition_line = f\"PartitionName={environ.get('SLURM_PARTITION_NAME', 'debug')} Default=YES Nodes={hostname} \" \\\n        f\"MaxTime=INFINITE 
State=UP Shared=YES\"\n    if mem_per_cpu is not None:\n        partition_line += f\" DefMemPerCPU={environ.get('SLURM_MEMORY_PER_CPU', mem_per_cpu)}\"\n\n    with open(CONFIG_FILE_PATH, 'r') as file:\n        lines = file.readlines()\n\n    # Updated lines with replacements\n    updated_lines = []\n    found_keys = set()\n    for line in lines:\n        stripped_line = line.strip()\n\n        # Update lines based on key-value matching\n        if stripped_line.startswith(\"NodeName=\"):\n            updated_lines.append(node_line + \"\\n\")\n        elif stripped_line.startswith(\"PartitionName=\"):\n            updated_lines.append(partition_line + \"\\n\")\n        else:\n            # Update specific key-values based on template_params\n            updated = False\n            for key, value in template_params.items():\n                if stripped_line.startswith(f\"{key}=\"):\n                    updated_lines.append(f\"{key}={value}\\n\")\n                    found_keys.add(key)\n                    updated = True\n                    break\n            if not updated:\n                for key, value in FORCED_KV.items():\n                    if stripped_line.startswith(f\"{key}=\"):\n                        updated_lines.append(f\"{key}={value}\\n\")\n                        found_keys.add(key)\n                        updated = True\n                        break\n            if not updated:\n                # Keep the line as-is if no match\n                updated_lines.append(line)\n\n    for key, value in template_params.items():\n        if key not in found_keys:\n            updated_lines.append(f\"{key}={value}\\n\")\n    for key, value in FORCED_KV.items():\n        if key not in found_keys:\n            updated_lines.append(f\"{key}={value}\\n\")\n\n    with open(CONFIG_FILE_PATH, 'w') as file:\n        file.writelines(updated_lines)\n    # Slurm 24.11 supports disabling cgroups, avoiding systemd/cgroup requirements in containers.\n    with 
open(\"/etc/slurm/cgroup.conf\", \"w\") as file:\n        file.write(\"CgroupPlugin=disabled\\n\")\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "galaxy/ansible/templates/container_resolvers_conf.yml.j2",
    "content": "{% if container_resolution_explicit %}\n- type: explicit\n{% endif %}\n{% if container_resolution_cached_mulled %}\n- type: cached_mulled\n- type: cached_mulled_singularity\n  cache_directory: \"/cvmfs/singularity.galaxyproject.org/all\"\n- type: cached_mulled_singularity\n  cache_directory: \"/export/container_cache/singularity/mulled\"\n{% endif %}\n{% if container_resolution_mulled %}\n- type: mulled\n  namespace: \"{{ container_resolution_mulled_namespace }}\"\n{% endif %}\n{% if container_resolution_build_mulled %}\n- type: build_mulled\n  namespace: local\n{% endif %}\n"
  },
  {
    "path": "galaxy/ansible/templates/create_galaxy_user.py.j2",
    "content": "#!/usr/bin/env python\nimport sys\nsys.path.insert(1,'{{ galaxy_server_dir }}')\nsys.path.insert(1,'{{ galaxy_server_dir }}/lib')\n\nfrom galaxy.model import User, APIKeys\nfrom galaxy.model.mapping import init\nfrom galaxy.model.orm.scripts import get_config\nimport argparse\n\ndef add_user(sa_session, security_agent, email, password, key=None, username=\"admin\"):\n    \"\"\"\n        Add Galaxy User.\n        From John https://gist.github.com/jmchilton/4475646\n    \"\"\"\n    query = sa_session.query( User ).filter_by( email=email )\n    user = None\n    User.use_pbkdf2 = {{ use_pbkdf2 }}\n    if query.count() > 0:\n        user = query.first()\n        user.username = username\n        user.set_password_cleartext(password)\n        sa_session.add(user)\n        sa_session.flush()\n    else:\n        user = User(email)\n        user.username = username\n        user.set_password_cleartext(password)\n        sa_session.add(user)\n        sa_session.flush()\n\n        security_agent.create_private_user_role( user )\n        if not user.default_permissions:\n            security_agent.user_set_default_permissions( user, history=True, dataset=True )\n\n    if key is not None:\n        query = sa_session.query( APIKeys ).filter_by( user_id=user.id ).delete()\n        sa_session.flush()\n\n        api_key = APIKeys()\n        api_key.user_id = user.id\n        api_key.key = key\n        sa_session.add(api_key)\n        sa_session.flush()\n\n    sa_session.commit()\n    return user\n\n\nif __name__ == \"__main__\":\n    db_url = get_config(sys.argv, use_argparse=False)['db_url']\n\n    parser = argparse.ArgumentParser(description='Create Galaxy Admin User.')\n\n    parser.add_argument(\"--user\", required=True,\n                    help=\"Username, it should be an email address.\")\n    parser.add_argument(\"--password\", required=True,\n                    help=\"Password.\")\n    parser.add_argument(\"--key\", help=\"API-Key.\")\n    
parser.add_argument(\"--username\", default=\"admin\",\n                    help=\"The public username. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the '-' character.\")\n    parser.add_argument('args', nargs=argparse.REMAINDER)\n\n    options = parser.parse_args()\n\n    mapping = init('/tmp/', db_url)\n    sa_session = mapping.context\n    security_agent = mapping.security_agent\n\n    add_user(sa_session, security_agent, options.user, options.password, key=options.key, username=options.username)\n"
  },
  {
    "path": "galaxy/ansible/templates/export_user_files.py.j2",
    "content": "#!/usr/bin/env python\nimport fnmatch\nimport glob\nimport sys\nimport os\nimport re\nimport hashlib\nimport shutil\nimport subprocess\n\nPG_VERSION = os.environ.get('PG_VERSION', '15')\nGALAXY_UID = int(os.environ['GALAXY_UID'])\nGALAXY_GID = int(os.environ['GALAXY_GID'])\nGALAXY_ROOT_DIR = os.environ.get('GALAXY_ROOT_DIR', '/galaxy/')\nGALAXY_EXPORT_MARKER_PATH = '/export/.galaxy_export_marker'\n\nif len( sys.argv ) == 2:\n    PG_DATA_DIR_DEFAULT = sys.argv[1]\nelse:\n    PG_DATA_DIR_DEFAULT = f\"/var/lib/postgresql/{PG_VERSION}/main\"\nPG_DATA_DIR_HOST = os.environ.get(\"PG_DATA_DIR_HOST\", f\"/export/postgresql/{PG_VERSION}/main/\")\n\ndef change_path( src ):\n    \"\"\"\n        src will be copied to /export/`src` and a symlink will be placed in src pointing to /export/\n    \"\"\"\n    if os.path.exists( src ):\n        dest = os.path.join( '/export/', src.strip('/') )\n        # if destination is empty move all files into /export/ and symlink back to source\n        if not os.path.exists( dest ):\n            dest_dir = os.path.dirname(dest)\n            if not os.path.exists( dest_dir ):\n                os.makedirs(dest_dir)\n            shutil.move( src, dest )\n            os.symlink( dest, src.rstrip('/') )\n            os.chown( src, GALAXY_UID, GALAXY_GID )\n            subprocess.call( f'chown -R {GALAXY_UID}:{GALAXY_GID} {dest}', shell=True )\n        # if destination exists (e.g. 
continuing a previous session), remove source and symlink\n        else:\n            if not os.path.realpath( src ) == os.path.realpath( dest ):\n                stripped_src = src.rstrip('/')\n                if not os.path.islink( stripped_src ):\n                    if os.path.isdir( stripped_src ):\n                        shutil.rmtree( stripped_src )\n                    else:\n                        os.unlink( stripped_src )\n                    os.symlink( dest, src.rstrip('/') )\n\n\ndef copy_samples(src, dest):\n    if not os.path.realpath(src) == os.path.realpath(dest):\n        for filename in os.listdir(src):\n            if filename.endswith('ml.sample') or filename.endswith('ml.sample_advanced') or filename.endswith('ml.sample_basic'):\n                distrib_file = os.path.join(src, filename)\n                export_file = os.path.join(dest, filename)\n                shutil.copy(distrib_file, export_file)\n                os.chown(export_file, GALAXY_UID, GALAXY_GID)\n\n\ndef _makedir(path):\n    if not os.path.exists( path ):\n        os.makedirs( path )\n    os.chown(path, GALAXY_UID, GALAXY_GID)\n\n\ndef _ignore_static(dir, *patterns):\n    def __ignore_static(path, names):\n        ignored_names = []\n        if dir in path:\n            for pattern in patterns:\n                ignored_names.extend(fnmatch.filter(names, pattern))\n        return set(ignored_names)\n    return __ignore_static\n\ndef _read_image_marker():\n    marker = os.environ.get('GALAXY_EXPORT_MARKER')\n    if marker:\n        return marker.strip()\n    version_py = os.path.join(GALAXY_ROOT_DIR, 'lib', 'galaxy', 'version.py')\n    if os.path.exists(version_py):\n        try:\n            with open(version_py, 'r', encoding='utf-8', errors='ignore') as handle:\n                text = handle.read()\n            # Extract __version__ without importing Galaxy modules during startup.\n            match = 
re.search(r'__version__\\s*=\\s*[\\'\"]([^\\'\"]+)[\\'\"]', text)\n            if match:\n                return f\"version:{match.group(1)}\"\n            digest = hashlib.sha256(text.encode('utf-8')).hexdigest()\n            return f\"version_py_sha256:{digest}\"\n        except Exception:\n            pass\n    version_file = os.path.join(GALAXY_ROOT_DIR, 'VERSION')\n    if os.path.exists(version_file):\n        try:\n            with open(version_file, 'rb') as handle:\n                digest = hashlib.sha256(handle.read()).hexdigest()\n            return f\"version_file_sha256:{digest}\"\n        except Exception:\n            pass\n    return None\n\ndef _should_copy_distribution(marker):\n    if not os.path.exists('/export/galaxy'):\n        return True\n    if not marker:\n        return True\n    try:\n        # Explicit UTF-8 decoding avoids locale-dependent behavior for marker files.\n        with open(GALAXY_EXPORT_MARKER_PATH, 'r', encoding='utf-8') as handle:\n            return handle.read().strip() != marker\n    except OSError:\n        return True\n\ndef _write_marker(marker):\n    if not marker:\n        return\n    tmp_path = f\"{GALAXY_EXPORT_MARKER_PATH}.tmp\"\n    with open(tmp_path, 'w', encoding='utf-8') as handle:\n        handle.write(marker + '\\n')\n    os.replace(tmp_path, GALAXY_EXPORT_MARKER_PATH)\n\nif __name__ == \"__main__\":\n    \"\"\"\n        If the '/export/' folder exists, meaning docker was started with '-v /home/foo/bar:/export',\n        we will link every file that needs to persist to the host system. 
A marker file at\n        /export/.galaxy_export_marker is written to indicate the export contents match the image version.\n        If the user re-starts (with docker start) the container and the marker matches, the linking\n        is skipped.\n    \"\"\"\n    marker = _read_image_marker()\n    if _should_copy_distribution(marker):\n        galaxy_distrib_paths = {os.path.join(GALAXY_ROOT_DIR, 'config'): '/export/.distribution_config',\n                                os.path.join(GALAXY_ROOT_DIR, 'lib'): '/export/galaxy/lib',\n                                os.path.join(GALAXY_ROOT_DIR, 'tools'): '/export/galaxy/tools'}\n        for image_path, export_path in galaxy_distrib_paths.items():\n            if os.path.exists(export_path):\n                shutil.rmtree(export_path)\n            # Ignore 2 dead symlinks in galaxy code: see https://github.com/galaxyproject/galaxy/issues/9847\n            shutil.copytree( image_path, export_path, ignore=_ignore_static(os.path.join(GALAXY_ROOT_DIR, '/lib/galaxy/web/framework/static/style'), 'question-octagon-frame.png', 'ok_small.png') )\n\n        shutil.copy(os.path.join(GALAXY_ROOT_DIR, 'requirements.txt'), '/export/galaxy/requirements.txt')\n        _write_marker(marker)\n\n    _makedir('/export/galaxy/')\n    _makedir('/export/ftp/')\n\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'config') )\n\n    # Copy all sample config files to config dir\n    # TODO find a way to update plugins/ without breaking user customizations\n    config_src = os.path.join(GALAXY_ROOT_DIR, 'config')\n    config_dest = os.path.join('/export/', GALAXY_ROOT_DIR, 'config')\n    copy_samples(config_src, config_dest)\n\n    # Copy all sample files to tool-data dir\n    # TODO find a way to update shared/ without breaking user customizations\n    tool_data_src = os.path.join(GALAXY_ROOT_DIR, 'tool-data')\n    tool_data_dest = os.path.join('/export/', GALAXY_ROOT_DIR, 'tool-data')\n    copy_samples(tool_data_src, tool_data_dest)\n\n    # TODO 
find a way to update /export/galaxy/display_applications/ without breaking user customizations\n\n    # Copy all files starting with \"welcome\"\n    # This enables a flexible start page design.\n    for filename in os.listdir('/export/'):\n        if filename.startswith('welcome'):\n            export_file = os.path.join( '/export/', filename)\n            image_file = os.path.join('/etc/galaxy/web/', filename)\n            shutil.copy(export_file, image_file)\n            os.chown( image_file, GALAXY_UID, GALAXY_GID )\n\n    # copy image defaults to config/<file>.docker_sample to base derivatives on,\n    # and if there is a realized version of these files in the export directory\n    # replace Galaxy's copy with these. Use symbolic link instead of copying so\n    # deployer can update and reload Galaxy and changes will be reflected.\n    for config in [ 'galaxy.yml', 'gravity.yml' ,'job_conf.xml' ]:\n        image_config = os.path.join('/etc/galaxy/', config)\n        export_config = os.path.join( '/export/galaxy/config', config )\n        export_sample = export_config + \".docker_sample\"\n        shutil.copy(image_config, export_sample)\n        if os.path.exists(export_config):\n            subprocess.call('ln -s -f %s %s' % (export_config, image_config), shell=True)\n\n    # Update Conda version if needed\n    if os.environ.get('GALAXY_AUTO_UPDATE_CONDA', '0') != '0':\n        src_conda = '/tool_deps/_conda/'\n        dest_conda = '/export/tool_deps/_conda/'\n        if os.path.exists(dest_conda) and os.path.realpath(src_conda) != os.path.realpath(dest_conda):\n            for subdir in ['bin', 'compiler_compat', 'conda-meta', 'etc', 'include', 'lib', 'share', 'ssl', 'x86_64-conda_cos6-linux-gnu']:\n                if os.path.exists(os.path.join(dest_conda, subdir)):\n                    shutil.rmtree(os.path.join(dest_conda, subdir))\n                subprocess.call('cp -p --preserve -R %s %s' % (os.path.join(src_conda, subdir), os.path.join(dest_conda, 
subdir)), shell=True)\n\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'tools.yaml') )\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'integrated_tool_panel.xml') )\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'display_applications') )\n    change_path( os.path.join('/tool_deps') )\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'tool-data') )\n    change_path( os.path.join(GALAXY_ROOT_DIR, 'database') )\n\n    if os.path.exists('/export/common_htpasswd'):\n        shutil.copy('/export/common_htpasswd', '/etc/nginx/htpasswd')\n\n    try:\n        change_path('/var/lib/docker/')\n    except:\n        # In case of unprivileged access this will result in a \"Device or resource busy.\" error.\n        pass\n\n    if not os.path.exists( PG_DATA_DIR_HOST ) or 'PG_VERSION' not in os.listdir( PG_DATA_DIR_HOST ):\n        dest_dir = os.path.dirname( PG_DATA_DIR_HOST )\n        if not os.path.exists( dest_dir ):\n            os.makedirs(dest_dir)\n        # User given dbpath, usually a directory from the host machine\n        # copy the postgresql data folder to the new location\n        subprocess.call('cp -R %s/* %s' % (PG_DATA_DIR_DEFAULT, PG_DATA_DIR_HOST), shell=True)\n        os.symlink( os.path.join(os.environ.get('PG_CONF_DIR_DEFAULT'), 'conf.d'), os.path.join(PG_DATA_DIR_HOST, 'conf.d') )\n        # copytree needs an non-existing dst dir, how annoying :(\n        # shutil.copytree(PG_DATA_DIR_DEFAULT, PG_DATA_DIR_HOST)\n        subprocess.call('chown -R postgres:postgres /export/postgresql/', shell=True)\n        subprocess.call('chmod -R 0755 /export/', shell=True)\n        subprocess.call('chmod -R 0700 %s' % PG_DATA_DIR_HOST, shell=True)\n"
  },
  {
    "path": "galaxy/ansible/templates/file_source_templates.yml.j2",
    "content": "- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_azure.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_ftp.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_s3fs.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_aws_private_bucket.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_aws_public_bucket.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_dropbox.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_google_drive.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/files/templates/examples/production_webdav.yml\"\n- include: \"{{ galaxy_config_dir }}/production_b2drop.yml\"\n"
  },
  {
    "path": "galaxy/ansible/templates/gravity.yml.j2",
    "content": "# Configuration for Gravity process manager.\ngravity:\n\n  # Process manager to use.\n  # ``supervisor`` is the default process manager when Gravity is invoked as a non-root user.\n  # ``systemd`` is the default when Gravity is invoked as root.\n  # Valid options are: supervisor, systemd\n  process_manager: {{ gravity_process_manager }}\n\n  # What command to write to the process manager configs\n  # `gravity` (`galaxyctl exec <service-name>`) is the default\n  # `direct` (each service's actual command) is also supported.\n  # Valid options are: gravity, direct\n  # service_command_style: gravity\n\n  # Use the process manager's *service instance* functionality for services that can run multiple instances.\n  # Presently this includes services like gunicorn and Galaxy dynamic job handlers. Service instances are only supported if\n  # ``service_command_style`` is ``gravity``, and so this option is automatically set to ``false`` if\n  # ``service_command_style`` is set to ``direct``.\n  # use_service_instances: true\n\n  # umask under which services should be executed. Setting ``umask`` on an individual service overrides this value.\n  # umask: '022'\n\n  # Memory limit (in GB), processes exceeding the limit will be killed. Default is no limit. If set, this is default value\n  # for all services. Setting ``memory_limit`` on an individual service overrides this value. Ignored if ``process_manager``\n  # is ``supervisor``.\n  # memory_limit:\n\n  # Specify Galaxy config file (galaxy.yml), if the Gravity config is separate from the Galaxy config. 
Assumed to be the\n  # same file as the Gravity config if a ``galaxy`` key exists at the root level, otherwise, this option is required.\n  galaxy_config_file: {{ galaxy_config_file }}\n\n  # Specify Galaxy's root directory.\n  # Gravity will attempt to find the root directory, but you can set the directory explicitly with this option.\n  galaxy_root: {{ galaxy_server_dir }}\n\n  # User to run Galaxy as, required when using the systemd process manager as root.\n  # Ignored if ``process_manager`` is ``supervisor`` or user-mode (non-root) ``systemd``.\n  galaxy_user: {{ galaxy_user_name }}\n\n  # Group to run Galaxy as, optional when using the systemd process manager as root.\n  # Ignored if ``process_manager`` is ``supervisor`` or user-mode (non-root) ``systemd``.\n  # galaxy_group:\n\n  # Set to a directory that should contain log files for the processes controlled by Gravity.\n  # If not specified defaults to ``<galaxy_data_dir>/gravity/log``.\n  log_dir: {{ galaxy_logs_dir }}\n\n  # Set to Galaxy's virtualenv directory.\n  # If not specified, Gravity assumes all processes are on PATH. This option is required in most circumstances when using\n  # the ``systemd`` process manager.\n  virtualenv: {{ galaxy_venv_dir }}\n\n  # Select the application server.\n  # ``gunicorn`` is the default application server.\n  # ``unicornherder`` is a production-oriented manager for (G)unicorn servers that automates zero-downtime Galaxy server restarts,\n  # similar to uWSGI Zerg Mode used in the past.\n  # Valid options are: gunicorn, unicornherder\n  # app_server: gunicorn\n\n  # Override the default instance name.\n  # this is hidden from you when running a single instance.\n  # instance_name: _default_\n\n  # Configuration for Gunicorn. Can be a list to run multiple gunicorns for rolling restarts.\n  gunicorn:\n\n    # Enable Galaxy gunicorn server.\n    enable: {{ galaxy_gunicorn }}\n\n    # The socket to bind. 
A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``, ``fd://FD``. An IP is a valid HOST.\n    bind: \"127.0.0.1:{{ gunicorn_port }}\"\n\n    # Controls the number of Galaxy application processes Gunicorn will spawn.\n    # Increased web performance can be attained by increasing this value.\n    # If Gunicorn is the only application on the server, a good starting value is the number of CPUs * 2 + 1.\n    # 4-12 workers should be able to handle hundreds if not thousands of requests per second.\n    workers: {{ gunicorn_workers }}\n\n    # Gunicorn workers silent for more than this many seconds are killed and restarted.\n    # Value is a positive number or 0. Setting it to 0 has the effect of infinite timeouts by disabling timeouts for all workers entirely.\n    # If you disable the ``preload`` option workers need to have finished booting within the timeout.\n    # timeout: 300\n\n    # Extra arguments to pass to Gunicorn command line.\n    # extra_args:\n\n    # Use Gunicorn's --preload option to fork workers after loading the Galaxy Application.\n    # Consumes less memory when multiple processes are configured. Default is ``false`` if using unicornherder, else ``true``.\n    # preload:\n\n    # umask under which service should be executed\n    # umask:\n\n    # Value of supervisor startsecs, systemd TimeoutStartSec\n    # start_timeout: 15\n\n    # Value of supervisor stopwaitsecs, systemd TimeoutStopSec\n    # stop_timeout: 65\n\n    # Amount of time to wait for a server to become alive when performing rolling restarts.\n    # restart_timeout: 300\n\n    # Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the\n    # ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is\n    # ``supervisor``.\n    # memory_limit:\n\n    # Extra environment variables and their values to set when running the service. 
A dictionary where keys are the variable\n    # names.\n    # environment: {}\n\n  # Configuration for Celery Processes.\n  celery:\n\n    # Enable Celery distributed task queue.\n    enable: {{ galaxy_celery }}\n\n    # Enable Celery Beat periodic task runner.\n    enable_beat: {{ galaxy_celery_beat }}\n\n    # Number of Celery Workers to start.\n    concurrency: {{ celery_workers }}\n\n    # Log Level to use for Celery Worker.\n    # Valid options are: DEBUG, INFO, WARNING, ERROR\n    # loglevel: DEBUG\n\n    # Queues to join\n    # queues: celery,galaxy.internal,galaxy.external\n\n    # Pool implementation\n    # Valid options are: prefork, eventlet, gevent, solo, processes, threads\n    # pool: threads\n\n    # Extra arguments to pass to Celery command line.\n    # extra_args:\n\n    # umask under which service should be executed\n    # umask:\n\n    # Value of supervisor startsecs, systemd TimeoutStartSec\n    # start_timeout: 10\n\n    # Value of supervisor stopwaitsecs, systemd TimeoutStopSec\n    # stop_timeout: 10\n\n    # Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the\n    # ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is\n    # ``supervisor``.\n    # memory_limit:\n\n    # Extra environment variables and their values to set when running the service. A dictionary where keys are the variable\n    # names.\n    # environment: {}\n\n  # Configuration for gx-it-proxy.\n  gx_it_proxy:\n\n    # Set to true to start gx-it-proxy\n    enable: {{ galaxy_gx_it_proxy }}\n\n    # gx-it-proxy version\n    version: '{{ gx_it_proxy_version }}'\n\n    # Public-facing IP of the proxy\n    ip: 127.0.0.1\n\n    # Public-facing port of the proxy\n    port: {{ gx_it_proxy_port }}\n\n    # Routes file to monitor.\n    # Should be set to the same path as ``interactivetools_map`` in the ``galaxy:`` section. 
This is ignored if\n    # ``interactivetools_map is set``.\n    sessions: {{ gx_it_proxy_sessions_path }}\n\n    # Include verbose messages in gx-it-proxy\n    # verbose: true\n\n    # Forward all requests to IP.\n    # This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.\n    # forward_ip:\n\n    # Forward all requests to port.\n    # This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.\n    # forward_port:\n\n    # Rewrite location blocks with proxy port.\n    # This is an advanced option that is only needed when proxying to remote interactive tool container that cannot be reached through the local network.\n    # reverse_proxy: false\n\n    # umask under which service should be executed\n    # umask:\n\n    # Value of supervisor startsecs, systemd TimeoutStartSec\n    # start_timeout: 10\n\n    # Value of supervisor stopwaitsecs, systemd TimeoutStopSec\n    # stop_timeout: 10\n\n    # Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the\n    # ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is\n    # ``supervisor``.\n    # memory_limit:\n\n    # Extra environment variables and their values to set when running the service. 
A dictionary where keys are the variable\n    # names.\n    # environment: {}\n\n  # Configuration for tusd server (https://github.com/tus/tusd).\n  # The ``tusd`` binary must be installed manually and made available on PATH (e.g in galaxy's .venv/bin directory).\n  tusd:\n\n    # Enable tusd server.\n    # If enabled, you also need to set up your proxy as outlined in https://docs.galaxyproject.org/en/latest/admin/nginx.html#receiving-files-via-the-tus-protocol.\n    enable: {{ galaxy_tusd }}\n\n    # Path to tusd binary\n    tusd_path: {{ tusd_path }}\n\n    # Host to bind the tusd server to\n    host: 127.0.0.1\n\n    # Port to bind the tusd server to\n    port: {{ tusd_port }}\n\n    # Directory to store uploads in.\n    # Must match ``tus_upload_store`` setting in ``galaxy:`` section.\n    upload_dir: {{ tus_upload_store_path }}\n\n    # Value of tusd -hooks-httpd option\n    #\n    # the default of is suitable for using tusd for Galaxy uploads and should not be changed unless you are using tusd for\n    # other purposes such as Pulsar staging.\n    #\n    # The value of galaxy_infrastructure_url is automatically prepended if the option starts with a `/`\n    # hooks_http: /api/upload/hooks\n\n    # Comma-separated string of enabled tusd hooks.\n    #\n    # Leave at the default value to require authorization at upload creation time.\n    # This means Galaxy's web process does not need to be running after creating the initial\n    # upload request.\n    #\n    # Set to empty string to disable all authorization. 
This means data can be uploaded (but not processed)\n    # without the Galaxy web process being available.\n    #\n    # You can find a list of available hooks at https://github.com/tus/tusd/blob/master/docs/hooks.md#list-of-available-hooks.\n    # hooks_enabled_events: pre-create\n\n    # Extra arguments to pass to tusd command line.\n    extra_args: -behind-proxy -base-path {{ tusd_base_path }}\n\n    # umask under which service should be executed\n    # umask:\n\n    # Value of supervisor startsecs, systemd TimeoutStartSec\n    # start_timeout: 10\n\n    # Value of supervisor stopwaitsecs, systemd TimeoutStopSec\n    # stop_timeout: 10\n\n    # Memory limit (in GB). If the service exceeds the limit, it will be killed. Default is no limit or the value of the\n    # ``memory_limit`` setting at the top level of the Gravity configuration, if set. Ignored if ``process_manager`` is\n    # ``supervisor``.\n    # memory_limit:\n\n    # Extra environment variables and their values to set when running the service. A dictionary where keys are the variable\n    # names.\n    # environment: {}\n\n  # Configure dynamic handlers in this section.\n  # See https://docs.galaxyproject.org/en/latest/admin/scaling.html#dynamically-defined-handlers for details.\n{% if not galaxy_dynamic_handlers %}\n  handlers: {}\n{% else %}\n  handlers:\n    handler:\n      processes: {{ galaxy_handler_processes }}\n      pools:\n        - job-handlers\n        - workflow-schedulers\n{% endif %}\n"
  },
  {
    "path": "galaxy/ansible/templates/job_conf.xml.j2",
    "content": "<?xml version=\"1.0\"?>\n{% import \"macros.xml.j2\" as macros with context %}\n<job_conf>\n    <plugins workers=\"2\">\n{% if galaxy_slurm %}\n        <plugin id=\"slurm\" type=\"runner\" load=\"galaxy.jobs.runners.slurm:SlurmJobRunner\">\n            <param id=\"drmaa_library_path\">/usr/lib/slurm-drmaa/lib/libdrmaa.so</param>\n            <param id=\"enabled\" from_environ=\"GALAXY_RUNNERS_ENABLE_SLURM\">true</param>\n        </plugin>\n{% endif %}\n{% if galaxy_condor %}\n        <plugin id=\"condor\" type=\"runner\" load=\"galaxy.jobs.runners.condor:CondorJobRunner\">\n            <param id=\"enabled\" from_environ=\"GALAXY_RUNNERS_ENABLE_CONDOR\">true</param>\n        </plugin>\n{% endif %}\n{% if galaxy_pbs %}\n        <plugin id=\"pbs\" type=\"runner\" load=\"galaxy.jobs.runners.drmaa:DRMAAJobRunner\">\n            <param id=\"drmaa_library_path\">/usr/lib/pbs-drmaa/lib/libdrmaa.so.1</param>\n            <param id=\"enabled\" from_environ=\"GALAXY_RUNNERS_ENABLE_PBS\">true</param>\n        </plugin>\n{% endif %}\n{% if galaxy_k8s_jobs %}\n        <plugin id=\"k8s\" type=\"runner\" load=\"galaxy.jobs.runners.kubernetes:KubernetesJobRunner\">\n            <!-- We are inside of Kubernetes so use this. 
-->\n            <param id=\"k8s_use_service_account\" from_environ=\"GALAXY_RUNNERS_K8S_USE_SERVICE_ACCOUNT\">{{ galaxy_k8s_jobs_use_service_account }}</param>\n            <param id=\"k8s_persistent_volume_claims\" from_environ=\"GALAXY_RUNNERS_K8S_PERSISTENT_VOLUME_CLAIMS\">{{ galaxy_k8s_jobs_persistent_volume_claims }}</param>\n            <param id=\"k8s_namespace\" from_environ=\"GALAXY_RUNNERS_K8S_NAMESPACE\">{{ galaxy_k8s_jobs_namespace }}</param>\n            <param id=\"k8s_supplemental_group_id\" from_environ=\"GALAXY_RUNNERS_K8S_SUPPLEMENTAL_GROUP_ID\">{{ galaxy_k8s_jobs_supplemental_group_id }}</param>\n            <param id=\"k8s_fs_group_id\" from_environ=\"GALAXY_RUNNERS_K8S_FS_GROUP_ID\">{{ galaxy_k8s_jobs_fs_group_id }}</param>\n            <param id=\"k8s_pull_policy\" from_environ=\"GALAXY_RUNNERS_K8S_PULL_POLICY\">{{ galaxy_k8s_jobs_pull_policy }}</param>\n            <param id=\"enabled\" from_environ=\"GALAXY_RUNNERS_ENABLE_K8\">true</param>\n        </plugin>\n{% endif %}\n        <plugin id=\"local\" type=\"runner\" load=\"galaxy.jobs.runners.local:LocalJobRunner\"/>\n    </plugins>\n{% if not galaxy_dynamic_handlers %}\n    <!-- The default handler can be changed by specifying the GALAXY_HANDLERS_DEFAULT environment variable. -->\n    <handlers default_from_environ=\"GALAXY_HANDLERS_DEFAULT\" default=\"handlers\">\n  {% if galaxy_handler_processes == 0 %}\n        <handler id=\"web0\" tags=\"handlers\"/>\n  {% else %}\n    {% for i in range(galaxy_handler_processes) %}\n        <handler id=\"handler{{ i }}\" tags=\"handlers\"/>\n    {% endfor %}\n  {% endif %}\n    </handlers>\n{% else %}\n    <!-- Dynamic handlers -->\n    <handlers assign_with=\"db-skip-locked\" />\n{% endif %}\n    <!-- The default destination can be changed by specifying the GALAXY_DESTINATIONS_DEFAULT environment variable. 
-->\n    <destinations default_from_environ=\"GALAXY_DESTINATIONS_DEFAULT\" default=\"{{ galaxy_destination_default }}\">\n        <destination id=\"docker_dispatch\" runner=\"dynamic\">\n            <!-- Allow different default destinations based on whether the tool\n                 supports Docker or not. -->\n            <param id=\"type\">docker_dispatch</param>\n            <param id=\"docker_destination_id\" from_environ=\"GALAXY_DESTINATIONS_DOCKER_DEFAULT\">{{ galaxy_destination_docker_default }}</param>\n            <param id=\"default_destination_id\" from_environ=\"GALAXY_DESTINATIONS_NO_DOCKER_DEFAULT\">{{ galaxy_destination_default }}</param>\n        </destination>\n        <!--destination id=\"singularity_dispatch\" runner=\"dynamic\">\n            <param id=\"type\">singularity_dispatch</param>\n            <param id=\"singularity_destination_id\" from_environ=\"GALAXY_DESTINATIONS_DOCKER_DEFAULT\">{{ galaxy_destination_singularity_default }}</param>\n            <param id=\"default_destination_id\" from_environ=\"GALAXY_DESTINATIONS_NO_DOCKER_DEFAULT\">{{ galaxy_destination_default }}</param>\n        </destination-->\n        {% call macros.destination(\"local_no_container\", \"local\") %}{% endcall %}\n        {% call macros.destination(\"local_docker\", \"local\", container_type=\"docker\") %}{% endcall %}\n        {% call macros.destination(\"local_force_docker\", \"local\", container_type=\"docker\", force_container=True) %}{% endcall %}\n{% if galaxy_pbs %}\n        {% call macros.destination(\"pbs_cluster\", \"pbs\") %}{% endcall %}\n        {% call macros.destination(\"pbs_cluster_docker\", \"pbs\", container_type=\"docker\") %}{% endcall %}\n        {% call macros.destination(\"pbs_cluster_force_docker\", \"pbs\", container_type=\"docker\", force_container=True) %}{% endcall %}\n{% endif %}\n{% if galaxy_slurm %}\n        {% call macros.destination(\"slurm_cluster\", \"slurm\") %}\n            <param id=\"nativeSpecification\" 
from_environ=\"NATIVE_SPEC\">--ntasks={{ galaxy_slurm_ntask | string }} --share</param>\n        {% endcall %}\n        <!-- Docker -->\n        {% call macros.destination(\"slurm_cluster_docker\", \"slurm\", container_type=\"docker\") %}\n            <param id=\"nativeSpecification\" from_environ=\"NATIVE_SPEC\">--ntasks={{ galaxy_slurm_ntask | string }} --share</param>\n        {% endcall %}\n        {% call macros.destination(\"slurm_cluster_force_docker\", \"slurm\", container_type=\"docker\", force_container=True) %}\n            <param id=\"nativeSpecification\" from_environ=\"NATIVE_SPEC\">--ntasks={{ galaxy_slurm_ntask | string }} --share</param>\n        {% endcall %}\n        <!-- Singularity -->\n        {% call macros.destination(\"slurm_cluster_singularity\", \"slurm\", container_type=\"singularity\") %}\n            <param id=\"nativeSpecification\" from_environ=\"NATIVE_SPEC\">--ntasks={{ galaxy_slurm_ntask | string }} --share</param>\n        {% endcall %}\n\n{% endif %}\n{% if galaxy_condor %}\n        {% call macros.destination(\"condor_cluster\", \"condor\") %}\n            <param id=\"universe\" from_environ=\"GALAXY_CONDOR_UNIVERSE\">vanilla</param>\n        {% endcall %}\n        {% call macros.destination(\"condor_cluster_docker\", \"condor\", container_type=\"docker\") %}\n            <param id=\"universe\" from_environ=\"GALAXY_CONDOR_UNIVERSE\">vanilla</param>\n        {% endcall %}\n        {% call macros.destination(\"condor_cluster_force_docker\", \"condor\", container_type=\"docker\", force_container=True) %}\n            <param id=\"universe\" from_environ=\"GALAXY_CONDOR_UNIVERSE\">vanilla</param>\n        {% endcall %}\n        {% call macros.destination(\"condor_docker_universe\", \"condor\", container_type=\"docker\", force_container=True) %}\n            <param id=\"universe\" from_environ=\"GALAXY_CONDOR_UNIVERSE\">docker</param>\n        {% endcall %}\n        <!-- Following destinations send to basic Condor runner if no Docker 
image is available\n             otherwise they both use the Docker image - the first submits a normal Condor job\n             that will run Docker on the resulting worker node and the second uses Condor's\n             native Docker universe support.\n        -->\n        {{ macros.docker_dispatch_destination(\"condor_docker_cluster_dispatch\", \"condor_cluster_docker\", \"condor_cluster\")}}\n        {{ macros.docker_dispatch_destination(\"condor_docker_universe_dispatch\", \"condor_docker_universe\", \"condor_cluster\")}}\n{% endif %}\n{% if galaxy_k8s_jobs %}\n        {% call macros.destination(\"k8s_default\", \"k8s\", container_type=\"docker\", force_container=True) %}{% endcall %}\n        {{ macros.docker_dispatch_destination(\"k8s_or_local_dispatch\", \"k8s_default\", \"local_no_container\")}}\n        {{ macros.docker_dispatch_destination(\"k8s_or_slurm_dispatch\", \"k8s_default\", \"slurm_cluster\")}}\n        {{ macros.docker_dispatch_destination(\"k8s_or_condor_dispatch\", \"k8s_default\", \"condor_cluster\")}}\n{% endif %}\n    </destinations>\n    <limits>\n    </limits>\n</job_conf>\n"
  },
  {
    "path": "galaxy/ansible/templates/job_metrics_conf.yml.j2",
    "content": "{% if galaxy_job_metrics_core %}\n- type: core\n{% endif %}\n{% if galaxy_job_metrics_cpuinfo and galaxy_job_metrics_cpuinfo == \"verbose\" %}\n- type: cpuinfo\n  verbose: false\n{% elif galaxy_job_metrics_cpuinfo %}\n- type: cpuinfo\n{% endif %}\n{% if galaxy_job_metrics_meminfo %}\n- type: meminfo\n{% endif %}\n{% if galaxy_job_metrics_uname %}\n- type: uname\n{% endif %}\n{% if galaxy_job_metrics_env %}\n- type: env\n{% endif %}\n{% if galaxy_job_metrics_hostname %}\n- type: hostname\n{% endif %}\n{% if galaxy_job_metrics_cgroup %}\n- type: cgroup\n{% endif %}\n"
  },
  {
    "path": "galaxy/ansible/templates/macros.xml.j2",
    "content": "{% macro destination(id, runner, container_type=None, force_container=False) -%}\n    <destination id=\"{{ id }}\" runner=\"{{ runner }}\">\n        <env file=\"{{ galaxy_venv_dir }}/bin/activate\"/>\n        {% if galaxy_source_shellrc %}\n            <env file=\"{{ galaxy_user_shellrc }}\" />\n        {% endif %}\n        <param id=\"enabled\" from_environ=\"GALAXY_RUNNERS_ENABLE_{{ runner|upper }}\">true</param>\n\n        {% if container_type == 'docker' %}\n            <param id=\"docker_enabled\">true</param>\n            <param id=\"docker_sudo\" from_environ=\"GALAXY_DOCKER_SUDO\">{{ galaxy_docker_sudo | string }}</param>\n            <!-- The empty volumes from shouldn't affect Galaxy, set GALAXY_DOCKER_VOLUMES_FROM to use. -->\n            <param id=\"docker_volumes_from\" from_environ=\"GALAXY_DOCKER_VOLUMES_FROM\">{{ galaxy_docker_volumes_from }}</param>\n            <!-- For a stock Galaxy instance and traditional job runner $defaults will expand out as: $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw -->\n            <param id=\"docker_volumes\" from_environ=\"GALAXY_DOCKER_VOLUMES\">{{ galaxy_docker_volumes }}</param>\n            <param id=\"docker_net\" from_environ=\"GALAXY_DOCKER_NET\">{{ galaxy_docker_net }}</param>\n            <param id=\"docker_auto_rm\" from_environ=\"GALAXY_DOCKER_AUTO_RM\">{{ galaxy_docker_auto_rm | string }}</param>\n            <param id=\"docker_set_user\" from_environ=\"GALAXY_DOCKER_SET_USER\">{{ galaxy_docker_set_user }}</param>\n            {% if force_container %}\n                <param id=\"docker_default_container_id\" from_environ=\"GALAXY_DOCKER_DEFAULT_CONTAINER\">{{ galaxy_docker_default_image }}</param>\n            {% endif %}\n        {% endif %}\n\n        {% if container_type == 'singularity' %}\n            <param id=\"singularity_enabled\">true</param>\n            <param id=\"singularity_sudo\" from_environ=\"GALAXY_SINGULARITY_SUDO\">{{ 
galaxy_singularity_sudo | string }}</param>\n            <!-- The empty volumes from shouldn't affect Galaxy, set GALAXY_SINGULARITY_VOLUMES_FROM to use. -->\n            <param id=\"singularity_volumes_from\" from_environ=\"GALAXY_SINGULARITY_VOLUMES_FROM\">{{ galaxy_singularity_volumes_from }}</param>\n            <!-- For a stock Galaxy instance and traditional job runner $defaults will expand out as: $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw -->\n            <param id=\"singularity_volumes\" from_environ=\"GALAXY_SINGULARITY_VOLUMES\">{{ galaxy_singularity_volumes }}</param>\n            {% if force_container %}\n                <param id=\"singularity_default_container_id\" from_environ=\"GALAXY_SINGULARITY_DEFAULT_CONTAINER\">{{ galaxy_singularity_default_image }}</param>\n            {% endif %}\n\n        {% endif %}\n\n        {{ caller() }}\n    </destination>\n{%- endmacro %}\n\n{% macro docker_dispatch_destination(id, docker_destination, default_destination) -%}\n    <destination id=\"{{ id }}\" runner=\"dynamic\">\n        <param id=\"type\">docker_dispatch</param>\n        <param id=\"docker_destination_id\">{{ docker_destination }}</param>\n        <param id=\"default_destination_id\">{{ default_destination }}</param>\n    </destination>\n{%- endmacro %}\n\n{% macro singularity_dispatch_destination(id, singularity_destination, default_destination) -%}\n    <destination id=\"{{ id }}\" runner=\"dynamic\">\n        <param id=\"type\">singularity_dispatch</param>\n        <param id=\"singularity_destination_id\">{{ singularity_destination }}</param>\n        <param id=\"default_destination_id\">{{ default_destination }}</param>\n    </destination>\n{%- endmacro %}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/delegated_uploads.conf.j2",
    "content": "# delegated uploads\nlocation {{ nginx_tusd_location }} {\n    # Disable request and response buffering\n    proxy_request_buffering off;\n    proxy_buffering off;\n    proxy_http_version 1.1;\n\n    # Add X-Forwarded-* headers\n    proxy_set_header X-Forwarded-Host $http_host;\n    proxy_set_header X-Forwarded-Proto $scheme;\n        \n    proxy_set_header Upgrade $http_upgrade;\n    proxy_set_header Connection \"upgrade\";\n    client_max_body_size 0;\n    proxy_pass http://127.0.0.1:{{ tusd_port }};\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/flower_auth.conf.j2",
    "content": "# Authenticating with htpasswd file\n\nset $auth \"Flower is restricted. Please contact your administrator.\";\n\nauth_basic $auth;\nauth_basic_user_file htpasswd;\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_common.conf.j2",
    "content": "{% if nginx_use_passwords %}\n        auth_basic      \"devbox\";\n        auth_basic_user_file  /etc/nginx/htpasswd;\n{% endif %}\n\n{% if nginx_proxy_flower %}\n        # enable flower under :80/flower/\n        location {{ nginx_flower_location }}/ {\n            # include authentification settings if enabled\n            include {{ nginx_conf_dir }}/flower_auth.conf;\n            proxy_pass http://127.0.0.1:{{ flower_port }};\n            proxy_set_header Host $host;\n            proxy_redirect off;\n            proxy_http_version 1.1;\n            proxy_set_header Upgrade $http_upgrade;\n            proxy_set_header Connection \"upgrade\";\n        }\n{% endif %}\n\n{% if nginx_proxy_rabbitmq_management %}\n        # enable rabbitmq management under :80/rabbitmq/\n        location  ~* {{ nginx_rabbitmq_management_location }}/(.*) {\n            rewrite ^{{ nginx_rabbitmq_management_location }}/(.*)$ /$1 break;\n            proxy_pass http://127.0.0.1:{{ rabbitmq_management_port }};\n            proxy_buffering                    off;\n            proxy_set_header Host              $http_host;\n            proxy_set_header X-Real-IP         $remote_addr;\n            proxy_set_header X-Forwarded-For   $proxy_add_x_forwarded_for;\n            proxy_set_header X-Forwarded-Proto $scheme;\n        }\n{% endif %}\n\n{% if nginx_proxy_gunicorn %}\n        # pass to gunicorn by default\n        location {{ nginx_galaxy_location }}/ {\n            proxy_pass http://127.0.0.1:{{ gunicorn_port }};\n            proxy_set_header Host $http_host;\n            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n            proxy_set_header X-Forwarded-Proto $scheme;\n            proxy_set_header Upgrade $http_upgrade;\n        {% if galaxy_admin_user is defined and galaxy_admin_user %}\n            # hard-code a fixed user to pass to Galaxy to auto-login\n            proxy_set_header HTTP_REMOTE_USER '{{ galaxy_admin_user }}';\n        {% endif %}\n 
       {% if nginx_use_remote_header %}\n            # forward the remote_user header in case it is set by a previous proxy\n            proxy_set_header HTTP_REMOTE_USER $remote_user;\n        {% endif %}\n        }\n{% endif %}\n\n{% for a in nginx_additional_config %}\n        include {{ nginx_conf_dir }}/{{ a }}.conf;\n{% endfor %}\n\n        # serve static content\n        location {{ nginx_galaxy_location }}/static {\n            alias {{ galaxy_server_dir }}/static;\n            gzip on;\n            gzip_types text/plain text/xml text/javascript text/css application/x-javascript;\n            expires 24h;\n        }\n        location {{ nginx_galaxy_location }}/static/style {\n            alias {{ galaxy_server_dir }}/static/style;\n            gzip on;\n            gzip_types text/plain text/xml text/javascript text/css application/x-javascript;\n            expires 24h;\n        }\n        location {{ nginx_galaxy_location }}/static/dist {\n            alias {{ galaxy_server_dir }}/static/dist;\n            gzip on;\n            gzip_types text/plain text/xml text/javascript text/css application/x-javascript;\n            expires 24h;\n        }\n        location /favicon.ico {\n            alias {{ galaxy_server_dir }}/static/favicon.ico;\n        }\n\n        # delegated downloads\n        location /_x_accel_redirect/ {\n            internal;\n            alias /;\n            # Add upstream response headers that would otherwise be omitted\n            add_header Access-Control-Allow-Origin $upstream_http_access_control_allow_origin;\n            add_header Access-Control-Allow-Methods $upstream_http_access_control_allow_methods;\n        }\n\n        # this is needed if 'welcome_url' is set to /etc/galaxy/web\n        location {{ nginx_welcome_location }} {\n            alias {{ nginx_welcome_path }};\n            gzip on;\n            gzip_types text/plain text/xml text/javascript text/css application/x-javascript;\n            expires 24h;\n        
}\n\n{% if nginx_proxy_interactive_tools %}\n        # Route all path-based interactive tool requests to the InteractiveTool proxy application\n        location ~* ^{{ nginx_galaxy_location }}/(interactivetool/.+)$ {\n            proxy_redirect off;\n            proxy_http_version 1.1;\n            proxy_set_header Host $host;\n            proxy_set_header X-Real-IP $remote_addr;\n            proxy_set_header Upgrade $http_upgrade;\n            proxy_set_header Connection \"upgrade\";\n            proxy_pass http://127.0.0.1:{{ gx_it_proxy_port }};\n        }\n{% endif %}\n\n        location ~ ^{{ nginx_galaxy_location }}/plugins/(?<plug_type>[^/]+?)/((?<vis_d>[^/_]*)_?)?(?<vis_name>[^/]*?)/static/(?<static_file>.*?)$ {\n            alias {{ galaxy_server_dir }}/config/plugins/$plug_type/;\n            try_files $vis_d/${vis_d}_${vis_name}/static/$static_file\n                    $vis_d/static/$static_file =404;\n        }\n\n        # include delegated uploads settings if enabled\n        include {{ nginx_conf_dir }}/delegated_uploads.conf;\n\n        # error docs\n        error_page  502 503 504 {{ nginx_prefix_location }}/error/502/index.html;\n        error_page  413         {{ nginx_prefix_location }}/error/413/index.html;\n        error_page  500         {{ nginx_prefix_location }}/error/500/index.html;\n        location {{ nginx_prefix_location }}/error {\n            internal;\n            alias {{ galaxy_errordocs_dir }};\n        }\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_http.j2",
    "content": "server {\n        listen 80 default_server;\n        listen [::]:80 default_server;\n\n        include {{ nginx_conf_dir }}/galaxy_common.conf;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_https.j2",
    "content": "server {\n        listen 443 ssl default_server;\n        listen [::]:443 ssl default_server;\n\n        include {{ nginx_conf_dir }}/galaxy_common.conf;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/galaxy_redirect_ssl.j2",
    "content": "server {\n        listen 80 default_server;\n        listen [::]:80 default_server;\n\n        location /.well-known/ {\n\t        root {{ certbot_well_known_root }};\n        }\n        rewrite ^ https://$host$request_uri permanent;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/htpasswd.j2",
    "content": "{% for p in nginx_htpasswds %}\n{{ p }}\n{% endfor %}"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_common.conf.j2",
    "content": "# Match all requests for the interactive tools subdomain\nserver_name  *.interactivetool.{{ galaxy_domain }};\n\n# Log files will go here.\nerror_log /var/log/nginx/interactive_tools_error.log;\naccess_log /var/log/nginx/interactive_tools_access.log;\n\n# Proxy all requests to the Gx IT Proxy application\nlocation / {\n    proxy_redirect off;\n    proxy_http_version 1.1;\n    proxy_set_header Host $host;\n    proxy_set_header X-Real-IP $remote_addr;\n    proxy_set_header Upgrade $http_upgrade;\n    proxy_set_header Connection \"upgrade\";\n    proxy_pass http://127.0.0.1:{{ gx_it_proxy_port }};\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_http.j2",
    "content": "server {\n    listen 80;\n    listen [::]:80;\n\n    include {{ nginx_conf_dir }}/interactive_tools_common.conf;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_https.j2",
    "content": "server {\n    listen 443 ssl;\n    listen [::]:443 ssl;\n\n    include {{ nginx_conf_dir }}/interactive_tools_common.conf;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/nginx/interactive_tools_redirect_ssl.j2",
    "content": "server {\n    listen 80;\n    listen [::]:80;\n    rewrite ^ https://$host$request_uri permanent;\n}\n"
  },
  {
    "path": "galaxy/ansible/templates/object_store_templates.yml.j2",
    "content": "# This is a catalog file for all the user object store templates\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_azure_blob.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_aws_s3.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_generic_s3.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/production_gcp_s3.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/cloudflare.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/onedata.yml\"\n- include: \"{{ galaxy_server_dir }}/lib/galaxy/objectstore/templates/examples/minio_just_buckets.yml\"\n"
  },
  {
    "path": "galaxy/ansible/templates/rabbitmq.sh.j2",
    "content": "#!/bin/sh\n# call \"rabbitmqctl stop\" when exiting\n# taken from https://gist.github.com/caioariede/342a583f75467509ad42\nmkdir -p /var/run/rabbitmq && chown rabbitmq:rabbitmq /var/run/rabbitmq && chmod 755 /var/run/rabbitmq\nRABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env\nRABBITMQ_SCRIPTS_DIR=$(dirname \"$RABBITMQ_ENV\")\n. /usr/lib/rabbitmq/bin/rabbitmq-env\ntrap \"{ echo Stopping rabbitmq; rabbitmqctl stop; exit 0; }\" TERM\n\necho Starting rabbitmq\nrabbitmq-server &\n\n# from docs: When Bash receives a signal for which a\n# trap has been set while waiting for a command to\n# complete, the trap will not be executed until the\n# command completes.\n#\n# This is why we use & and wait here. Idea taken from:\n# http://veithen.github.io/2014/11/16/sigterm-propagation.html\nPID=$!\nwait $PID\n"
  },
  {
    "path": "galaxy/ansible/templates/startup_lite.sh.j2",
    "content": "#!/bin/bash\n\ncd $GALAXY_ROOT_DIR\n\nexport GALAXY_CONFIG_STATIC_ENABLED=True\nexport GALAXY_CONFIG_ALLOW_PATH_PASTE=True\nunset GALAXY_CONFIG_TUS_UPLOAD_STORE\n\n# The lite mode can be useful to populate data libraries.\n# To make this work it is needed to unset the following variables\nunset GALAXY_CONFIG_JOB_WORKING_DIRECTORY\nunset GALAXY_CONFIG_FILE_PATH\nunset GALAXY_CONFIG_NEW_FILE_PATH\nunset GALAXY_CONFIG_TEMPLATE_CACHE_PATH\nunset GALAXY_CONFIG_CITATION_CACHE_DATA_DIR\nunset GALAXY_CONFIG_FTP_UPLOAD_DIR\nunset GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG\n\nJOB_CONF=$GALAXY_ROOT_DIR/lib/galaxy/config/sample/job_conf.xml.sample_basic\n\nwhile getopts \"j\" opt; do\n  case $opt in\n    j)\n      #if they pass -j, don't override the job config file\n      JOB_CONF=$GALAXY_CONFIG_JOB_CONFIG_FILE\n      ;;\n    \\?)\n      echo \"Invalid option: -$OPTARG\" >&2\n      ;;\n  esac\ndone\n\nexport GALAXY_CONFIG_JOB_CONFIG_FILE=$JOB_CONF\n\nservice postgresql start\n\n. {{ galaxy_venv_dir }}/bin/activate\n\necho \"Checking if database is up and running\"\nuntil /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 1; echo \"Waiting for database\"; done\necho \"Database connected\"\n\n./run.sh -d galaxy_startup_lite.log --pidfile galaxy_startup_lite.pid --http-timeout 3000 \n"
  },
  {
    "path": "galaxy/ansible/templates/supervisor.conf.j2",
    "content": "[supervisord]\nnodaemon=false\n\n{% if supervisor_webserver %}\n[inet_http_server]\nport={{ supervisor_webserver_port }}\n{% if supervisor_webserver_username %}\nusername={{ supervisor_webserver_username }}\npassword={{ supervisor_webserver_password }}\n{% endif %}\n{% endif %}\n\n{% if supervisor_manage_cron %}\n[program:cron]\nuser            = root\ncommand         = /usr/sbin/cron -f\nautostart       = {{ supervisor_cron_autostart }}\nautorestart     = true\n{% endif %}\n\n{% if supervisor_manage_autofs %}\n[program:autofs]\nuser            = root\ncommand         = /usr/sbin/automount -f\nautostart       = {{ supervisor_autofs_autostart }}\nautorestart     = true\nredirect_stderr = true\nstdout_logfile  = /var/log/autofs.log\n{% endif %}\n\n{% if supervisor_manage_slurm %}\n[program:munge]\nuser=root\n# In VMs the chown seems to be needed, in containers the mkdir.\n# Keep munge threads modest by default; increase via munge_num_threads if needed.\ncommand=/bin/bash -c \"mkdir -p /var/run/munge && chown -R root:root /var/run/munge && /usr/sbin/munged -f -F --num-threads={{ munge_num_threads | default(2) }}\"\nredirect_stderr = true\npriority        = 100\nstopasgroup     = true\n\n[program:slurmctld]\nuser=root\ncommand=/bin/bash -c \"/usr/bin/python /usr/sbin/configure_slurm.py && /usr/sbin/slurmctld -D -L {{ supervisor_slurm_config_dir }}/slurmctld.log\"\nredirect_stderr=true\nautostart       = {{ supervisor_slurm_autostart }}\nautorestart     = true\npriority        = 200\nstopasgroup     = true\n\n[program:slurmd]\nuser=root\ncommand=/usr/sbin/slurmd -D -L {{ supervisor_slurm_config_dir }}/slurmd.log\nautostart       = {{ supervisor_slurm_autostart }}\nredirect_stderr = true\nautorestart     = true\npriority        = 300\n{% endif %}\n\n{% if supervisor_manage_condor %}\n[program:condor]\nuser=root\ncommand=condor_master -f -t\nredirect_stderr = true\nautostart       = {{ supervisor_condor_autostart }}\nautorestart     = true\npriority        
= 100\n{% endif %}\n\n\n{% if supervisor_manage_postgres %}\n{% if ansible_virtualization_type != \"docker\" %}\n[program:pre_postgresql]\nuser            = root\nstartsecs       = 0\ncommand         = /bin/bash -c \"install -d -m 2775 -o postgres -g postgres /var/run/postgresql\"\n{% endif %}\n\n[program:postgresql]\nuser            = postgres\ncommand         = /usr/lib/postgresql/{{ postgresql_version }}/bin/postmaster {{ supervisor_postgres_options }}\nprocess_name    = %(program_name)s\nstopsignal      = INT\nautostart       = {{ supervisor_postgres_autostart }}\nautorestart     = true\nredirect_stderr = true\npriority        = 100\n{% endif %}\n\n{% if supervisor_manage_proftp %}\n[program:proftpd]\n{% if proftpd_nat_masquerade %}\ncommand         = bash -c \" export MASQUERADE_ADDRESS={{ proftpd_masquerade_address }} && /usr/sbin/proftpd -n -c {{ proftpd_conf_path }}\"\n{% else %}\ncommand         = /usr/sbin/proftpd -n -c {{ proftpd_conf_path }}\n{% endif %}\nautostart       = {{ supervisor_proftpd_autostart }}\nautorestart     = true\nstopasgroup     = true\nkillasgroup     = true\n{% endif %}\n\n{% if supervisor_manage_nginx %}\n[program:nginx]\ncommand         = /usr/sbin/nginx\ndirectory       = /\numask           = 022\nautostart       = true\nautorestart     = unexpected\nstartsecs       = 5\nexitcodes       = 0\nuser            = root\npriority        = 200\n{% endif %}\n\n{% if supervisor_manage_toolshed %}\n[program:toolshed]\ncommand         = {{ galaxy_venv_dir }}/bin/gunicorn 'tool_shed.webapp.fast_factory:factory()' --config python:galaxy.web_stack.gunicorn_config --worker-class galaxy.webapps.galaxy.workers.Worker --preload --workers 1 --bind 127.0.0.1:{{ galaxy_toolshed_port }} --timeout 600 --log-file {{ galaxy_logs_dir }}/toolshed.log --pid {{ galaxy_logs_dir }}/toolshed.pid --pythonpath lib\ndirectory       = {{ galaxy_server_dir }}\nprocess_name    = toolshed\numask           = 022\nautostart       = true\nautorestart     = 
true\nenvironment     = PATH={{ galaxy_venv_dir }}:{{ galaxy_venv_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin,PYTHONPATH=lib,TOOL_SHED_CONFIG_FILE={{ galaxy_toolshed_config_file }}\nstartsecs       = {{ supervisor_galaxy_startsecs }}\nuser            = {{ galaxy_user_name }}\nstartretries    = {{ supervisor_galaxy_startretries }}\n{% endif %}\n\n{% if supervisor_manage_docker %}\n[program:docker]\ndirectory       = /\n{% if docker_legacy %}\ncommand         = /usr/bin/docker daemon --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 -s {{ docker_storage_backend }}\n{% else %}\ncommand         = /usr/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 -s {{ docker_storage_backend }}\n{% endif %}\nautostart       = {{ supervisor_docker_autostart }}\nautorestart     = {{ supervisor_docker_autorestart }}\nuser            = root\nstartsecs       = 5\nredirect_stderr = true\n{% endif %}\n\n{% if supervisor_manage_rabbitmq %}\n[program:rabbitmq]\ncommand         = /bin/sh /usr/local/bin/rabbitmq.sh\nuser            = root\nautostart       = {{ supervisor_rabbitmq_autostart }}\nautorestart     = true\n{% endif %}\n\n{% if supervisor_manage_redis %}\n[program:redis]\ncommand         = /usr/bin/redis-server /etc/redis/redis.conf\nuser            = root\nautostart       = {{ supervisor_redis_autostart }}\nautorestart     = true\n{% endif %}\n\n{% if supervisor_manage_flower %}\n[program:flower]\ncommand         = {{ galaxy_venv_dir }}/bin/celery --broker={{ flower_broker_url }} --app {{ flower_app_name }} flower --conf={{ flower_conf_path }} --log_file_prefix={{ flower_log }}\ndirectory       = {{ galaxy_server_dir }}\numask           = 022\nautostart       = {{ supervisor_flower_autostart }}\nautorestart     = true\nstartsecs       = 10\nuser            = {{ galaxy_user_name }}\nenvironment     = PATH={{ galaxy_venv_dir }}:{{ galaxy_venv_dir 
}}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin,PYTHONPATH={{ galaxy_server_dir }}/lib,GALAXY_ROOT_DIR={{ galaxy_server_dir }},GALAXY_CONFIG_FILE={{ galaxy_config_file }}\n{% endif %}\n"
  },
  {
    "path": "galaxy/ansible/templates/update_yaml_value.py.j2",
    "content": "import sys\nimport yaml\nimport argparse\n\ndef modify_yaml(file_path, key_path, new_value):\n    # Load the YAML file\n    with open(file_path, 'r') as file:\n        data = yaml.safe_load(file)\n    \n    # Split the key_path by '.' to access nested keys\n    keys = key_path.split('.')\n    \n    # Traverse the dictionary to reach the correct key\n    temp = data\n    for key in keys[:-1]:\n        if key in temp:\n            temp = temp[key]\n        else:\n            print(f\"Key path '{'.'.join(keys)}' does not exist in {file_path}. No update made.\")\n            return\n    \n    # Check if the last key exists and update its value\n    if keys[-1] in temp:\n        temp[keys[-1]] = yaml.safe_load(new_value)\n        # Write the updated data back to the YAML file\n        with open(file_path, 'w') as file:\n            yaml.dump(data, file, default_flow_style=False)\n        print(f\"Updated {key_path} to {new_value} in {file_path}\")\n    else:\n        print(f\"Key '{keys[-1]}' does not exist in {file_path}. No update made.\")\n\ndef main():\n    # Setup argparse for handling -h/--help and arguments\n    parser = argparse.ArgumentParser(description=\"Update a YAML file by modifying an existing key's value.\")\n    \n    # Positional arguments\n    parser.add_argument(\"file_path\", help=\"The path to the YAML file\")\n    parser.add_argument(\"key_path\", help=\"The dot-separated key path (e.g., 'gravity.gunicorn.workers')\")\n    parser.add_argument(\"new_value\", help=\"The new value to set for the specified key\")\n\n    # Parse the arguments\n    args = parser.parse_args()\n\n    # Modify the YAML file using the provided arguments\n    modify_yaml(args.file_path, args.key_path, args.new_value)\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "galaxy/ansible/templates/vault_conf.yml.j2",
    "content": "type: database\npath_prefix: /galaxy\n# Encryption keys must be valid fernet keys\n# To generate a valid key:\n#\n# Use the ascii string value as a key\n# For more details, see: https://cryptography.io/en/latest/fernet/#\nencryption_keys:\n{% for encryption_key in galaxy_vault_encryption_keys %}\n  - {{ encryption_key }}\n{% endfor %}\n"
  },
  {
    "path": "galaxy/ansible/tusd.yml",
    "content": "- hosts: localhost\n  connection: local\n  remote_user: root\n  vars:\n    tusd_version: v2.5.0\n    tusd_systemd: false\n  roles:\n    - role: galaxyproject.tusd\n"
  },
  {
    "path": "galaxy/bashrc",
    "content": "# ~/.bashrc: executed by bash(1) for non-login shells.\n# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)\n# for examples\n\n# If not running interactively, don't do anything\n[ -z \"$PS1\" ] && return\n\n# don't put duplicate lines in the history. See bash(1) for more options\n# ... or force ignoredups and ignorespace\nHISTCONTROL=ignoredups:ignorespace\n\n# append to the history file, don't overwrite it\nshopt -s histappend\n\n# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)\nHISTSIZE=1000\nHISTFILESIZE=2000\n\n# check the window size after each command and, if necessary,\n# update the values of LINES and COLUMNS.\nshopt -s checkwinsize\n\n# make less more friendly for non-text input files, see lesspipe(1)\n[ -x /usr/bin/lesspipe ] && eval \"$(SHELL=/bin/sh lesspipe)\"\n\n# set variable identifying the chroot you work in (used in the prompt below)\nif [ -z \"$debian_chroot\" ] && [ -r /etc/debian_chroot ]; then\n    debian_chroot=$(cat /etc/debian_chroot)\nfi\n\n# set a fancy prompt (non-color, unless we know we \"want\" color)\ncase \"$TERM\" in\n    xterm-color) color_prompt=yes;;\nesac\n\n# uncomment for a colored prompt, if the terminal has the capability; turned\n# off by default to not distract the user: the focus in a terminal window\n# should be on the output of commands, not on the prompt\n#force_color_prompt=yes\n\nif [ -n \"$force_color_prompt\" ]; then\n    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then\n    # We have color support; assume it's compliant with Ecma-48\n    # (ISO/IEC-6429). 
(Lack of such support is extremely rare, and such\n    # a case would tend to support setf rather than setaf.)\n    color_prompt=yes\n    else\n    color_prompt=\n    fi\nfi\n\nif [ \"$color_prompt\" = yes ]; then\n    PS1='${debian_chroot:+($debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\]\\w\\[\\033[00m\\]\\$ '\nelse\n    PS1='${debian_chroot:+($debian_chroot)}\\u@\\h:\\w\\$ '\nfi\nunset color_prompt force_color_prompt\n\n# If this is an xterm set the title to user@host:dir\ncase \"$TERM\" in\nxterm*|rxvt*)\n    PS1=\"\\[\\e]0;${debian_chroot:+($debian_chroot)}\\u@\\h: \\w\\a\\]$PS1\"\n    ;;\n*)\n    ;;\nesac\n\n# enable color support of ls and also add handy aliases\nif [ -x /usr/bin/dircolors ]; then\n    test -r ~/.dircolors && eval \"$(dircolors -b ~/.dircolors)\" || eval \"$(dircolors -b)\"\n    alias ls='ls --color=auto'\n    #alias dir='dir --color=auto'\n    #alias vdir='vdir --color=auto'\n\n    alias grep='grep --color=auto'\n    alias fgrep='fgrep --color=auto'\n    alias egrep='egrep --color=auto'\nfi\n\n# some more ls aliases\nalias ll='ls -lF --color=always'\nalias lt='ls -ltr'\nalias la='ls -A'\nalias l='ls -CF'\n\n# Add an \"alert\" alias for long running commands.  Use like so:\n#   sleep 10; alert\nalias alert='notify-send --urgency=low -i \"$([ $? = 0 ] && echo terminal || echo error)\" \"$(history|tail -n1|sed -e '\\''s/^\\s*[0-9]\\+\\s*//;s/[;&|]\\s*alert$//'\\'')\"'\n\n# Alias definitions.\n# You may want to put all your additions into a separate file like\n# ~/.bash_aliases, instead of adding them here directly.\n# See /usr/share/doc/bash-doc/examples in the bash-doc package.\n\nif [ -f ~/.bash_aliases ]; then\n    . ~/.bash_aliases\nfi\n\n# enable programmable completion features (you don't need to enable\n# this, if it's already enabled in /etc/bash.bashrc and /etc/profile\n# sources /etc/bash.bashrc).\nif [ -f /etc/bash_completion ] && ! shopt -oq posix; then\n    . /etc/bash_completion\nfi\n"
  },
  {
    "path": "galaxy/cgroupfs_mount.sh",
    "content": "#!/bin/bash\nset -e\n\n# DinD: a wrapper script which allows docker to be run inside a docker container.\n# Original version by Jerome Petazzoni <jerome@docker.com>\n# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/\n#\n# This script should be executed inside a docker container in privileged mode\n# ('docker run --privileged', introduced in docker 0.6).\n\n# Usage: dind CMD [ARG...]\n\n# apparmor sucks and Docker needs to know that it's in a container (c) @tianon\nexport container=docker\n\nif [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then\n\tmount -t securityfs none /sys/kernel/security || {\n\t\techo >&2 'Could not mount /sys/kernel/security.'\n\t\techo >&2 'AppArmor detection and --privileged mode might break.'\n\t}\nfi\n\n# Mount /tmp (conditionally)\nif ! mountpoint -q /tmp; then\n\tmount -t tmpfs none /tmp\nfi\n\n\n# If a pidfile is still around (for example after a container restart),\n# delete it so that docker can start.\nrm -rf /var/run/docker.pid\n"
  },
  {
    "path": "galaxy/common_cleanup.sh",
    "content": "#!/bin/sh\n\nset -x\n\n# This usually drastically reduces the container size\n# at the cost of the startup time of your application\nfind / -name '*.pyc' -delete\n\nfind / -name '*.log' -delete\nfind / -path /root/.cache -prune -o -name '.cache' -type d -prune -exec rm -rf '{}' +\nfind / -path /root/.npm -prune -o -name '.npm' -type d -prune -exec rm -rf '{}' +\nfind / -name '.launchpadlib' -type d -prune -exec rm -rf '{}' +\nrm -rf /var/lib/apt/lists/*\nrm -rf /var/cache/*\nrm -rf /tmp/*\nrm -rf /var/tmp/*\n\n# https://askubuntu.com/questions/266738/how-to-truncate-all-logfiles\ntruncate -s 0 /var/log/*log || true\ntruncate -s 0 /var/log/**/*log || true\n"
  },
  {
    "path": "galaxy/docker-compose.yaml",
    "content": "# docker-compose wrapper for the single Galaxy container. This is useful for systems like EGI IM.\n# Start via `IMAGE_TAG=dev GALAXY_CONFIG_BRAND=foo docker-compose up`\nservices:\n  galaxy-server:\n    image: ${DOCKER_REGISTRY:-quay.io}/${DOCKER_REGISTRY_USERNAME:-bgruening}/galaxy:${IMAGE_TAG:-latest}\n    build: ./\n    environment:\n      - GALAXY_DEFAULT_ADMIN_USER=admin\n      - GALAXY_DEFAULT_ADMIN_EMAIL=admin@example.org\n      - GALAXY_DEFAULT_ADMIN_PASSWORD=password\n      - GALAXY_DEFAULT_ADMIN_KEY=fakekey\n      - GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker\n      - GALAXY_CONFIG_BRAND=${GALAXY_CONFIG_BRAND:-My own Galaxy flavour}\n      - GALAXY_AUTO_UPDATE_DB=True\n    hostname: galaxy-server\n    privileged: True\n    ports:\n      - \"8080:80\"\n      - \"9002:9002\"\n      - \"4002:4002\"\n      - \"8021:21\"\n      - \"8022:22\"\n    volumes:\n      # This is the directory where all your files from Galaxy will be stored\n      # on your host system\n      - ${EXPORT_DIR:-./export}/:/export/:delegated\n      - ${EXPORT_DIR:-./export}/tus_upload_store:/tus_upload_store:delegated\n      - /var/run/docker.sock:/var/run/docker.sock\n      # Optional CVMFS mount (shared with the sidecar when enabled).\n      - type: bind\n        source: ${CVMFS_MOUNT_DIR:-/cvmfs}\n        target: /cvmfs\n        bind:\n          # Propagate CVMFS mounts from the sidecar into this container.\n          propagation: rshared\n\n  cvmfs:\n    profiles:\n      - cvmfs\n    build: ../cvmfs\n    image: galaxy-cvmfs:latest\n    privileged: true\n    environment:\n      - CVMFS_REPOSITORIES=data.galaxyproject.org,singularity.galaxyproject.org\n      - CVMFS_CACHE_BASE=/var/lib/cvmfs\n    volumes:\n      - type: bind\n        source: ${CVMFS_MOUNT_DIR:-/cvmfs}\n        target: /cvmfs\n        bind:\n          # Allow mounts created here to propagate to the host and Galaxy container.\n          propagation: rshared\n      - 
${EXPORT_DIR:-./export}/cvmfs-cache:/var/lib/cvmfs:delegated\n"
  },
  {
    "path": "galaxy/install_tools_wrapper.sh",
    "content": "#!/bin/bash\nset -euo pipefail\n\n# Basic defaults so set -u does not choke when running outside the normal entrypoint.\nGALAXY_HOME=${GALAXY_HOME:-/galaxy}\nGALAXY_ROOT_DIR=${GALAXY_ROOT_DIR:-$GALAXY_HOME}\nexport GALAXY_VIRTUAL_ENV=${GALAXY_VIRTUAL_ENV:-/galaxy_venv}\nexport PATH=\"${GALAXY_VIRTUAL_ENV}/bin:${PATH}\"\nexport GALAXY_SKIP_REQUIREMENTS_INSTALL=1\nexport GALAXY_SKIP_COMMON_STARTUP=1\nexport GALAXY_SKIP_CLIENT_BUILD=1\nexport GALAXY_CONFIG_FILE=${GALAXY_CONFIG_FILE:-/etc/galaxy/galaxy.yml}\n# Never create conda envs during tool install; rely on cached containers only.\nexport GALAXY_CONFIG_CONDA_AUTO_INSTALL=False\nexport GALAXY_CONFIG_CONDA_AUTO_INIT=False\n# Keep managed configs inside the image, not /export.\nexport GALAXY_CONFIG_MANAGED_CONFIG_DIR=/galaxy/database/config\nexport GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG=/galaxy/integrated_tool_panel.xml\nexport GALAXY_CONFIG_FILE_PATH=/galaxy/database/files\nexport GALAXY_CONFIG_NEW_FILE_PATH=/galaxy/database/tmp\nexport GALAXY_CONFIG_TEMPLATE_CACHE_PATH=/galaxy/database/compiled_templates\nexport GALAXY_CONFIG_CITATION_CACHE_DATA_DIR=/galaxy/database/citations/data\nexport GALAXY_CONFIG_JOB_WORKING_DIRECTORY=/galaxy/database/job_working_directory\nmkdir -p \"${GALAXY_CONFIG_MANAGED_CONFIG_DIR}\"\nmkdir -p \"${GALAXY_CONFIG_FILE_PATH}\" \"${GALAXY_CONFIG_NEW_FILE_PATH}\" \\\n         \"${GALAXY_CONFIG_TEMPLATE_CACHE_PATH}\" \"${GALAXY_CONFIG_CITATION_CACHE_DATA_DIR}\" \\\n         \"${GALAXY_CONFIG_JOB_WORKING_DIRECTORY}\"\nchown -R galaxy:galaxy \"${GALAXY_CONFIG_MANAGED_CONFIG_DIR}\" \"${GALAXY_CONFIG_FILE_PATH}\" \\\n    \"${GALAXY_CONFIG_NEW_FILE_PATH}\" \"${GALAXY_CONFIG_TEMPLATE_CACHE_PATH}\" \\\n    \"${GALAXY_CONFIG_CITATION_CACHE_DATA_DIR}\" \"${GALAXY_CONFIG_JOB_WORKING_DIRECTORY}\"\nif [ ! 
-f \"${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}\" ]; then\n    cp -f /galaxy/config/integrated_tool_panel.xml.sample \"${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}\" 2>/dev/null || touch \"${GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG}\"\nfi\n\n# Enable Test Tool Shed for flavour installs.\nexport GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=\"${GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE:-$GALAXY_HOME/tool_sheds_conf.xml}\"\n\n# Ensure shed-tools is available.\n. /tool_deps/_conda/etc/profile.d/conda.sh\nconda activate base\n\ncd \"${GALAXY_ROOT_DIR}\"\nINSTALL_TOOLS_VERBOSE=\"${INSTALL_TOOLS_VERBOSE:-false}\"\nwait_args=(\"-v\")\naccess_log=\"-\"\nstartup_log=\"/tmp/install_tools_startup.log\"\nstartup_redirect=\"\"\nif ! [[ \"${INSTALL_TOOLS_VERBOSE}\" =~ ^([Tt][Rr][Uu][Ee]|1|[Yy][Ee][Ss])$ ]]; then\n    wait_args=()\n    access_log=\"/dev/null\"\n    startup_redirect=\">> ${startup_log} 2>&1\"\nfi\n\n# If supervisord is already running we assume Galaxy is up (normal runtime).\nif pgrep \"supervisord\" >/dev/null; then\n    echo \"System is up and running. Installing tools against the running Galaxy (port 80).\"\n    PORT=80\n    started_locally=false\nelse\n    PORT=8080\n    started_locally=true\n    install_log='galaxy_install.log'\n\n    echo \"Starting PostgreSQL for tool installation\"\n    PG_VER=\"${PG_VERSION:-15}\"\n    PG_DATA=\"${PG_DATA_DIR_DEFAULT:-/var/lib/postgresql/${PG_VER}/main/}\"\n    sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -D \"$PG_DATA\" -l /tmp/pg_install.log -o \"-k /var/run/postgresql\" start\n    until pg_isready -h /var/run/postgresql -U galaxy >/dev/null 2>&1; do\n        echo \"Waiting for PostgreSQL...\"\n        sleep 1\n    done\n    # Ensure supervisord is running so gravity-managed services can start.\n    if ! 
pgrep \"supervisord\" >/dev/null; then\n        supervisord -c /etc/supervisor/supervisord.conf\n        sleep 2\n    fi\n\n    echo \"Starting Galaxy for tool installation\"\n    export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=\"http://localhost:${PORT}\"\n    export GRAVITY_MANAGE_TUSD=False\n    export GALAXY_CONFIG_TUS_UPLOAD_ENABLED=False\n    export GRAVITY_MANAGE_GX_IT_PROXY=False\n    # Prefer env overrides instead of mutating config files.\n    export GALAXY_CONFIG_OVERRIDE__galaxy_infrastructure_url=\"http://localhost:${PORT}\"\n    # Keep container resolvers simple (no CVMFS) for the install run; do not overwrite runtime config.\n    container_conf_target=\"$(mktemp /tmp/container_resolvers_conf.install.XXXX.yml)\"\n    cat > \"${container_conf_target}\" <<'EOF'\n- type: explicit\n- type: cached_mulled_singularity\n  cache_directory: \"/export/container_cache/singularity/mulled\"\n- type: mulled\n  namespace: \"biocontainers\"\n- type: build_mulled\n  namespace: local\nEOF\n    chown galaxy:galaxy \"${container_conf_target}\" || true\n    chmod 644 \"${container_conf_target}\" || true\n    export GALAXY_CONFIG_CONTAINER_RESOLVERS_CONFIG_FILE=\"${container_conf_target}\"\n    sudo -E -H -u galaxy -- bash -c \"\n        unset SUDO_UID SUDO_GID SUDO_COMMAND SUDO_USER\n        . 
/galaxy_venv/bin/activate\n        GALAXY_SKIP_REQUIREMENTS_INSTALL=1 GALAXY_SKIP_COMMON_STARTUP=1 GALAXY_SKIP_CLIENT_BUILD=1 GALAXY_NO_VENV=1 \\\n        GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost:${PORT} \\\n        PYTHONPATH=lib GALAXY_CONFIG_FILE=/etc/galaxy/galaxy.yml \\\n        gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()' \\\n            --timeout 300 --pythonpath lib -k galaxy.webapps.galaxy.workers.Worker \\\n            -b 127.0.0.1:${PORT} --workers=1 --config python:galaxy.web_stack.gunicorn_config --preload \\\n            --pid galaxy_install.pid --error-logfile ${install_log} --access-logfile ${access_log} ${startup_redirect} &\n        echo \\$! > /tmp/galaxy_install_wrapper.pid\n    \"\n\n    galaxy-wait -g \"http://localhost:${PORT}\" \"${wait_args[@]}\" --timeout 900\nfi\n\n# Ensure admin user exists (needed for shed-tools with fakekey).\nif [[ -n \"${GALAXY_DEFAULT_ADMIN_USER:-}\" ]]; then\n    echo \"Creating admin user ${GALAXY_DEFAULT_ADMIN_USER} (if missing)\"\n    . 
\"${GALAXY_VIRTUAL_ENV}/bin/activate\"\n    python /usr/local/bin/create_galaxy_user.py \\\n        --user \"${GALAXY_DEFAULT_ADMIN_EMAIL}\" \\\n        --password \"${GALAXY_DEFAULT_ADMIN_PASSWORD}\" \\\n        -c \"${GALAXY_CONFIG_FILE}\" \\\n        --username \"${GALAXY_DEFAULT_ADMIN_USER}\" \\\n        --key \"${GALAXY_DEFAULT_ADMIN_KEY}\"\n    deactivate\nfi\n\necho \"Installing tools from $1\"\nINSTALL_TOOL_DEPS=\"${INSTALL_TOOL_DEPENDENCIES:-false}\"\nif [[ \"${INSTALL_TOOL_DEPS}\" =~ ^([Tt][Rr][Uu][Ee]|1|[Yy][Ee][Ss])$ ]]; then\n    echo \"Installing tool dependencies as well (INSTALL_TOOL_DEPENDENCIES=${INSTALL_TOOL_DEPS})\"\n    shed-tools install -g \"http://localhost:${PORT}\" -a fakekey -t \"$1\" --install-tool-dependencies\nelse\n    echo \"Skipping tool and resolver dependencies (INSTALL_TOOL_DEPENDENCIES=${INSTALL_TOOL_DEPS})\"\n    shed-tools install -g \"http://localhost:${PORT}\" -a fakekey -t \"$1\" \\\n        --skip-install-resolver-dependencies \\\n        --skip-install-repository-dependencies\nfi\n\nif $started_locally; then\n    echo \"Shutting down temporary Galaxy/PostgreSQL used for tool install\"\n    if [ -f /tmp/galaxy_install_wrapper.pid ]; then\n        kill \"$(cat /tmp/galaxy_install_wrapper.pid)\" 2>/dev/null || true\n        rm -f /tmp/galaxy_install_wrapper.pid\n    fi\n    sudo -E -H -u galaxy kill \"$(cat galaxy_install.pid)\" 2>/dev/null || true\n    rm -f galaxy_install.pid \"$install_log\"\n    PG_VER=\"${PG_VERSION:-15}\"\n    PG_DATA=\"${PG_DATA_DIR_DEFAULT:-/var/lib/postgresql/${PG_VER}/main/}\"\n    sudo -u postgres /usr/lib/postgresql/${PG_VER}/bin/pg_ctl -D \"$PG_DATA\" stop\nfi\n"
  },
  {
    "path": "galaxy/run.sh",
    "content": "#!/bin/sh\n\n\n# Usage: ./run.sh <start|stop|restart>\n#\n#\n# Description: This script can be used to start or stop the galaxy\n# web application.\n\ncd \"$(dirname \"$0\")\"\n\n. ./scripts/common_startup_functions.sh\n\n# If there is a file that defines a shell environment specific to this\n# instance of Galaxy, source the file.\nif [ -z \"$GALAXY_LOCAL_ENV_FILE\" ];\nthen\n    GALAXY_LOCAL_ENV_FILE='./config/local_env.sh'\nfi\n\nif [ -f \"$GALAXY_LOCAL_ENV_FILE\" ];\nthen\n    . \"$GALAXY_LOCAL_ENV_FILE\"\nfi\n\nGALAXY_PID=${GALAXY_PID:-galaxy.pid}\nGALAXY_LOG=${GALAXY_LOG:-galaxy.log}\nPID_FILE=$GALAXY_PID\nLOG_FILE=$GALAXY_LOG\n\nparse_common_args $@\n\nrun_common_start_up\n\nsetup_python\n\nif [ ! -z \"$GALAXY_RUN_WITH_TEST_TOOLS\" ];\nthen\n    export GALAXY_CONFIG_OVERRIDE_TOOL_CONFIG_FILE=\"$(pwd)/test/functional/tools/sample_tool_conf.xml\"\n    export GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=\"true\"\n    export GALAXY_CONFIG_OVERRIDE_ENABLE_BETA_TOOL_FORMATS=\"true\"\n    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=\"true\"\n    export GALAXY_CONFIG_OVERRIDE_WEBHOOKS_DIR=\"test/functional/webhooks\"\n    export GALAXY_CONFIG_OVERRIDE_PANEL_VIEWS_DIR=\"$(pwd)/test/integration/panel_views_1/\"\nfi\n\nset_galaxy_config_file_var\n\nif [ \"$INITIALIZE_TOOL_DEPENDENCIES\" -eq 1 ]; then\n    # Install Conda environment if needed.\n    python ./scripts/manage_tool_dependencies.py init_if_needed\nfi\n\nfind_server \"${GALAXY_CONFIG_FILE:-none}\" galaxy\n\necho \"Executing: $run_server $server_args\"\n# args are properly quoted so use eval\neval GALAXY_ROOT_DIR=\".\" $run_server $server_args\n"
  },
  {
    "path": "galaxy/sample_tool_list.yaml",
    "content": "# This is just a sample file. For a fully documented version of this file, see\n# https://github.com/galaxyproject/ansible-galaxy-tools/blob/master/files/tool_list.yaml.sample\n\ninstall_repository_dependencies: false\ninstall_resolver_dependencies: false\ninstall_tool_dependencies: false\n\ntools:\n- name: 'column_maker'\n  owner: 'devteam'\n  tool_panel_section_label: 'Columnmaker section'\n- name: 'tabular_to_fasta'\n  owner: 'devteam'\n  tool_panel_section_label: 'New Converters'\n  revisions:\n  - '0b4e36026794'  # v1.1.0\n"
  },
  {
    "path": "galaxy/setup_postgresql.py",
    "content": "import os\nimport shutil\nimport argparse\nimport subprocess\n\n\ndef pg_ctl(database_path, database_version, mod='start'):\n    \"\"\"\n        Start/Stop PostgreSQL with variable data_directory.\n        mod = [start, end, restart, reload]\n    \"\"\"\n    pg_conf = f'/etc/postgresql/{database_version}/main/postgresql.conf'\n    new_data_directory = f\"'{database_path}'\"\n    cmd = f'sed -i \"s|data_directory = .*|data_directory = {new_data_directory}|g\" {pg_conf}'\n    subprocess.call(cmd, shell=True)\n    subprocess.call(f'service postgresql {mod}', shell=True)\n\n\ndef set_pg_permission(database_path):\n    \"\"\"\n        Set the correct permissions for a newly created PostgreSQL data_directory.\n    \"\"\"\n    subprocess.call(f'chown -R postgres:postgres {database_path}', shell=True)\n    subprocess.call(f'chmod -R 0700 {database_path}', shell=True)\n\n\ndef create_pg_db(user, password, database, database_path, database_version):\n    \"\"\"\n        Initialize PostgreSQL Database, add database user and create the Galaxy Database.\n    \"\"\"\n    pg_bin = f\"/usr/lib/postgresql/{database_version}/bin/\"\n    os.makedirs(database_path)\n    set_pg_permission(database_path)\n    # initialize a new postgres database\n    subprocess.call(\n        f\"su - postgres -c '{os.path.join(pg_bin, 'initdb')} \"\n        f\"--auth=trust --encoding UTF8 --pgdata={database_path}'\",\n        shell=True\n    )\n\n    shutil.copy('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(database_path, 'server.crt'))\n    shutil.copy('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(database_path, 'server.key'))\n    set_pg_permission(os.path.join(database_path, 'server.crt'))\n    set_pg_permission(os.path.join(database_path, 'server.key'))\n\n    # change data_directory in postgresql.conf and start the service with the new location\n    pg_ctl(database_path, database_version, 'start')\n\n    subprocess.call(f\"\"\"su - postgres -c \"psql --command 
\\\\\"CREATE USER {user} WITH SUPERUSER PASSWORD '{password}'\\\\\";\"\n                    \"\"\", shell=True)\n\n    subprocess.call(f\"su - postgres -c 'createdb -O {user} {database}'\", shell=True)\n    subprocess.call('service postgresql stop', shell=True)\n\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(description='Initializing a complete Galaxy Database with Tool Shed Tools.')\n\n    parser.add_argument(\"--dbuser\", required=True,\n                        help=\"Username of the Galaxy Database Administrator. That name will be specified in the \"\n                             \"galaxy.yml file.\")\n\n    parser.add_argument(\"--dbpassword\", required=True,\n                        help=\"Password of the Galaxy Database Administrator. That name will be specified in the \"\n                             \"galaxy.yml file.\")\n\n    parser.add_argument(\"--db-name\", dest='db_name', required=True,\n                        help=\"Galaxy Database name. That name will be specified in the galaxy.yml file.\")\n\n    parser.add_argument(\"--dbpath\",\n                        help=\"Galaxy Database path.\")\n\n    parser.add_argument(\"--dbversion\", default='15',\n                        help=\"Postgresql server major version.\")\n\n    options = parser.parse_args()\n\n    \"\"\"\n        Initialize the Galaxy Database + adding an Admin user.\n        This database is the default one, created by the Dockerfile. \n        The user can set a volume (-v /path/:/export/) to get a persistent database.\n    \"\"\"\n    create_pg_db(options.dbuser, options.dbpassword, options.db_name, options.dbpath, options.dbversion)\n"
  },
  {
    "path": "galaxy/startup.sh",
    "content": "#!/usr/bin/env bash\n\n# This is needed for Docker compose to have a unified alias for the main container.\n# Modifying /etc/hosts can only happen during runtime not during build-time\necho \"127.0.0.1      galaxy\" >> /etc/hosts\n\n# If the Galaxy config file is not in the expected place, copy from the sample\n# and hope for the best (that the admin has done all the setup through env vars.)\nif [ ! -f $GALAXY_CONFIG_FILE ]\n  then\n  # this should succesfully copy either .yml or .ini sample file to the expected location\n  cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE\nfi\n\n# Set number of Gunicorn workers via GUNICORN_WORKERS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.gunicorn.workers\" \"${GUNICORN_WORKERS:-2}\" &> /dev/null\n\n# Set number of Celery workers via CELERY_WORKERS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.concurrency\" \"${CELERY_WORKERS:-2}\" &> /dev/null\n\n# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.handlers.handler.processes\" \"${GALAXY_HANDLER_NUMPROCS:-2}\" &> /dev/null\n\n# Initialize variables for optional ansible parameters\nANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=\"\"\n\n# Configure proxy prefix filtering\nif [[ ! 
-z $PROXY_PREFIX ]]\nthen\n    echo \"Configuring with proxy prefix: $PROXY_PREFIX\"\n    export GALAXY_CONFIG_GALAXY_URL_PREFIX=\"$PROXY_PREFIX\"\n\n    # TODO: Set this using GALAXY_CONFIG_INTERACTIVETOOLS_BASE_PATH after gravity config manager is updated to handle env vars properly\n    ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #interactivetools_base_path:.*' replace='  interactivetools_base_path: ${PROXY_PREFIX}'\" &> /dev/null\n    \n    python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.tusd.extra_args\" \"-behind-proxy -base-path $PROXY_PREFIX/api/upload/resumable_upload\" &> /dev/null\n\n    ansible localhost -m replace -a \"path=/etc/flower/flowerconfig.py regexp='^url_prefix.*' replace='url_prefix = \\\"$PROXY_PREFIX/flower\\\"'\" &> /dev/null\n\n    # Fix path to html assets\n    ansible localhost -m replace -a \"dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\\\"|\\')[/\\\\w]*(/static)' replace='\\\\1${PROXY_PREFIX}\\\\2'\" &> /dev/null\n    \n    # Set some other vars based on that prefix\n    if [[ -z \"$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX\" ]]\n    then\n        export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX=\"$PROXY_PREFIX/gie_proxy\"\n    fi\n\n    if [[ ! 
-z $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL ]]\n    then\n        export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=\"${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}${PROXY_PREFIX}\"\n    fi\n\n    if [[ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" || \"$USE_HTTPS\" != \"False\" ]]\n    then\n        ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=\"--extra-vars nginx_prefix_location=$PROXY_PREFIX\"\n    else\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars nginx_prefix_location=\"$PROXY_PREFIX\"\n    fi\nfi\n\nif [ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" ]\nthen\n    echo \"Settting up letsencrypt\"\n    PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook -c local /ansible/nginx.yml \\\n    --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n    --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n    --extra-vars nginx_ssl_role=usegalaxy_eu.certbot \\\n    --extra-vars \"{\\\"certbot_domains\\\": [\\\"$GALAXY_DOMAIN\\\"]}\" \\\n    --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/user/privkey-$GALAXY_USER.pem \\\n    --extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/fullchain.pem \\\n    $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\nfi\nif [ \"$USE_HTTPS\" != \"False\" ]\nthen\n    if [ -f /export/server.key -a -f /export/server.crt ]\n    then\n        echo \"Copying SSL keys\"\n        ssl_key_content=$(cat /export/server.key | sed 's/$/\\\\n/' | tr -d '\\n')\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n        --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n        --extra-vars nginx_ssl_src_dir=/export \\\n        --extra-vars \"{\\\"sslkeys\\\": {\\\"server.key\\\": \\\"$ssl_key_content\\\"}}\" \\\n        --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/server.key \\\n        
--extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/server.crt \\\n        $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\n    else\n        echo \"Setting up self-signed SSL keys\"\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n        --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n        --extra-vars nginx_ssl_role=galaxyproject.self_signed_certs \\\n        --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/$GALAXY_DOMAIN.pem \\\n        --extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/$GALAXY_DOMAIN.crt \\\n        --extra-vars \"{\\\"openssl_domains\\\": [\\\"$GALAXY_DOMAIN\\\"]}\" \\\n        $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\n    fi\nfi\n\nif [[ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" || \"$USE_HTTPS\" != \"False\" ]]\nthen\n    # Check if GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL has http but not https\n    if [[ $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL == \"http:\"* ]]\n    then\n        GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL/http:/https:}\n        export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL\n    fi\nfi\n\n# Disable authentication of flower\nif [[ ! -z $DISABLE_FLOWER_AUTH ]]; then\n    # disable authentification\n    echo \"Disable flower authentification \"\n    cp /etc/nginx/flower_auth.conf /etc/nginx/flower_auth.conf.source\n    echo \"# No authentication defined\" > /etc/nginx/flower_auth.conf\nfi\n\n# Try to guess if we are running under --privileged mode\nif [[ ! -z $HOST_DOCKER_LEGACY ]]; then\n    if mount | grep \"/proc/kcore\"; then\n        PRIVILEGED=false\n    else\n        PRIVILEGED=true\n    fi\nelse\n    # Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode\n    ip link add dummy0 type dummy 2>/dev/null\n    if [[ $? 
-eq 0 ]]; then\n        PRIVILEGED=true\n        # clean the dummy0 link\n        ip link delete dummy0 2>/dev/null\n    else\n        PRIVILEGED=false\n    fi\nfi\n\ncd $GALAXY_ROOT_DIR\n. $GALAXY_VIRTUAL_ENV/bin/activate\n\n# Decide container routing based on runtime capabilities; prefer Singularity when available.\ndocker_ok=false\nif [ -S /var/run/docker.sock ] || command -v docker >/dev/null 2>&1; then\n    docker_ok=true\nfi\n\nsingularity_cmd=\"\"\nif command -v singularity >/dev/null 2>&1; then\n    singularity_cmd=\"singularity\"\nelif command -v apptainer >/dev/null 2>&1; then\n    singularity_cmd=\"apptainer\"\nfi\n\nsingularity_ok=false\nif $PRIVILEGED && [ -n \"$singularity_cmd\" ]; then\n    singularity_ok=true\nfi\n\ndest_default=\"${GALAXY_DESTINATIONS_DEFAULT:-}\"\ndest_docker=\"${GALAXY_DESTINATIONS_DOCKER_DEFAULT:-}\"\n\nif [ -z \"$dest_default\" ] || { $singularity_ok && [ \"$dest_default\" = \"slurm_cluster\" ]; }; then\n    if $singularity_ok; then\n        dest_default=\"slurm_cluster_singularity\"\n    elif $docker_ok; then\n        dest_default=\"slurm_cluster_docker\"\n    else\n        dest_default=\"slurm_cluster\"\n    fi\n    export GALAXY_DESTINATIONS_DEFAULT=\"$dest_default\"\nfi\n\nif [ -z \"$dest_docker\" ]; then\n    if $docker_ok; then\n        dest_docker=\"slurm_cluster_docker\"\n    else\n        dest_docker=\"$dest_default\"\n    fi\n    export GALAXY_DESTINATIONS_DOCKER_DEFAULT=\"$dest_docker\"\nelse\n    dest_docker=\"$GALAXY_DESTINATIONS_DOCKER_DEFAULT\"\nfi\n\nif $singularity_ok; then\n    export SINGULARITY_CACHEDIR=\"${SINGULARITY_CACHEDIR:-/export/container_cache/singularity/mulled}\"\n    export APPTAINER_CACHEDIR=\"${APPTAINER_CACHEDIR:-$SINGULARITY_CACHEDIR}\"\n    echo \"Container routing: default -> ${dest_default} (Singularity via ${singularity_cmd}); Docker -> ${dest_docker}\"\nelif $docker_ok; then\n    echo \"Container routing: default -> ${dest_default} (Docker socket detected); Docker -> 
${dest_docker}\"\nelse\n    echo \"Container routing: no Docker/Singularity detected; using ${dest_default}\"\nfi\n\ncvmfs_repos=\"${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}\"\ncvmfs_repos=\"${cvmfs_repos//,/ }\"\n\nif $PRIVILEGED; then\n    umount /var/lib/docker\n\n    if command -v mount.cvmfs >/dev/null 2>&1; then\n        chmod 666 /dev/fuse || true\n        for repo in $cvmfs_repos; do\n            repo_dir=\"/cvmfs/$repo\"\n            mkdir -p \"$repo_dir\"\n            if ! mountpoint -q \"$repo_dir\"; then\n                echo \"Mounting CVMFS repo $repo\"\n                mount -t cvmfs \"$repo\" \"$repo_dir\" || echo \"Warning: failed to mount $repo\"\n            fi\n        done\n    else\n        echo \"Info: CVMFS client not available; install CVMFS or use the sidecar via docker-compose --profile cvmfs.\"\n    fi\nelse\n    echo \"Info: CVMFS mounts disabled (not running privileged). Use --privileged or the CVMFS sidecar in docker-compose.\"\nfi\n\nif ! mountpoint -q /cvmfs 2>/dev/null; then\n    for repo in $cvmfs_repos; do\n        repo_dir=\"/cvmfs/$repo\"\n        mkdir -p \"$repo_dir\"\n        if [ \"$repo\" = \"singularity.galaxyproject.org\" ]; then\n            mkdir -p \"$repo_dir/all\"\n        fi\n    done\n    chown -R \"$GALAXY_USER:$GALAXY_USER\" /cvmfs\nfi\n\nif [[ ! 
-z $STARTUP_EXPORT_USER_FILES ]]; then\n    # If /export/ is mounted, export_user_files file moving all data to /export/\n    # symlinks will point from the original location to the new path under /export/\n    # If /export/ is not given, nothing will happen in that step\n    echo \"Checking /export...\"\n    python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT\n    mkdir -p /export/container_cache/singularity/mulled\n    export_cache_owner=\"$(stat -c '%u:%g' /export/container_cache 2>/dev/null || echo '')\"\n    if [[ \"$export_cache_owner\" != \"${GALAXY_UID}:${GALAXY_GID}\" ]]; then\n        chown -R \"$GALAXY_USER:$GALAXY_USER\" /export/container_cache\n    fi\nfi\n\n# Delete compiled templates in case they are out of date\nif [[ ! -z $GALAXY_CONFIG_TEMPLATE_CACHE_PATH ]]; then\n    rm -rf $GALAXY_CONFIG_TEMPLATE_CACHE_PATH/*\nfi\n\n# Enable loading of dependencies on startup. Such as LDAP.\n# Adapted from galaxyproject/galaxy/scripts/common_startup.sh\nif [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]\n    then\n        echo \"Installing optional dependencies in galaxy virtual environment...\"\n        sudo -E -H -u $GALAXY_USER bash -c '\n            : ${GALAXY_WHEELS_INDEX_URL:=\"https://wheels.galaxyproject.org/simple\"}\n            : ${PYPI_INDEX_URL:=\"https://pypi.python.org/simple\"}\n            GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib \"$GALAXY_VIRTUAL_ENV/bin/python\" -c \"import galaxy.dependencies; print(\\\"\\\\n\\\".join(galaxy.dependencies.optional(\\\"$GALAXY_CONFIG_FILE\\\")))\")\n            if [ -n \"$GALAXY_CONDITIONAL_DEPENDENCIES\" ]; then\n                deps_file=\"$(mktemp)\"\n                printf \"%s\\n\" \"$GALAXY_CONDITIONAL_DEPENDENCIES\" > \"$deps_file\"\n                /usr/local/bin/uv pip install \\\n                    --python \"$GALAXY_VIRTUAL_ENV/bin/python\" \\\n                    -r \"$deps_file\" \\\n                    --index-url \"${GALAXY_WHEELS_INDEX_URL}\" \\\n                    
--extra-index-url \"${PYPI_INDEX_URL}\"\n                rm -f \"$deps_file\"\n            fi\n        '\nfi\n\nif [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]\n    then\n        echo \"Installing development requirements in galaxy virtual environment...\"\n        sudo -E -H -u $GALAXY_USER bash -c '\n            : ${GALAXY_WHEELS_INDEX_URL:=\"https://wheels.galaxyproject.org/simple\"}\n            : ${PYPI_INDEX_URL:=\"https://pypi.python.org/simple\"}\n            dev_requirements=\"./lib/galaxy/dependencies/dev-requirements.txt\"\n            if [ -f \"$dev_requirements\" ]; then\n                /usr/local/bin/uv pip install \\\n                    --python \"$GALAXY_VIRTUAL_ENV/bin/python\" \\\n                    -r \"$dev_requirements\" \\\n                    --index-url \"${GALAXY_WHEELS_INDEX_URL}\" \\\n                    --extra-index-url \"${PYPI_INDEX_URL}\"\n            fi\n        '\nfi\n\n# Enable Test Tool Shed\nif [[ ! -z $ENABLE_TTS_INSTALL ]]\n    then\n        echo \"Enable installation from the Test Tool Shed.\"\n        export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml\nfi\n\n# Remove all default tools from Galaxy by default\nif [[ ! -z $BARE ]]\n    then\n        echo \"Remove all tools from the tool_conf.xml file.\"\n        export GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_ROOT_DIR/test/functional/tools/upload_tool_conf.xml\nfi\n\n# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool\nif [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]\n    then\n        if [ ! 
-d \"/tool_deps/_conda/envs/__bcftools@1.5\" ]; then\n            su $GALAXY_USER -c \"/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5\"\n            su $GALAXY_USER -c \"/tool_deps/_conda/bin/conda clean --tarballs --yes\"\n        fi\nfi\n\nif [[ $NONUSE != *\"postgres\"* ]]\n    then\n        # Backward compatibility for exported postgresql directories before version 15.08.\n        # In previous versions postgres has the UID/GID of 102/106. We changed this in\n        # https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and\n        # GALAXY_POSTGRES_GID=1550\n        if [ -e /export/postgresql/ ];\n            then\n                if [ `stat -c %g /export/postgresql/` == \"106\" ];\n                    then\n                        chown -R postgres:postgres /export/postgresql/\n                fi\n        fi\nfi\n\n\nif [[ ! -z $ENABLE_CONDOR ]]\n    then\n        if [[ ! 
-z $CONDOR_HOST ]]\n        then\n            echo \"Enabling Condor with external scheduler at $CONDOR_HOST\"\n        echo \"# Config generated by startup.sh\nCONDOR_HOST = $CONDOR_HOST\nALLOW_ADMINISTRATOR = *\nALLOW_OWNER = *\nALLOW_READ = *\nALLOW_WRITE = *\nALLOW_CLIENT = *\nALLOW_NEGOTIATOR = *\nDAEMON_LIST = MASTER, SCHEDD\nUID_DOMAIN = galaxy\nDISCARD_SESSION_KEYRING_ON_STARTUP = False\nTRUST_UID_DOMAIN = true\" > /etc/condor/condor_config.local\n        fi\n\n        if [[ -e /export/condor_config ]]\n        then\n            echo \"Replacing Condor config by locally supplied config from /export/condor_config\"\n            rm -f /etc/condor/condor_config\n            ln -s /export/condor_config /etc/condor/condor_config\n        fi\nfi\n\n\n# Copy or link the slurm/munge config files\nif [ -e /export/slurm.conf ]\nthen\n    rm -f /etc/slurm/slurm.conf\n    ln -s /export/slurm.conf /etc/slurm/slurm.conf\nelse\n    # Configure SLURM with runtime hostname.\n    # Use absolute path to python so virtualenv is not used.\n    mkdir -p /etc/slurm\n    /usr/bin/python /usr/sbin/configure_slurm.py\nfi\nmkdir -p /tmp/slurm /var/log/slurm /var/lib/slurm/slurmctld\nchown -R $GALAXY_USER:$GALAXY_USER /tmp/slurm /var/log/slurm /var/lib/slurm\nif [ -e /export/munge.key ]\nthen\n    rm -f /etc/munge/munge.key\n    ln -s /export/munge.key /etc/munge/munge.key\n    chmod 400 /export/munge.key\nfi\n\n# link the gridengine config file\nif [ -e /export/act_qmaster ]\nthen\n    rm -f /var/lib/gridengine/default/common/act_qmaster\n    ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster\nfi\n\n# Waits until postgres is ready\nfunction wait_for_postgres {\n    echo \"Checking if database is up and running\"\n    until /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 5; echo \"Waiting for database\"; done\n    echo \"Database connected\"\n}\n\n# Waits until rabbitmq is ready\nfunction wait_for_rabbitmq {\n    echo \"Checking if RabbitMQ is up and 
running\"\n    until rabbitmqctl status 2>&1 >/dev/null; do sleep 5; echo \"Waiting for RabbitMQ\"; done\n    echo \"RabbitMQ is ready\"\n}\n\n# Waits until docker daemon is ready\nfunction wait_for_docker {\n    echo \"Checking if docker daemon is up and running\"\n    until docker version 2>&1 >/dev/null; do sleep 5; echo \"Waiting for docker daemon\"; done\n    echo \"Docker daemon is ready\"\n}\n\nfunction wait_for_munge {\n    local retries=20\n    echo \"Checking if munge is up and running\"\n    until munge -n >/dev/null 2>&1; do\n        if [[ $retries -le 0 ]]; then\n            echo \"Munge did not become ready\"\n            return 1\n        fi\n        retries=$((retries - 1))\n        sleep 1\n    done\n    echo \"Munge is ready\"\n}\n\n# $NONUSE can be set to include postgres, cron, proftp, nodejs, condor, slurmd, slurmctld,\n# celery, rabbitmq, redis, flower or tusd\n# if included we will _not_ start these services.\nfunction start_supervisor {\n    supervisord -c /etc/supervisor/supervisord.conf\n    sleep 5\n\n    if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then\n        if [[ $NONUSE != *\"postgres\"* ]]\n        then\n            echo \"Starting postgres\"\n            supervisorctl start postgresql\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then\n        if [[ $NONUSE != *\"cron\"* ]]\n        then\n            echo \"Starting cron\"\n            supervisorctl start cron\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then\n        if [[ $NONUSE != *\"proftp\"* ]]\n        then\n            echo \"Starting ProFTP\"\n            supervisorctl start proftpd\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then\n        if [[ $NONUSE != *\"condor\"* ]]\n        then\n            echo \"Starting condor\"\n            supervisorctl start condor\n        fi\n    fi\n\n    if [[ ! 
-z $SUPERVISOR_MANAGE_SLURM ]]; then\n        echo \"Starting munge\"\n        supervisorctl start munge\n        wait_for_munge || true\n\n        if [[ $NONUSE != *\"slurmctld\"* ]]\n        then\n            echo \"Starting slurmctld\"\n            supervisorctl start slurmctld\n        fi\n        if [[ $NONUSE != *\"slurmd\"* ]]\n        then\n            echo \"Starting slurmd\"\n            supervisorctl start slurmd\n        fi\n    else\n        echo \"Starting munge\"\n        mkdir -p /var/run/munge && chown -R root:root /var/run/munge\n        /usr/sbin/munged -f -F --num-threads=\"${MUNGE_NUM_THREADS:-2}\" &\n        wait_for_munge || true\n\n        if [[ $NONUSE != *\"slurmctld\"* ]]\n        then\n            echo \"Starting slurmctld\"\n            /usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log\n        fi\n        if [[ $NONUSE != *\"slurmd\"* ]]\n        then\n            echo \"Starting slurmd\"\n            /usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_RABBITMQ ]]; then\n        if [[ $NONUSE != *\"rabbitmq\"* ]]\n        then\n            echo \"Starting rabbitmq\"\n            supervisorctl start rabbitmq\n\n            wait_for_rabbitmq\n            echo \"Configuring rabbitmq users\"\n            ansible-playbook -c local /usr/local/bin/configure_rabbitmq_users.yml &> /dev/null\n\n            echo \"Restarting rabbitmq\"\n            supervisorctl restart rabbitmq\n        fi    \n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_REDIS ]]; then\n        if [[ $NONUSE != *\"redis\"* ]]\n        then\n            echo \"Starting redis\"\n            supervisorctl start redis\n        fi\n    fi\n\n    if [[ ! 
-z $SUPERVISOR_MANAGE_FLOWER ]]; then \n        if [[ $NONUSE != *\"flower\"* && $NONUSE != *\"celery\"* && $NONUSE != *\"rabbitmq\"* ]]\n        then\n            echo \"Starting flower\"\n            supervisorctl start flower\n        fi\n    fi\n}\n\nfunction start_gravity {\n    if [[ ! -z $GRAVITY_MANAGE_CELERY ]]; then\n        if [[ $NONUSE == *\"celery\"* ]]\n        then\n            echo \"Disabling Galaxy celery app\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.enable\" \"false\" &> /dev/null\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.enable_beat\" \"false\" &> /dev/null\n        else\n            export GALAXY_CONFIG_ENABLE_CELERY_TASKS='true'\n            if [[ $NONUSE != *\"redis\"* ]]\n            then\n                # Configure Galaxy to use Redis as the result backend for Celery tasks\n                ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #celery_conf:' replace='  celery_conf:'\" &> /dev/null\n                ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #  result_backend:.*' replace='    result_backend: redis://127.0.0.1:6379/0'\" &> /dev/null \n            fi\n        fi\n    fi\n\n    if [[ ! -z $GRAVITY_MANAGE_GX_IT_PROXY ]]; then\n        if [[ $NONUSE == *\"nodejs\"* ]]\n        then\n            echo \"Disabling nodejs\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.gx_it_proxy.enable\" \"false\" &> /dev/null\n        else\n            # TODO: Remove this after gravity config manager is updated to handle env vars properly\n            ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #interactivetools_enable:.*' replace='  interactivetools_enable: true'\" &> /dev/null\n        fi\n    fi\n\n    if [[ ! 
-z $GRAVITY_MANAGE_TUSD ]]; then\n        if [[ $NONUSE == *\"tusd\"* ]]\n        then\n            echo \"Disabling Galaxy tusd app\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.tusd.enable\" \"false\" &> /dev/null\n            cp /etc/nginx/delegated_uploads.conf /etc/nginx/delegated_uploads.conf.source \n            echo \"# No delegated uploads\" > /etc/nginx/delegated_uploads.conf\n        else\n            # TODO: Remove this after gravity config manager is updated to handle env vars properly\n            ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #galaxy_infrastructure_url:.*' replace='  galaxy_infrastructure_url: ${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}'\" &> /dev/null\n        fi\n    fi\n\n    if [[ $NONUSE != *\"rabbitmq\"* ]]\n    then\n        # Set AMQP internal connection for Galaxy\n        export GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION=\"pyamqp://galaxy:galaxy@localhost:5672/galaxy\"\n    fi\n\n    # Set the SUPERVISORD_SOCKET to overwrite gravity's default.\n    # The default will put the socket into the export dir, into gravity's state directory. And this caused some problems to start supervisord.  \n    export SUPERVISORD_SOCKET=${SUPERVISORD_SOCKET:-/tmp/galaxy_supervisord.sock}\n    # Start galaxy services using gravity\n    /usr/local/bin/galaxyctl -d start\n}\n\nif [[ ! 
-z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then\n    if [[ $NONUSE != *\"postgres\"* ]]\n    then\n        # Change the data_directory of postgresql in the main config file\n        ansible localhost -m lineinfile -a \"line='data_directory = \\'$PG_DATA_DIR_HOST\\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'\" &> /dev/null\n    fi\nfi\n\nif $PRIVILEGED; then\n    # In privileged mode autofs and CVMFS may be available, so only append existing files.\n    export GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH=\"${GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH},/cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml\"\n\n    echo \"Enable Galaxy Interactive Tools.\"\n    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=True\n    export GALAXY_CONFIG_TOOL_CONFIG_FILE=\"$GALAXY_CONFIG_TOOL_CONFIG_FILE,$GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE\"\n\n    # Update domain-based interactive tools nginx configuration with the galaxy domain if provided\n    if [[ ! -z $GALAXY_DOMAIN ]]; then\n        sed -i \"s/\\(\\.interactivetool\\.\\)[^;]*/\\1$GALAXY_DOMAIN/g\" /etc/nginx/interactive_tools_common.conf\n    fi\n\n    if [[ -z $DOCKER_PARENT ]]; then\n        #build the docker in docker environment\n        # Ensure cgroup mounts are set up without triggering dind \"no command\" warnings.\n        bash /root/cgroupfs_mount.sh true\n        start_supervisor\n        start_gravity\n        supervisorctl start docker\n        wait_for_docker\n    else\n        #inheriting /var/run/docker.sock from parent, assume that you need to\n        #run docker with sudo to validate\n        echo \"$GALAXY_USER ALL = NOPASSWD : ALL\" >> /etc/sudoers\n        start_supervisor\n        start_gravity\n    fi\n    if  [[ ! -z $PULL_IT_IMAGES ]]; then\n        echo \"About to pull IT images. 
Depending on the size, this may take a while!\"\n\n        for it in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do\n            enabled_var_name=\"GALAXY_IT_FETCH_${it}\";\n            if [[ ${!enabled_var_name} ]]; then\n                # Store name in a var\n                image_var_name=\"GALAXY_IT_${it}_IMAGE\"\n                # And then read from that var\n                docker pull \"${!image_var_name}\"\n            fi\n        done\n    fi\nelse\n    echo \"Disable Galaxy Interactive Tools. Start with --privileged to enable ITs.\"\n    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=False\n    start_supervisor\n    start_gravity\nfi\n\nwait_for_postgres\n\n# Make sure the database is automatically updated\nif [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]\nthen\n    echo \"Updating Galaxy database\"\n    sh manage_db.sh -c $GALAXY_CONFIG_FILE upgrade\nfi\n\n# In case the user wants the default admin to be created, do so.\nif [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]]\n    then\n        echo \"Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing\"\n        python /usr/local/bin/create_galaxy_user.py --user \"$GALAXY_DEFAULT_ADMIN_EMAIL\" --password \"$GALAXY_DEFAULT_ADMIN_PASSWORD\" \\\n        -c \"$GALAXY_CONFIG_FILE\" --username \"$GALAXY_DEFAULT_ADMIN_USER\" --key \"$GALAXY_DEFAULT_ADMIN_KEY\"\n    # If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.\n    # then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables\n    # visible here.\n    # The file needs to be executable (chmod a+x post-start-actions.sh)\n        if [ -x /export/post-start-actions.sh ]\n            then\n           # uses ephemeris, present in docker-galaxy-stable, to wait for the local instance\n           /tool_deps/_conda/bin/galaxy-wait -g 
http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log &&\n           /export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &\n    fi\nfi\n\n# Reinstall tools if the user want to\nif [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]]\n    then\n        /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > /home/galaxy/logs/post-start-actions.log &&\n        OLDIFS=$IFS\n        IFS=','\n        for TOOL_YML in `echo \"$GALAXY_AUTO_UPDATE_TOOLS\"`\n        do\n            echo \"Installing tools from $TOOL_YML\"\n            /tool_deps/_conda/bin/shed-tools install -g \"http://127.0.0.1\" -a \"$GALAXY_DEFAULT_ADMIN_KEY\" -t \"$TOOL_YML\"\n            /tool_deps/_conda/bin/conda clean --tarballs --yes\n        done\n        IFS=$OLDIFS\nfi\n\n# migrate custom Visualisations (Galaxy plugins)\n# this is needed for by the new client build system\npython3 ${GALAXY_ROOT_DIR}/scripts/plugin_staging.py\n\n# Enable verbose output\nif [ `echo ${GALAXY_LOGGING:-'no'} | tr [:upper:] [:lower:]` = \"full\" ]\n    then\n        tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log\n    else\n        tail -f $GALAXY_LOGS_DIR/*.log\nfi\n"
  },
  {
    "path": "galaxy/startup2.sh",
    "content": "#!/usr/bin/env bash\n\nSTARTUP_LOG_DIR=\"${STARTUP_LOG_DIR:-${GALAXY_LOGS_DIR:-/home/galaxy/logs}}\"\nSTARTUP_LOG=\"${STARTUP_LOG:-$STARTUP_LOG_DIR/startup2.log}\"\nSTARTUP_LOG_LEVEL=\"${STARTUP_LOG_LEVEL:-info}\"\nSTARTUP_LOG_TAIL=\"${STARTUP_LOG_TAIL:-200}\"\nSTARTUP_PARALLEL=\"${STARTUP_PARALLEL:-true}\"\nSTARTUP_VALIDATE=\"${STARTUP_VALIDATE:-false}\"\nSTARTUP_WAIT_TIMEOUT=\"${STARTUP_WAIT_TIMEOUT:-600}\"\nSTARTUP_GALAXY_URL=\"${STARTUP_GALAXY_URL:-http://127.0.0.1}\"\nSTARTUP_OUT_FD=3\n\nmkdir -p \"$STARTUP_LOG_DIR\"\nexec 3>&1\nif [ \"$STARTUP_LOG_LEVEL\" = \"verbose\" ]; then\n    exec > >(tee -a \"$STARTUP_LOG\") 2>&1\n    STARTUP_OUT_FD=1\nelse\n    exec >>\"$STARTUP_LOG\" 2>&1\nfi\n\nSTARTUP_COLOR=\"${STARTUP_COLOR:-auto}\"\nSTARTUP_USE_COLOR=false\nif [ \"$STARTUP_COLOR\" = \"always\" ]; then\n    STARTUP_USE_COLOR=true\nelif [ \"$STARTUP_COLOR\" = \"auto\" ] && [ -t \"${STARTUP_OUT_FD}\" ]; then\n    STARTUP_USE_COLOR=true\nfi\n\nif $STARTUP_USE_COLOR; then\n    COLOR_RESET=$'\\033[0m'\n    COLOR_INFO=$'\\033[36m'\n    COLOR_WARN=$'\\033[33m'\n    COLOR_ERROR=$'\\033[31m'\n    COLOR_SUCCESS=$'\\033[32m'\nelse\n    COLOR_RESET=\"\"\n    COLOR_INFO=\"\"\n    COLOR_WARN=\"\"\n    COLOR_ERROR=\"\"\n    COLOR_SUCCESS=\"\"\nfi\n\nprint_log() {\n    local color=\"$1\"\n    shift\n    if [ -n \"$color\" ]; then\n        printf '%s%s%s\\n' \"$color\" \"$*\" \"$COLOR_RESET\" >&${STARTUP_OUT_FD}\n    else\n        printf '%s\\n' \"$*\" >&${STARTUP_OUT_FD}\n    fi\n}\n\nlog_info() {\n    if [ \"$STARTUP_LOG_LEVEL\" != \"quiet\" ]; then\n        print_log \"$COLOR_INFO\" \"$*\"\n    fi\n}\n\nlog_success() {\n    if [ \"$STARTUP_LOG_LEVEL\" != \"quiet\" ]; then\n        print_log \"$COLOR_SUCCESS\" \"$*\"\n    fi\n}\n\nlog_warn() {\n    print_log \"$COLOR_WARN\" \"Warning: $*\"\n}\n\nlog_error() {\n    print_log \"$COLOR_ERROR\" \"Error: $*\"\n}\n\nshow_runtime_summary() {\n    local gunicorn_workers=\"${GUNICORN_WORKERS:-2}\"\n    local 
handler_processes=\"${GALAXY_HANDLER_NUMPROCS:-2}\"\n    local celery_workers=\"${CELERY_WORKERS:-2}\"\n    local destination_default=\"${GALAXY_DESTINATIONS_DEFAULT:-slurm_cluster}\"\n    local slurm_enabled=\"${GALAXY_RUNNERS_ENABLE_SLURM:-default}\"\n    local condor_enabled=\"${GALAXY_RUNNERS_ENABLE_CONDOR:-default}\"\n    local docker_enabled=\"${GALAXY_DOCKER_ENABLED:-default}\"\n    local mulled_enabled=\"${GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS:-default}\"\n    local conda_auto=\"${GALAXY_CONFIG_CONDA_AUTO_INSTALL:-default}\"\n    local conda_prefix=\"${GALAXY_CONDA_PREFIX:-/tool_deps/_conda}\"\n    local docker_label=\"default (galaxy.yml)\"\n    local mulled_label=\"default (galaxy.yml)\"\n\n    if [ -n \"${GALAXY_DOCKER_ENABLED+x}\" ]; then\n        docker_label=\"$docker_enabled\"\n    fi\n    if [ -n \"${GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS+x}\" ]; then\n        mulled_label=\"$mulled_enabled\"\n    fi\n\n    log_info \"Runtime summary:\"\n    log_info \"  Web workers (gunicorn): ${gunicorn_workers}\"\n    log_info \"  Job handlers: ${handler_processes}\"\n    log_info \"  Celery workers: ${celery_workers}\"\n    log_info \"  Default destination: ${destination_default}\"\n    log_info \"  Runners: slurm=${slurm_enabled}, condor=${condor_enabled}\"\n    log_info \"  Containers: docker=${docker_label}, mulled=${mulled_label}\"\n    log_info \"  Conda: auto_install=${conda_auto}, prefix=${conda_prefix}\"\n    log_info \"  Docs: https://github.com/bgruening/docker-galaxy\"\n}\n\nmask_sensitive_value() {\n    local name=\"$1\"\n    local value=\"$2\"\n\n    case \"$name\" in\n        *KEY*|*SECRET*|*TOKEN*|*PASSWORD*|*PASSPHRASE*)\n            printf '***'\n            ;;\n        *)\n            printf '%s' \"$value\"\n            ;;\n    esac\n}\n\nshow_galaxy_env_summary() {\n    local envs\n    envs=\"$(env | LC_ALL=C sort | grep '^GALAXY_')\" || true\n\n    if [ -z \"$envs\" ]; then\n        log_info \"Environment overrides (GALAXY_*): none\"\n       
 return\n    fi\n\n    log_info \"Environment overrides (GALAXY_*):\"\n    while IFS='=' read -r name value; do\n        if [ -z \"$name\" ]; then\n            continue\n        fi\n        local display_value\n        display_value=\"$(mask_sensitive_value \"$name\" \"$value\")\"\n        if [ \"${#display_value}\" -gt 200 ]; then\n            display_value=\"${display_value:0:200}...\"\n        fi\n        log_info \"  ${name}=${display_value}\"\n    done <<< \"$envs\"\n}\n\nshow_startup_log_tail() {\n    tail -n \"$STARTUP_LOG_TAIL\" \"$STARTUP_LOG\" >&${STARTUP_OUT_FD} || true\n}\n\nshow_failure_logs() {\n    log_error \"Startup failed; showing recent logs\"\n    show_startup_log_tail\n    if [ -d \"${GALAXY_LOGS_DIR:-}\" ]; then\n        for log in \"$GALAXY_LOGS_DIR\"/*.log; do\n            if [ -f \"$log\" ]; then\n                printf '\\n==> %s <==\\n' \"$log\" >&${STARTUP_OUT_FD}\n                tail -n \"$STARTUP_LOG_TAIL\" \"$log\" >&${STARTUP_OUT_FD} || true\n            fi\n        done\n    fi\n}\n\nlog_info \"Starting Galaxy container (startup2). Logs: $STARTUP_LOG\"\n\n# This is needed for Docker compose to have a unified alias for the main container.\n# Modifying /etc/hosts can only happen during runtime not during build-time\necho \"127.0.0.1      galaxy\" >> /etc/hosts\n\n# If the Galaxy config file is not in the expected place, copy from the sample\n# and hope for the best (that the admin has done all the setup through env vars.)\nif [ ! 
-f $GALAXY_CONFIG_FILE ]\n  then\n  # this should succesfully copy either .yml or .ini sample file to the expected location\n  cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE\nfi\nlog_info \"Configuring runtime settings\"\n\n# Set number of Gunicorn workers via GUNICORN_WORKERS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.gunicorn.workers\" \"${GUNICORN_WORKERS:-2}\" &> /dev/null\n\n# Set number of Celery workers via CELERY_WORKERS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.concurrency\" \"${CELERY_WORKERS:-2}\" &> /dev/null\n\n# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2\npython3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.handlers.handler.processes\" \"${GALAXY_HANDLER_NUMPROCS:-2}\" &> /dev/null\n\n# Initialize variables for optional ansible parameters\nANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=\"\"\n\n# Configure proxy prefix filtering\nif [[ ! 
-z $PROXY_PREFIX ]]\nthen\n    log_info \"Configuring proxy prefix: $PROXY_PREFIX\"\n    export GALAXY_CONFIG_GALAXY_URL_PREFIX=\"$PROXY_PREFIX\"\n\n    # TODO: Set this using GALAXY_CONFIG_INTERACTIVETOOLS_BASE_PATH after gravity config manager is updated to handle env vars properly\n    ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #interactivetools_base_path:.*' replace='  interactivetools_base_path: ${PROXY_PREFIX}'\" &> /dev/null\n    \n    python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.tusd.extra_args\" \"-behind-proxy -base-path $PROXY_PREFIX/api/upload/resumable_upload\" &> /dev/null\n\n    ansible localhost -m replace -a \"path=/etc/flower/flowerconfig.py regexp='^url_prefix.*' replace='url_prefix = \\\"$PROXY_PREFIX/flower\\\"'\" &> /dev/null\n\n    # Fix path to html assets\n    ansible localhost -m replace -a \"dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\\\"|\\')[/\\\\w]*(/static)' replace='\\\\1${PROXY_PREFIX}\\\\2'\" &> /dev/null\n    \n    # Set some other vars based on that prefix\n    if [[ -z \"$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX\" ]]\n    then\n        export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX=\"$PROXY_PREFIX/gie_proxy\"\n    fi\n\n    if [[ ! 
-z $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL ]]\n    then\n        export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=\"${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}${PROXY_PREFIX}\"\n    fi\n\n    if [[ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" || \"$USE_HTTPS\" != \"False\" ]]\n    then\n        ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX=\"--extra-vars nginx_prefix_location=$PROXY_PREFIX\"\n    else\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars nginx_prefix_location=\"$PROXY_PREFIX\"\n    fi\nfi\n\nif [ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" ]\nthen\n    log_info \"Setting up LetsEncrypt\"\n    PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH ansible-playbook -c local /ansible/nginx.yml \\\n    --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n    --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n    --extra-vars nginx_ssl_role=usegalaxy_eu.certbot \\\n    --extra-vars \"{\\\"certbot_domains\\\": [\\\"$GALAXY_DOMAIN\\\"]}\" \\\n    --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/user/privkey-$GALAXY_USER.pem \\\n    --extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/fullchain.pem \\\n    $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\nfi\nif [ \"$USE_HTTPS\" != \"False\" ]\nthen\n    if [ -f /export/server.key -a -f /export/server.crt ]\n    then\n        log_info \"Using SSL keys from /export\"\n        ssl_key_content=$(cat /export/server.key | sed 's/$/\\\\n/' | tr -d '\\n')\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n        --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n        --extra-vars nginx_ssl_src_dir=/export \\\n        --extra-vars \"{\\\"sslkeys\\\": {\\\"server.key\\\": \\\"$ssl_key_content\\\"}}\" \\\n        --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/server.key 
\\\n        --extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/server.crt \\\n        $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\n    else\n        log_info \"Setting up self-signed SSL keys\"\n        ansible-playbook -c local /ansible/nginx.yml \\\n        --extra-vars '{\"nginx_servers\": [\"galaxy_redirect_ssl\", \"interactive_tools_redirect_ssl\"]}' \\\n        --extra-vars '{\"nginx_ssl_servers\": [\"galaxy_https\", \"interactive_tools_https\"]}' \\\n        --extra-vars nginx_ssl_role=galaxyproject.self_signed_certs \\\n        --extra-vars nginx_conf_ssl_certificate_key=/etc/ssl/private/$GALAXY_DOMAIN.pem \\\n        --extra-vars nginx_conf_ssl_certificate=/etc/ssl/certs/$GALAXY_DOMAIN.crt \\\n        --extra-vars \"{\\\"openssl_domains\\\": [\\\"$GALAXY_DOMAIN\\\"]}\" \\\n        $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX\n    fi\nfi\n\nif [[ \"$USE_HTTPS_LETSENCRYPT\" != \"False\" || \"$USE_HTTPS\" != \"False\" ]]\nthen\n    # Check if GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL has http but not https\n    if [[ $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL == \"http:\"* ]]\n    then\n        GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL/http:/https:}\n        export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL\n    fi\nfi\n\n# Disable authentication of flower\nif [[ ! -z $DISABLE_FLOWER_AUTH ]]; then\n    # disable authentification\n    log_info \"Disabling flower authentication\"\n    cp /etc/nginx/flower_auth.conf /etc/nginx/flower_auth.conf.source\n    echo \"# No authentication defined\" > /etc/nginx/flower_auth.conf\nfi\n\n# Try to guess if we are running under --privileged mode\nif [[ ! -z $HOST_DOCKER_LEGACY ]]; then\n    if mount | grep \"/proc/kcore\"; then\n        PRIVILEGED=false\n    else\n        PRIVILEGED=true\n    fi\nelse\n    # Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode\n    ip link add dummy0 type dummy 2>/dev/null\n    if [[ $? 
-eq 0 ]]; then\n        PRIVILEGED=true\n        # clean the dummy0 link\n        ip link delete dummy0 2>/dev/null\n    else\n        PRIVILEGED=false\n    fi\nfi\n\ncd $GALAXY_ROOT_DIR\n. $GALAXY_VIRTUAL_ENV/bin/activate\n\n# Decide container routing based on runtime capabilities; prefer Singularity when available.\ndocker_ok=false\nif [ -S /var/run/docker.sock ] || command -v docker >/dev/null 2>&1; then\n    docker_ok=true\nfi\n\nsingularity_cmd=\"\"\nif command -v singularity >/dev/null 2>&1; then\n    singularity_cmd=\"singularity\"\nelif command -v apptainer >/dev/null 2>&1; then\n    singularity_cmd=\"apptainer\"\nfi\n\nsingularity_ok=false\nif $PRIVILEGED && [ -n \"$singularity_cmd\" ]; then\n    singularity_ok=true\nfi\n\ndest_default=\"${GALAXY_DESTINATIONS_DEFAULT:-}\"\ndest_docker=\"${GALAXY_DESTINATIONS_DOCKER_DEFAULT:-}\"\n\nif [ -z \"$dest_default\" ] || { $singularity_ok && [ \"$dest_default\" = \"slurm_cluster\" ]; }; then\n    if $singularity_ok; then\n        dest_default=\"slurm_cluster_singularity\"\n    elif $docker_ok; then\n        dest_default=\"slurm_cluster_docker\"\n    else\n        dest_default=\"slurm_cluster\"\n    fi\n    export GALAXY_DESTINATIONS_DEFAULT=\"$dest_default\"\nfi\n\nif [ -z \"$dest_docker\" ]; then\n    if $docker_ok; then\n        dest_docker=\"slurm_cluster_docker\"\n    else\n        dest_docker=\"$dest_default\"\n    fi\n    export GALAXY_DESTINATIONS_DOCKER_DEFAULT=\"$dest_docker\"\nelse\n    dest_docker=\"$GALAXY_DESTINATIONS_DOCKER_DEFAULT\"\nfi\n\nif $singularity_ok; then\n    export SINGULARITY_CACHEDIR=\"${SINGULARITY_CACHEDIR:-/export/container_cache/singularity/mulled}\"\n    export APPTAINER_CACHEDIR=\"${APPTAINER_CACHEDIR:-$SINGULARITY_CACHEDIR}\"\n    log_info \"Container routing: default -> ${dest_default} (Singularity via ${singularity_cmd}); Docker -> ${dest_docker}\"\nelif $docker_ok; then\n    log_info \"Container routing: default -> ${dest_default} (Docker socket detected); Docker -> 
${dest_docker}\"\nelse\n    log_warn \"Container routing: no Docker/Singularity detected; using ${dest_default}\"\nfi\n\ncvmfs_repos=\"${CVMFS_REPOSITORIES:-data.galaxyproject.org singularity.galaxyproject.org}\"\ncvmfs_repos=\"${cvmfs_repos//,/ }\"\ncvmfs_autofs_configured=false\nif [ -f /etc/auto.cvmfs ] || [ -f /etc/auto.master.d/cvmfs.autofs ]; then\n    cvmfs_autofs_configured=true\nfi\n\nif $PRIVILEGED; then\n    log_info \"Configuring CVMFS mounts (privileged)\"\n    umount /var/lib/docker\n\n    if command -v mount.cvmfs >/dev/null 2>&1; then\n        chmod 666 /dev/fuse || true\n        if $cvmfs_autofs_configured; then\n            log_info \"CVMFS autofs configured; mounts will appear on first access after services start.\"\n        else\n            for repo in $cvmfs_repos; do\n                repo_dir=\"/cvmfs/$repo\"\n                mkdir -p \"$repo_dir\"\n                if ! mountpoint -q \"$repo_dir\"; then\n                    log_info \"Mounting CVMFS repo $repo\"\n                    if ! mount -t cvmfs \"$repo\" \"$repo_dir\"; then\n                        sleep 2\n                        mount -t cvmfs \"$repo\" \"$repo_dir\" || log_warn \"Failed to mount CVMFS repo $repo\"\n                    fi\n                fi\n            done\n        fi\n    else\n        log_info \"CVMFS client not available; install CVMFS or use the sidecar via docker-compose --profile cvmfs.\"\n    fi\nelse\n    log_info \"CVMFS mounts disabled (not running privileged). Use --privileged or the CVMFS sidecar in docker-compose.\"\nfi\n\nif ! mountpoint -q /cvmfs 2>/dev/null; then\n    for repo in $cvmfs_repos; do\n        repo_dir=\"/cvmfs/$repo\"\n        mkdir -p \"$repo_dir\"\n        if [ \"$repo\" = \"singularity.galaxyproject.org\" ]; then\n            mkdir -p \"$repo_dir/all\"\n        fi\n    done\n    chown -R \"$GALAXY_USER:$GALAXY_USER\" /cvmfs\nfi\n\nshow_runtime_summary\nshow_galaxy_env_summary\n\nif [[ ! 
-z $STARTUP_EXPORT_USER_FILES ]]; then\n    # If /export/ is mounted, export_user_files file moving all data to /export/\n    # symlinks will point from the original location to the new path under /export/\n    # If /export/ is not given, nothing will happen in that step\n    log_info \"Checking /export...\"\n    python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT\n    mkdir -p /export/container_cache/singularity/mulled\n    export_cache_owner=\"$(stat -c '%u:%g' /export/container_cache 2>/dev/null || echo '')\"\n    if [[ \"$export_cache_owner\" != \"${GALAXY_UID}:${GALAXY_GID}\" ]]; then\n        chown -R \"$GALAXY_USER:$GALAXY_USER\" /export/container_cache\n    fi\nfi\n\n# Delete compiled templates in case they are out of date\nif [[ ! -z $GALAXY_CONFIG_TEMPLATE_CACHE_PATH ]]; then\n    rm -rf $GALAXY_CONFIG_TEMPLATE_CACHE_PATH/*\nfi\n\n# Enable loading of dependencies on startup. Such as LDAP.\n# Adapted from galaxyproject/galaxy/scripts/common_startup.sh\nif [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]\n    then\n        log_info \"Installing optional Galaxy dependencies\"\n        sudo -E -H -u $GALAXY_USER bash -c '\n            : ${GALAXY_WHEELS_INDEX_URL:=\"https://wheels.galaxyproject.org/simple\"}\n            : ${PYPI_INDEX_URL:=\"https://pypi.python.org/simple\"}\n            GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib \"$GALAXY_VIRTUAL_ENV/bin/python\" -c \"import galaxy.dependencies; print(\\\"\\\\n\\\".join(galaxy.dependencies.optional(\\\"$GALAXY_CONFIG_FILE\\\")))\")\n            if [ -n \"$GALAXY_CONDITIONAL_DEPENDENCIES\" ]; then\n                deps_file=\"$(mktemp)\"\n                printf \"%s\\n\" \"$GALAXY_CONDITIONAL_DEPENDENCIES\" > \"$deps_file\"\n                /usr/local/bin/uv pip install \\\n                    --python \"$GALAXY_VIRTUAL_ENV/bin/python\" \\\n                    -r \"$deps_file\" \\\n                    --index-url \"${GALAXY_WHEELS_INDEX_URL}\" \\\n                    --extra-index-url 
\"${PYPI_INDEX_URL}\"\n                rm -f \"$deps_file\"\n            fi\n        '\nfi\n\nif [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]\n    then\n        echo \"Installing development requirements in galaxy virtual environment...\"\n        sudo -E -H -u $GALAXY_USER bash -c '\n            : ${GALAXY_WHEELS_INDEX_URL:=\"https://wheels.galaxyproject.org/simple\"}\n            : ${PYPI_INDEX_URL:=\"https://pypi.python.org/simple\"}\n            dev_requirements=\"./lib/galaxy/dependencies/dev-requirements.txt\"\n            if [ -f \"$dev_requirements\" ]; then\n                /usr/local/bin/uv pip install \\\n                    --python \"$GALAXY_VIRTUAL_ENV/bin/python\" \\\n                    -r \"$dev_requirements\" \\\n                    --index-url \"${GALAXY_WHEELS_INDEX_URL}\" \\\n                    --extra-index-url \"${PYPI_INDEX_URL}\"\n            fi\n        '\nfi\n\n# Enable Test Tool Shed\nif [[ ! -z $ENABLE_TTS_INSTALL ]]\n    then\n        log_info \"Enabling installation from the Test Tool Shed\"\n        export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml\nfi\n\n# Remove all default tools from Galaxy by default\nif [[ ! -z $BARE ]]\n    then\n        log_info \"Removing default tools from tool_conf.xml\"\n        export GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_ROOT_DIR/test/functional/tools/upload_tool_conf.xml\nfi\n\n# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool\nif [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]\n    then\n        if [ ! 
-d \"/tool_deps/_conda/envs/__bcftools@1.5\" ]; then\n            su $GALAXY_USER -c \"/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5\"\n            su $GALAXY_USER -c \"/tool_deps/_conda/bin/conda clean --tarballs --yes\"\n        fi\nfi\n\nif [[ $NONUSE != *\"postgres\"* ]]\n    then\n        # Backward compatibility for exported postgresql directories before version 15.08.\n        # In previous versions postgres has the UID/GID of 102/106. We changed this in\n        # https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and\n        # GALAXY_POSTGRES_GID=1550\n        if [ -e /export/postgresql/ ];\n            then\n                if [ `stat -c %g /export/postgresql/` == \"106\" ];\n                    then\n                        chown -R postgres:postgres /export/postgresql/\n                fi\n        fi\nfi\n\n\nif [[ ! -z $ENABLE_CONDOR ]]\n    then\n        if [[ ! 
-z $CONDOR_HOST ]]\n        then\n            log_info \"Enabling Condor with external scheduler at $CONDOR_HOST\"\n        echo \"# Config generated by startup.sh\nCONDOR_HOST = $CONDOR_HOST\nALLOW_ADMINISTRATOR = *\nALLOW_OWNER = *\nALLOW_READ = *\nALLOW_WRITE = *\nALLOW_CLIENT = *\nALLOW_NEGOTIATOR = *\nDAEMON_LIST = MASTER, SCHEDD\nUID_DOMAIN = galaxy\nDISCARD_SESSION_KEYRING_ON_STARTUP = False\nTRUST_UID_DOMAIN = true\" > /etc/condor/condor_config.local\n        fi\n\n        if [[ -e /export/condor_config ]]\n        then\n            echo \"Replacing Condor config by locally supplied config from /export/condor_config\"\n            rm -f /etc/condor/condor_config\n            ln -s /export/condor_config /etc/condor/condor_config\n        fi\nfi\n\n\n# Copy or link the slurm/munge config files\nif [ -e /export/slurm.conf ]\nthen\n    rm -f /etc/slurm/slurm.conf\n    ln -s /export/slurm.conf /etc/slurm/slurm.conf\nelse\n    # Configure SLURM with runtime hostname.\n    # Use absolute path to python so virtualenv is not used.\n    mkdir -p /etc/slurm\n    /usr/bin/python /usr/sbin/configure_slurm.py\nfi\nmkdir -p /tmp/slurm /var/log/slurm /var/lib/slurm/slurmctld\nchown -R $GALAXY_USER:$GALAXY_USER /tmp/slurm /var/log/slurm /var/lib/slurm\nif [ -e /export/munge.key ]\nthen\n    rm -f /etc/munge/munge.key\n    ln -s /export/munge.key /etc/munge/munge.key\n    chmod 400 /export/munge.key\nfi\n\n# link the gridengine config file\nif [ -e /export/act_qmaster ]\nthen\n    rm -f /var/lib/gridengine/default/common/act_qmaster\n    ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster\nfi\n\n# Waits until postgres is ready\nfunction wait_for_postgres {\n    local retries=\"${STARTUP_POSTGRES_RETRIES:-60}\"\n    log_info \"Waiting for database...\"\n    until /usr/local/bin/check_database.py >/dev/null 2>&1; do\n        retries=$((retries - 1))\n        if [[ $retries -le 0 ]]; then\n            log_warn \"Database did not become ready\"\n            
return 1\n        fi\n        sleep 5\n    done\n    log_success \"Database ready\"\n}\n\n# Waits until rabbitmq is ready\nfunction wait_for_rabbitmq {\n    local retries=\"${STARTUP_RABBITMQ_RETRIES:-60}\"\n    log_info \"Waiting for RabbitMQ...\"\n    until rabbitmqctl status >/dev/null 2>&1; do\n        retries=$((retries - 1))\n        if [[ $retries -le 0 ]]; then\n            log_warn \"RabbitMQ did not become ready\"\n            return 1\n        fi\n        sleep 5\n    done\n    log_success \"RabbitMQ ready\"\n}\n\n# Waits until docker daemon is ready\nfunction wait_for_docker {\n    local retries=\"${STARTUP_DOCKER_RETRIES:-60}\"\n    log_info \"Waiting for docker daemon...\"\n    until docker version >/dev/null 2>&1; do\n        retries=$((retries - 1))\n        if [[ $retries -le 0 ]]; then\n            log_warn \"Docker daemon did not become ready\"\n            return 1\n        fi\n        sleep 5\n    done\n    log_success \"Docker daemon ready\"\n}\n\nfunction wait_for_munge {\n    local retries=\"${STARTUP_MUNGE_RETRIES:-20}\"\n    log_info \"Waiting for munge...\"\n    until munge -n >/dev/null 2>&1; do\n        if [[ $retries -le 0 ]]; then\n            log_warn \"Munge did not become ready\"\n            return 1\n        fi\n        retries=$((retries - 1))\n        sleep 1\n    done\n    log_success \"Munge ready\"\n}\n\n# $NONUSE can be set to include postgres, cron, proftp, nodejs, condor, slurmd, slurmctld,\n# celery, rabbitmq, redis, flower or tusd\n# if included we will _not_ start these services.\nfunction start_supervisor {\n    supervisord -c /etc/supervisor/supervisord.conf\n    sleep 5\n\n    local parallel=false\n    case \"$STARTUP_PARALLEL\" in\n        1|true|yes|on) parallel=true ;;\n    esac\n    local pids=()\n    local names=()\n\n    start_service() {\n        local name=\"$1\"\n        shift\n        if $parallel; then\n            \"$@\" &\n            pids+=(\"$!\")\n            names+=(\"$name\")\n        else\n        
    if ! \"$@\"; then\n                if ! supervisorctl status \"$name\" 2>/dev/null | grep -q RUNNING; then\n                    log_warn \"Service start failed: $name\"\n                fi\n            fi\n        fi\n    }\n\n    wait_services() {\n        local i\n        for i in \"${!pids[@]}\"; do\n            if ! wait \"${pids[$i]}\"; then\n                if ! supervisorctl status \"${names[$i]}\" 2>/dev/null | grep -q RUNNING; then\n                    log_warn \"Service start failed: ${names[$i]}\"\n                fi\n            fi\n        done\n        pids=()\n        names=()\n    }\n\n    if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then\n        if [[ $NONUSE != *\"postgres\"* ]]\n        then\n            start_service \"postgres\" supervisorctl start postgresql\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then\n        if [[ $NONUSE != *\"cron\"* ]]\n        then\n            start_service \"cron\" supervisorctl start cron\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then\n        if [[ $NONUSE != *\"proftp\"* ]]\n        then\n            start_service \"proftpd\" supervisorctl start proftpd\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then\n        if [[ $NONUSE != *\"condor\"* ]]\n        then\n            start_service \"condor\" supervisorctl start condor\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_REDIS ]]; then\n        if [[ $NONUSE != *\"redis\"* ]]\n        then\n            start_service \"redis\" supervisorctl start redis\n        fi\n    fi\n\n    wait_services\n\n    if [[ ! 
-z $SUPERVISOR_MANAGE_SLURM ]]; then\n        log_info \"Starting munge\"\n        mkdir -p /tmp/slurm && chown -R \"${GALAXY_USER:-galaxy}:${GALAXY_USER:-galaxy}\" /tmp/slurm\n        supervisorctl start munge\n        wait_for_munge || true\n\n        if [[ $NONUSE != *\"slurmctld\"* ]]\n        then\n            log_info \"Starting slurmctld\"\n            supervisorctl start slurmctld\n        fi\n        if [[ $NONUSE != *\"slurmd\"* ]]\n        then\n            log_info \"Starting slurmd\"\n            supervisorctl start slurmd\n        fi\n    else\n        log_info \"Starting munge\"\n        mkdir -p /var/run/munge && chown -R root:root /var/run/munge\n        mkdir -p /tmp/slurm && chown -R \"${GALAXY_USER:-galaxy}:${GALAXY_USER:-galaxy}\" /tmp/slurm\n        /usr/sbin/munged -f -F --num-threads=\"${MUNGE_NUM_THREADS:-2}\" &\n        wait_for_munge || true\n\n        if [[ $NONUSE != *\"slurmctld\"* ]]\n        then\n            log_info \"Starting slurmctld\"\n            /usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log\n        fi\n        if [[ $NONUSE != *\"slurmd\"* ]]\n        then\n            log_info \"Starting slurmd\"\n            /usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log\n        fi\n    fi\n\n    if [[ ! -z $SUPERVISOR_MANAGE_RABBITMQ ]]; then\n        if [[ $NONUSE != *\"rabbitmq\"* ]]\n        then\n            log_info \"Starting rabbitmq\"\n            supervisorctl start rabbitmq\n\n            wait_for_rabbitmq\n            log_info \"Configuring rabbitmq users\"\n            ansible-playbook -c local /usr/local/bin/configure_rabbitmq_users.yml &> /dev/null\n\n            log_info \"Restarting rabbitmq\"\n            supervisorctl restart rabbitmq\n        fi    \n    fi\n\n    if [[ ! 
-z $SUPERVISOR_MANAGE_FLOWER ]]; then \n        if [[ $NONUSE != *\"flower\"* && $NONUSE != *\"celery\"* && $NONUSE != *\"rabbitmq\"* ]]\n        then\n            log_info \"Starting flower\"\n            supervisorctl start flower\n        fi\n    fi\n}\n\nfunction start_gravity {\n    if [[ ! -z $GRAVITY_MANAGE_CELERY ]]; then\n        if [[ $NONUSE == *\"celery\"* ]]\n        then\n            log_info \"Disabling Galaxy celery app\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.enable\" \"false\" &> /dev/null\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.celery.enable_beat\" \"false\" &> /dev/null\n        else\n            export GALAXY_CONFIG_ENABLE_CELERY_TASKS='true'\n            if [[ $NONUSE != *\"redis\"* ]]\n            then\n                # Configure Galaxy to use Redis as the result backend for Celery tasks\n                ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #celery_conf:' replace='  celery_conf:'\" &> /dev/null\n                ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #  result_backend:.*' replace='    result_backend: redis://127.0.0.1:6379/0'\" &> /dev/null \n            fi\n        fi\n    fi\n\n    if [[ ! -z $GRAVITY_MANAGE_GX_IT_PROXY ]]; then\n        if [[ $NONUSE == *\"nodejs\"* ]]\n        then\n            log_info \"Disabling nodejs\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.gx_it_proxy.enable\" \"false\" &> /dev/null\n        else\n            # TODO: Remove this after gravity config manager is updated to handle env vars properly\n            ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #interactivetools_enable:.*' replace='  interactivetools_enable: true'\" &> /dev/null\n        fi\n    fi\n\n    if [[ ! 
-z $GRAVITY_MANAGE_TUSD ]]; then\n        if [[ $NONUSE == *\"tusd\"* ]]\n        then\n            log_info \"Disabling Galaxy tusd app\"\n            python3 /usr/local/bin/update_yaml_value \"${GRAVITY_CONFIG_FILE}\" \"gravity.tusd.enable\" \"false\" &> /dev/null\n            cp /etc/nginx/delegated_uploads.conf /etc/nginx/delegated_uploads.conf.source \n            echo \"# No delegated uploads\" > /etc/nginx/delegated_uploads.conf\n        else\n            # TODO: Remove this after gravity config manager is updated to handle env vars properly\n            ansible localhost -m replace -a \"path=${GALAXY_CONFIG_FILE} regexp='^  #galaxy_infrastructure_url:.*' replace='  galaxy_infrastructure_url: ${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}'\" &> /dev/null\n        fi\n    fi\n\n    if [[ $NONUSE != *\"rabbitmq\"* ]]\n    then\n        # Set AMQP internal connection for Galaxy\n        export GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION=\"pyamqp://galaxy:galaxy@localhost:5672/galaxy\"\n    fi\n\n    # Set the SUPERVISORD_SOCKET to overwrite gravity's default.\n    # The default will put the socket into the export dir, into gravity's state directory. And this caused some problems to start supervisord.  \n    export SUPERVISORD_SOCKET=${SUPERVISORD_SOCKET:-/tmp/galaxy_supervisord.sock}\n    # Start galaxy services using gravity\n    /usr/local/bin/galaxyctl -d start\n}\n\nif [[ ! 
-z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then\n    if [[ $NONUSE != *\"postgres\"* ]]\n    then\n        # Change the data_directory of postgresql in the main config file\n        ansible localhost -m lineinfile -a \"line='data_directory = \\'$PG_DATA_DIR_HOST\\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'\" &> /dev/null\n    fi\nfi\n\nif $PRIVILEGED; then\n    # In privileged mode autofs and CVMFS may be available, so only append existing files.\n    export GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH=\"${GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH},/cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml\"\n\n    log_info \"Enabling Galaxy Interactive Tools\"\n    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=True\n    export GALAXY_CONFIG_TOOL_CONFIG_FILE=\"$GALAXY_CONFIG_TOOL_CONFIG_FILE,$GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE\"\n\n    # Update domain-based interactive tools nginx configuration with the galaxy domain if provided\n    if [[ ! 
-z $GALAXY_DOMAIN ]]; then\n        sed -i \"s/\\(\\.interactivetool\\.\\)[^;]*/\\1$GALAXY_DOMAIN/g\" /etc/nginx/interactive_tools_common.conf\n    fi\n\n    if [[ -z $DOCKER_PARENT ]]; then\n        #build the docker in docker environment\n        # Ensure cgroup mounts are set up without triggering dind \"no command\" warnings.\n        bash /root/cgroupfs_mount.sh true\n        log_info \"Starting services (supervisord)\"\n        start_supervisor\n        log_info \"Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})\"\n        start_gravity\n        supervisorctl start docker\n        wait_for_docker\n    else\n        #inheriting /var/run/docker.sock from parent, assume that you need to\n        #run docker with sudo to validate\n        echo \"$GALAXY_USER ALL = NOPASSWD : ALL\" >> /etc/sudoers\n        log_info \"Starting services (supervisord)\"\n        start_supervisor\n        log_info \"Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})\"\n        start_gravity\n    fi\n    if  [[ ! 
-z $PULL_IT_IMAGES ]]; then\n        log_info \"Pulling interactive tool images (this may take a while)\"\n\n        for it in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do\n            enabled_var_name=\"GALAXY_IT_FETCH_${it}\";\n            if [[ ${!enabled_var_name} ]]; then\n                # Store name in a var\n                image_var_name=\"GALAXY_IT_${it}_IMAGE\"\n                # And then read from that var\n                docker pull \"${!image_var_name}\"\n            fi\n        done\n    fi\nelse\n    log_info \"Interactive Tools disabled (start with --privileged to enable)\"\n    export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=False\n    log_info \"Starting services (supervisord)\"\n    start_supervisor\n    log_info \"Starting Galaxy (gunicorn=${GUNICORN_WORKERS:-2}, handlers=${GALAXY_HANDLER_NUMPROCS:-2}, celery=${CELERY_WORKERS:-2})\"\n    start_gravity\nfi\n\nwait_for_postgres\n\nif [[ \"$STARTUP_VALIDATE\" == \"true\" ]]; then\n    log_info \"Validating Galaxy readiness...\"\n    if ! /tool_deps/_conda/bin/galaxy-wait -g \"$STARTUP_GALAXY_URL\" -v --timeout \"$STARTUP_WAIT_TIMEOUT\"; then\n        show_failure_logs\n        exit 1\n    fi\n    log_success \"Galaxy is ready\"\nfi\n\n# Make sure the database is automatically updated\nif [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]\nthen\n    log_info \"Updating Galaxy database\"\n    sh manage_db.sh -c $GALAXY_CONFIG_FILE upgrade\nfi\n\n# In case the user wants the default admin to be created, do so.\nif [[ ! 
-z $GALAXY_DEFAULT_ADMIN_USER ]]\n    then\n        log_info \"Ensuring admin user $GALAXY_DEFAULT_ADMIN_USER exists\"\n        python /usr/local/bin/create_galaxy_user.py --user \"$GALAXY_DEFAULT_ADMIN_EMAIL\" --password \"$GALAXY_DEFAULT_ADMIN_PASSWORD\" \\\n        -c \"$GALAXY_CONFIG_FILE\" --username \"$GALAXY_DEFAULT_ADMIN_USER\" --key \"$GALAXY_DEFAULT_ADMIN_KEY\"\n    # If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.\n    # then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables\n    # visible here.\n    # The file needs to be executable (chmod a+x post-start-actions.sh)\n        if [ -x /export/post-start-actions.sh ]\n            then\n           # uses ephemeris, present in docker-galaxy-stable, to wait for the local instance\n           /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log &&\n           /export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &\n    fi\nfi\n\n# Reinstall tools if the user want to\nif [[ ! 
-z $GALAXY_AUTO_UPDATE_TOOLS ]]\n    then\n        /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > /home/galaxy/logs/post-start-actions.log &&\n        OLDIFS=$IFS\n        IFS=','\n            for TOOL_YML in `echo \"$GALAXY_AUTO_UPDATE_TOOLS\"`\n        do\n            log_info \"Installing tools from $TOOL_YML\"\n            /tool_deps/_conda/bin/shed-tools install -g \"http://127.0.0.1\" -a \"$GALAXY_DEFAULT_ADMIN_KEY\" -t \"$TOOL_YML\"\n            /tool_deps/_conda/bin/conda clean --tarballs --yes\n        done\n        IFS=$OLDIFS\nfi\n\n# migrate custom Visualisations (Galaxy plugins)\n# this is needed by the new client build system\npython3 ${GALAXY_ROOT_DIR}/scripts/plugin_staging.py\n\n# Enable verbose output\nif [ `echo ${GALAXY_LOGGING:-'no'} | tr '[:upper:]' '[:lower:]'` = \"full\" ]\n    then\n        log_success \"Startup complete; streaming logs\"\n        tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log >&${STARTUP_OUT_FD}\n    else\n        log_success \"Startup complete; streaming logs\"\n        tail -f $GALAXY_LOGS_DIR/*.log >&${STARTUP_OUT_FD}\nfi\n"
  },
  {
    "path": "galaxy/tool_conf_interactive.xml.sample",
    "content": "<?xml version='1.0' encoding='utf-8'?>\n<toolbox monitor=\"true\">\n  <section id=\"interactivetools\" name=\"Interactive Tools\">\n    <tool file=\"interactive/interactivetool_askomics.xml\" />\n    <tool file=\"interactive/interactivetool_bam_iobio.xml\" />\n    <tool file=\"interactive/interactivetool_blobtoolkit.xml\" />\n    <tool file=\"interactive/interactivetool_cellxgene_0.16.2.xml\" />\n    <tool file=\"interactive/interactivetool_cellxgene_1.1.1.xml\" />\n    <tool file=\"interactive/interactivetool_ethercalc.xml\" />\n    <tool file=\"interactive/interactivetool_guacamole_desktop.xml\" />\n    <tool file=\"interactive/interactivetool_hicbrowser.xml\" />\n    <tool file=\"interactive/interactivetool_jupyter_notebook_1.0.0.xml\" />\n    <tool file=\"interactive/interactivetool_jupyter_notebook.xml\" />\n    <tool file=\"interactive/interactivetool_neo4j.xml\" />\n    <tool file=\"interactive/interactivetool_openrefine.xml\" />\n    <tool file=\"interactive/interactivetool_phinch.xml\" />\n    <tool file=\"interactive/interactivetool_rstudio.xml\" />\n  </section>\n</toolbox>\n"
  },
  {
    "path": "galaxy/tool_sheds_conf.xml",
    "content": "<?xml version=\"1.0\"?>\n<!-- This file is only used if the container is started with -e ENABLE_TTS_INSTALL=True -->\n<tool_sheds>\n    <tool_shed name=\"Galaxy Main Tool Shed\" url=\"https://toolshed.g2.bx.psu.edu/\"/>\n    <tool_shed name=\"Galaxy Test Tool Shed\" url=\"https://testtoolshed.g2.bx.psu.edu/\"/>\n</tool_sheds>\n"
  },
  {
    "path": "galaxy/welcome.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"utf-8\">\n    <link rel=\"stylesheet\" href=\"/static/dist/base.css\" type=\"text/css\" />\n</head>\n<body style=\"margin: 0\">\n\n    <div class=\"jumbotron\">\n        <div class=\"container\">\n            <table><tr><td>\n            <h2>Hello, your <strong>Galaxy Docker</strong> container is running!</h2>\n            To customize this page you can create a <code>welcome.html</code> page in your directory mounted to <code>/export</code>.\n\n            <br>\n            <a target=\"_blank\" href=\"https://docs.galaxyproject.org/en/latest/admin/config.html\" class=\"btn btn-primary btn-lg\">Configuring Galaxy »</a>\n            <a target=\"_blank\" href=\"https://galaxyproject.org/admin/tools/add-tool-from-toolshed-tutorial/\" class=\"btn btn-primary btn-lg\">Installing Tools »</a>\n            <a target=\"_parent\" href=\"/tours/core.galaxy_ui\" class=\"btn btn-primary btn-lg\">Guided Tour »</a>\n            </td><td>\n                <div align=center>\n                    <img src=\"./welcome_image.png\" width=\"90%\" height=\"90%\" />\n                </div>\n\n        </td></tr></table>\n        </div>\n    </div>\n\n    <div class=\"container\">\n\n        <p class=\"lead\">\n            <a target=\"_blank\" class=\"reference\" href=\"http://galaxyproject.org/\">\n                Galaxy</a> is an open platform for supporting data intensive\n                research. 
Galaxy is developed by <a target=\"_blank\" class=\"reference\" href=\"https://galaxyproject.org/galaxy-team/\">The Galaxy Team</a>\n                with the support of  <a target=\"_blank\" class=\"reference\" href=\"https://github.com/galaxyproject/galaxy/blob/dev/CONTRIBUTORS.md\">many contributors</a>.\n            The Galaxy Docker project is supported by the University of Freiburg, part of de.NBI.\n        </p>\n\n        <footer>\n            The <a target=\"_blank\" class=\"reference\" href=\"http://galaxyproject.org/\">Galaxy Project</a>\n            is supported in part by <a target=\"_blank\" class=\"reference\" href=\"http://www.genome.gov\">NHGRI</a>,\n            <a target=\"_blank\" class=\"reference\" href=\"http://www.nsf.gov\">NSF</a>,\n            <a target=\"_blank\" class=\"reference\" href=\"http://www.huck.psu.edu\">The Huck Institutes of the Life Sciences</a>,\n            <a target=\"_blank\" class=\"reference\" href=\"http://www.ics.psu.edu\">The Institute for CyberScience at Penn State</a>,\n            and <a target=\"_blank\" class=\"reference\" href=\"http://www.jhu.edu/\">Johns Hopkins University</a>.\n        </footer>\n\n    </div>\n\n</body>\n</html>\n"
  },
  {
    "path": "skills/galaxy-docker/SKILL.md",
    "content": "---\nname: galaxy-docker\ndescription: \"Maintain and upgrade the bgruening/docker-galaxy project: bump Galaxy/Ubuntu versions, update Ansible roles and scheduler support, adjust startup/CI/tests, and manage CVMFS.\"\n---\n\n# Galaxy Docker skill\n\nUse this skill when working in the `bgruening/docker-galaxy` repo to upgrade Galaxy releases or refresh runtime, scheduler, CVMFS, and CI behavior.\n\n## Quick start workflow\n\n1. **Define targets**: Galaxy release, Ubuntu base, scheduler expectations (Slurm/HTCondor), and CI scope.\n2. **Update build**: `galaxy/Dockerfile` (release ARGs, build stages, slurm-drmaa, uv usage, npm cleanup).\n3. **Update Ansible**: `galaxy/ansible/requirements.yml` and playbooks (`rabbitmq.yml`, `condor.yml`, `slurm.yml`, `nginx.yml`, `proftpd.yml`).\n4. **Update runtime**: `galaxy/startup.sh`, `galaxy/startup2.sh`, and `galaxy/ansible/templates/export_user_files.py.j2`.\n5. **CVMFS changes**: `cvmfs/` sidecar + `galaxy/docker-compose.yaml` + resolver config.\n6. **Tests/CI**: `test/` scripts and `.github/workflows/` (buildx caches, test orchestration).\n7. 
**Run tests**: Use both `--privileged` and non-privileged runs where relevant.\n\n## Repo map (files to touch)\n\n- Build: `galaxy/Dockerfile`\n- Startup: `galaxy/startup.sh`, `galaxy/startup2.sh`\n- Galaxy config export: `galaxy/ansible/templates/export_user_files.py.j2`\n- Ansible roles: `galaxy/ansible/requirements.yml`\n- Services: `galaxy/ansible/rabbitmq.yml`, `galaxy/ansible/condor.yml`, `galaxy/ansible/slurm.yml`, `galaxy/ansible/nginx.yml`, `galaxy/ansible/proftpd.yml`\n- Slurm config template: `galaxy/ansible/templates/configure_slurm.py.j2`\n- Container resolvers: `galaxy/ansible/templates/container_resolvers_conf.yml.j2`\n- CVMFS sidecar: `cvmfs/` and `galaxy/docker-compose.yaml`\n- Tests: `test/bioblend/`, `test/slurm/`, `test/gridengine/`, `test/cvmfs/`, `test/container_resolvers_conf.ci.yml`\n- CI: `.github/workflows/*.yml` and `.github/workflows/single.sh`\n\n## Guardrails and expectations\n\n- Keep Python installs on `uv` (build and runtime). Avoid `pip install` directly.\n- Prefer buildx cache mounts in Dockerfiles and `cache-to/cache-from` in GitHub Actions.\n- Use `--rm` for test containers and clean up by name to avoid conflicts.\n- If `/tmp` fills up on CI, use `TMPDIR=/var/tmp` for heavy Docker tests.\n- Use `startup2` for richer diagnostics; keep `startup.sh` minimal.\n\n## CVMFS\n\n- Privileged runs use full CVMFS client + autofs.\n- Sidecar is optional via compose profile (`cvmfs/` image).\n- Container resolver config should include cached mulled paths on both CVMFS and `/export`.\n- See `references/upgrade-25.1.md` for the exact sidecar design and tests.\n\n## Slurm\n\n- Ensure Slurm works in containers without systemd/cgroup v2 requirements.\n- `configure_slurm.py.j2` writes `cgroup.conf` with `CgroupPlugin=disabled`.\n- Slurm-DRMAA is built from source when ABI mismatches exist (documented in references).\n\n## Tests (typical order)\n\n- `test/slurm/test.sh` (set `GALAXY_IMAGE=galaxy:test` if needed)\n- `test/gridengine/test.sh` (uses 
ephemeris container for wait)\n- `test/bioblend/test.sh`\n- `test/cvmfs/test.sh` (sidecar + mount propagation)\n- `startup2` sanity: `docker run --rm --privileged ... /usr/bin/startup2`\n\n## References\n\n- `references/upgrade-25.1.md` for 25.1 upgrade decisions, pins, and pitfalls.\n"
  },
  {
    "path": "skills/galaxy-docker/references/upgrade-25.1.md",
    "content": "# 25.1 upgrade reference (docker-galaxy)\n\nThis reference captures the key decisions, pins, and fixes applied during the 25.1 upgrade.\nUse it as a **lessons-learned checklist** and re-validate each item for the next release.\n\n## Base versions and build decisions\n\n- **Ubuntu base**: `ubuntu:24.04` in `galaxy/Dockerfile` (`galaxy_cluster_base` stage).\n- **Galaxy release**: set via `ARG GALAXY_RELEASE` in `galaxy/Dockerfile` (target `release_25.1`).\n- **gx-it-proxy**: preinstalled via npm during build, then npm removed to save space.\n- **Python installs**: migrate to `uv` for optional dependencies and tests.\n- **jemalloc**: custom build kept for Grid Engine compatibility (see comment in Dockerfile).\n\n## Slurm and slurm-drmaa (25.1-specific)\n\n- **Slurm version**: for 25.1 on Ubuntu 24.04, Slurm 24.11 was required for ABI compatibility in this image. Re-check available packages and ABI compatibility each upgrade.\n- **Slurm-DRMAA**: built from source in a dedicated build stage because the natefoo PPA binaries were built against Slurm 23.11 and broke at runtime with 24.11.\n  - Build stage in `galaxy/Dockerfile` has a large comment that explains this as temporary and should be removed once 24.11-compatible packages are available.\n- **Cgroups**: container-friendly configuration writes `/etc/slurm/cgroup.conf` with `CgroupPlugin=disabled` (via `configure_slurm.py.j2`).\n- **Runtime config**: `configure_slurm.py.j2` merges `slurmd -C`, `lscpu -J`, and `/proc/meminfo` to avoid hardware mismatch errors; also forces `TaskPlugin=task/none`, `JobAcctGatherType=jobacct_gather/none`, `MpiDefault=none`, `ProctrackType=proctrack/pgid`.\n\n## RabbitMQ\n\n- Use Team RabbitMQ repositories (per rabbitmq.com install instructions).\n- Pin `rabbitmq_version` in `galaxy/ansible/rabbitmq.yml`.\n- Install Erlang packages explicitly and enable `rabbitmq_management`.\n\n## HTCondor\n\n- Prefer upstream roles and official repositories when they support the target 
OS and version.\n- If upstream lags (e.g., no packages yet), document the temporary workaround and remove it once upstream catches up.\n\n## CVMFS\n\n- Main container supports CVMFS only in `--privileged` mode.\n- Sidecar container added under `cvmfs/` with autofs and a minimal Ansible playbook.\n- Compose profile `cvmfs` in `galaxy/docker-compose.yaml` uses rshared mount propagation so the Galaxy container sees CVMFS mounts.\n- Container resolver config adds cached mulled paths:\n  - `/cvmfs/singularity.galaxyproject.org/all`\n  - `/export/container_cache/singularity/mulled`\n\n## Startup scripts\n\n- `startup2` adds colored logging, runtime summary, and a `GALAXY_*` env summary with masking.\n- CVMFS messaging avoids early warnings by skipping manual mounts when autofs is configured.\n- `startup2` and `startup.sh` call `/root/cgroupfs_mount.sh true` to avoid the \"No command specified\" warning.\n- Optional dependency installs use `uv` when `LOAD_GALAXY_CONDITIONAL_DEPENDENCIES` is set.\n- Creates `/tmp/slurm`, `/var/log/slurm`, and `/var/lib/slurm/slurmctld` to avoid missing state file errors.\n\n## Job handlers\n\n- `galaxy/ansible/galaxy_job_conf.yml` ensures `job_handler_assignment_method: db-skip-locked` when dynamic handlers are enabled.\n- `galaxy/Dockerfile` runs `ansible-playbook /ansible/galaxy_job_conf.yml` after copying the `galaxy.yml.sample` so the setting persists in the built image.\n\n## CI and tests\n\n- Buildx caching enabled in workflows; `single.sh` uses buildx with cache-to/cache-from.\n- `test/container_resolvers_conf.ci.yml` keeps resolver tests fast.\n- `test/cvmfs/test.sh` validates mount propagation from sidecar to Galaxy.\n- `test/gridengine/test.sh` uses ephemeris container to `galaxy-wait`.\n- `test/bioblend` updated for Galaxy 25.1 and newer Bioblend.\n\n## Known pitfalls and fixes\n\n- **CVMFS warnings on startup**: resolved by checking autofs config before manual mounts.\n- **Munge readiness**: add a wait loop and configurable 
`MUNGE_NUM_THREADS` (default 2).\n- **Dynamic handler warning in Gravity**: fix by setting `job_handler_assignment_method` via Ansible.\n- **No command specified**: avoid by running `/root/cgroupfs_mount.sh true` instead of no args.\n- **/tmp full on CI**: run tests with `TMPDIR=/var/tmp`.\n\n## Files touched during the 25.1 upgrade\n\nHigh-signal files for reference:\n\n- `galaxy/Dockerfile`\n- `galaxy/startup.sh`, `galaxy/startup2.sh`\n- `galaxy/ansible/requirements.yml`\n- `galaxy/ansible/rabbitmq.yml`, `galaxy/ansible/condor.yml`, `galaxy/ansible/slurm.yml`\n- `galaxy/ansible/templates/configure_slurm.py.j2`\n- `galaxy/ansible/templates/container_resolvers_conf.yml.j2`\n- `galaxy/ansible/templates/export_user_files.py.j2`\n- `galaxy/docker-compose.yaml`\n- `cvmfs/` (sidecar)\n- `test/` (slurm, gridengine, bioblend, cvmfs)\n- `.github/workflows/` (buildx caching, single-container tests, CVMFS workflow)\n"
  },
  {
    "path": "test/bioblend/Dockerfile",
    "content": "FROM alpine:3.17 as build\n\nENV BIOBLEND_VERSION=1.7.0 \\\n    TOX_ENV=py310 \\\n    BIOBLEND_GALAXY_API_KEY=fakekey \\\n    BIOBLEND_GALAXY_URL=http://galaxy \\\n    BIOBLEND_TEST_JOB_TIMEOUT=\"240\" \\\n    GALAXY_VERSION=release_25.1 \\\n    UV_INSTALL_DIR=/usr/local/bin\n\nADD \"https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND_VERSION.zip\" /src/bioblend.zip\nRUN apk update && apk add bash curl python3-dev unzip \\\n    && curl -LsSf https://astral.sh/uv/install.sh | bash \\\n    && uv pip install --system pep8 tox \\\n    && cd /src \\\n    && unzip bioblend.zip && rm bioblend.zip \\\n    && mv \"bioblend-$BIOBLEND_VERSION\" bioblend \\\n    && cd bioblend \\\n    && uv pip install --system .\n\nWORKDIR /src/bioblend\n\nCMD /bin/sh -c \"tox -e $TOX_ENV -- -k 'not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_cancel_invocation and not test_run_step_actions'\"\n\n# library tests, needs share /tmp filesystem\n# * test_upload_from_galaxy_filesystem\n# * test_get_datasets\n# * test_datasets_from_fs\n"
  },
  {
    "path": "test/bioblend/test.sh",
    "content": "#!/bin/bash\nif ! docker build -t bioblend_test .; then\n    echo \"Bioblend docker image build failed.\"\n    exit 1\nfi\n\nif ! docker run --rm --name bioblend_test --link galaxy -v /tmp/:/tmp/ bioblend_test; then\n    echo \"Bioblend tests failed.\"\n    exit 1\nfi\ndocker rmi bioblend_test\n"
  },
  {
    "path": "test/container_resolvers_conf.ci.yml",
    "content": "# Minimal container resolvers for CI to keep resolve_toolbox fast.\n- type: explicit\n- type: cached_mulled_singularity\n  cache_directory: \"/export/container_cache/singularity/mulled\"\n"
  },
  {
    "path": "test/cvmfs/test.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nif ! docker build -t galaxy:test ./galaxy; then\n    echo \"Galaxy docker image build failed.\"\n    exit 1\nfi\n\nif ! docker build -t galaxy-cvmfs:test ./cvmfs; then\n    echo \"CVMFS sidecar image build failed.\"\n    exit 1\nfi\n\ncvmfs_mount_dir=\"$(mktemp -d)\"\ncvmfs_cache_dir=\"$(mktemp -d)\"\n\ncleanup() {\n    docker exec galaxy-cvmfs-test sh -c \"umount -l /cvmfs/data.galaxyproject.org /cvmfs/singularity.galaxyproject.org >/dev/null 2>&1 || true\" || true\n    docker exec galaxy-cvmfs-test sh -c \"service autofs stop >/dev/null 2>&1 || true\" || true\n    docker stop galaxy-cvmfs-test >/dev/null 2>&1 || true\n    rm -rf \"$cvmfs_mount_dir\" \"$cvmfs_cache_dir\" >/dev/null 2>&1 || true\n}\ntrap cleanup EXIT\n\nif ! docker run -d --rm --name galaxy-cvmfs-test --privileged \\\n    -e CVMFS_REPOSITORIES=data.galaxyproject.org,singularity.galaxyproject.org \\\n    -v \"$cvmfs_mount_dir:/cvmfs:rshared\" \\\n    -v \"$cvmfs_cache_dir:/var/lib/cvmfs:delegated\" \\\n    galaxy-cvmfs:test >/dev/null; then\n    echo \"CVMFS sidecar container failed to start.\"\n    exit 1\nfi\n\nmounted=false\nfor _ in $(seq 1 90); do\n    if docker exec galaxy-cvmfs-test ls /cvmfs/data.galaxyproject.org/byhand >/dev/null 2>&1; then\n        mounted=true\n        break\n    fi\n    sleep 2\ndone\n\nif ! $mounted; then\n    echo \"CVMFS mount test failed in the sidecar.\"\n    exit 1\nfi\n\nif ! docker run --rm \\\n    -v \"$cvmfs_mount_dir:/cvmfs:rshared\" \\\n    galaxy:test /bin/sh -c \"ls /cvmfs/data.galaxyproject.org/byhand >/dev/null\"; then\n    echo \"CVMFS mount not visible in the Galaxy container.\"\n    exit 1\nfi\n"
  },
  {
    "path": "test/gridengine/Dockerfile",
    "content": "FROM ubuntu:22.04 AS sge_master\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update -qq \\\n    && apt-get install -y wget gridengine-exec gridengine-client \\\n    # need to run this before gridengine-master installation (https://bugs.launchpad.net/ubuntu/+source/gridengine/+bug/1774302)\n    && wget http://ftp.debian.org/debian/pool/main/g/gridengine/gridengine-client_8.1.9+dfsg-10+b1_amd64.deb \\\n    && dpkg -x gridengine-client_8.1.9+dfsg-10+b1_amd64.deb ge-client \\\n    && cp ge-client/usr/lib/gridengine/spooldefaults.bin /usr/lib/gridengine/ \\\n    && cp ge-client/usr/lib/gridengine/libspool*.so /usr/lib/gridengine/ \\\n    && rm -rf gridengine-client_8.1.9+dfsg-10+b1_amd64.deb ge-client \\\n    && apt purge -y wget && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\nRUN apt-get update -qq \\\n    && apt-get install -y gridengine-master \\\n    && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n# dummy user account\nRUN useradd -m dummy \\\n    && echo \"dummy:dummy\" | chpasswd\n\nADD --chmod=755 setup_gridengine.sh /usr/local/bin/setup_gridengine.sh\n\n\nFROM python:3.10.15 AS sge_bioblend_test\nENV UV_INSTALL_DIR=/usr/local/bin\nRUN apt-get update -qq \\\n    && apt-get install -y curl \\\n    && curl -LsSf https://astral.sh/uv/install.sh | sh \\\n    && uv pip install --system bioblend==1.3.0 \\\n    && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n"
  },
  {
    "path": "test/gridengine/act_qmaster",
    "content": "sgemaster\n"
  },
  {
    "path": "test/gridengine/job_conf.xml.sge",
    "content": "<?xml version=\"1.0\"?>\n<job_conf>\n    <plugins workers=\"8\">\n        <plugin id=\"sge\" type=\"runner\" load=\"galaxy.jobs.runners.drmaa:DRMAAJobRunner\">\n            <param id=\"drmaa_library_path\">/usr/lib/gridengine-drmaa/lib/libdrmaa.so.1.0</param>\n        </plugin>\n    </plugins>\n    <handlers>\n        <handler id=\"main\"/>\n    </handlers>\n    <destinations default=\"cluster\">\n        <destination id=\"cluster\" runner=\"sge\">\n            <param id=\"embed_metadata_in_job\">False</param>\n        </destination>\n    </destinations>\n    <limits>\n        <limit type=\"registered_user_concurrent_jobs\">1</limit>\n        <limit type=\"anonymous_user_concurrent_jobs\">1</limit>\n        <limit type=\"destination_user_concurrent_jobs\">1</limit>\n        <limit type=\"destination_total_concurrent_jobs\">1</limit>\n        <limit type=\"unregistered_user_concurrent_jobs\">1</limit>\n    </limits>\n</job_conf>\n"
  },
  {
    "path": "test/gridengine/master_script.sh",
    "content": "#!/bin/bash\nuseradd -u 1450 -m galaxy\n/usr/local/bin/setup_gridengine.sh\ntail -f /var/spool/gridengine/qmaster/messages\n"
  },
  {
    "path": "test/gridengine/outputhostname/outputhostname.xml",
    "content": "<tool id=\"outputhostname\" name=\"Output Hostname\" version=\"0.0.1\">\n    <description>data in ascending or descending order</description>\n    <command>\n        hostname > ${out_file1}\n    </command>\n    <outputs>\n        <data format=\"tabular\" name=\"out_file1\" label=\"${tool.name} on ${on_string}\" />\n    </outputs>\n</tool>\n"
  },
  {
    "path": "test/gridengine/outputhostname.tool.xml",
    "content": "<?xml version=\"1.0\"?>\n<toolbox tool_path=\"${tool_conf_dir}\" is_shed_conf=\"false\">\n  <section id=\"outputhostname\" name=\"Output Hostname\">\n    <tool file=\"outputhostname.xml\"/>\n  </section>\n</toolbox>\n"
  },
  {
    "path": "test/gridengine/setup_gridengine.sh",
    "content": "#!/bin/bash\n\n# hostname > /var/lib/gridengine/default/common/act_qmaster\n/etc/init.d/gridengine-master start\n/etc/init.d/gridengine-exec start\n\ncat << EOS  > /tmp/qconf-ae.txt\nhostname              $(hostname)\nload_scaling          NONE\ncomplex_values        NONE\nuser_lists            NONE\nxuser_lists           NONE\nprojects              NONE\nxprojects             NONE\nusage_scaling         NONE\nreport_variables      NONE\nEOS\n\nqconf -Ae /tmp/qconf-ae.txt\n\n\n# Add submit host\nqconf -as `hostname`\n\n# shell bash\ncat << EOS > /tmp/qconf-aq.txt\nqname                 testq\nhostlist              $(hostname)\nseq_no                0\nload_thresholds       np_load_avg=1.75\nsuspend_thresholds    NONE\nnsuspend              1\nsuspend_interval      00:05:00\npriority              0\nmin_cpu_interval      00:05:00\nprocessors            UNDEFINED\nqtype                 BATCH INTERACTIVE\nckpt_list             NONE\npe_list               make\nrerun                 FALSE\nslots                 1\ntmpdir                /tmp\nshell                 /bin/bash\nprolog                NONE\nepilog                NONE\nshell_start_mode      posix_compliant\nstarter_method        NONE\nsuspend_method        NONE\nresume_method         NONE\nterminate_method      NONE\nnotify                00:00:60\nowner_list            NONE\nuser_lists            NONE\nxuser_lists           NONE\nsubordinate_list      NONE\ncomplex_values        NONE\nprojects              NONE\nxprojects             NONE\ncalendar              NONE\ninitial_state         default\ns_rt                  INFINITY\nh_rt                  INFINITY\ns_cpu                 INFINITY\nh_cpu                 INFINITY\ns_fsize               INFINITY\nh_fsize               INFINITY\ns_data                INFINITY\nh_data                INFINITY\ns_stack               INFINITY\nh_stack               INFINITY\ns_core                INFINITY\nh_core                INFINITY\ns_rss              
   INFINITY\nh_rss                 INFINITY\ns_vmem                INFINITY\nh_vmem                INFINITY\nEOS\n\nqconf -Aq /tmp/qconf-aq.txt\n\n# avoid 'stdin: is not a tty'\nsed -i -e 's/^mesg n//' /root/.profile\n\n# echo \"hostname ; date\" | qsub\n\n\n#\nfor HOST in $@\ndo\n  qconf -as $HOST\ndone\n"
  },
  {
    "path": "test/gridengine/setup_tool.sh",
    "content": "#!/bin/bash\n# cp tool_conf.xml config\nexport GALAXY_CONFIG_TOOL_CONFIG_FILE=/galaxy/tool_conf.xml\n/usr/bin/startup\ntailf /home/galaxy/logs/*\n"
  },
  {
    "path": "test/gridengine/test.sh",
    "content": "#!/usr/bin/env bash\n\necho \"Test that jobs run successfully on an external gridengine cluster\"\n\ndocker build --target sge_master --tag sge_master .\ndocker build --target sge_bioblend_test --tag sge_bioblend_test .\n\n# start master\n# We use a temporary directory as an export dir that will hold the shared data between\n# galaxy and gridengine:\nEXPORT=`mktemp --directory`\nchmod 777 ${EXPORT}\ndocker run -d --rm --hostname sgemaster --name sgemaster -v ${EXPORT}:/export -v $PWD/master_script.sh:/usr/local/bin/master_script.sh sge_master /usr/local/bin/master_script.sh\n# wait for sge master\nsleep 10\n\n# start galaxy\nGALAXY_CONTAINER=${GALAXY_CONTAINER:-quay.io/bgruening/galaxy}\nEPHEMERIS_IMAGE=${EPHEMERIS_IMAGE:-quay.io/biocontainers/ephemeris:0.10.11--pyhdfd78af_0}\nGALAXY_WAIT_TIMEOUT=${GALAXY_WAIT_TIMEOUT:-600}\nGALAXY_CONTAINER_NAME=galaxytest\nGALAXY_CONTAINER_HOSTNAME=galaxytest\nGALAXY_ROOT_DIR=/galaxy\n\ndocker run -d --rm \\\n           -e SGE_ROOT=/var/lib/gridengine \\\n           --link sgemaster:sgemaster \\\n           --name ${GALAXY_CONTAINER_NAME} \\\n           --hostname ${GALAXY_CONTAINER_HOSTNAME} \\\n           -p 20080:80 -e NONUSE=\"condor\" \\\n           -v $PWD/job_conf.xml.sge:/etc/galaxy/job_conf.xml \\\n           -v ${EXPORT}:/export \\\n           -v $PWD/outputhostname:$GALAXY_ROOT_DIR/tools/outputhostname \\\n           -v $PWD/outputhostname.tool.xml:$GALAXY_ROOT_DIR/outputhostname.tool.xml \\\n           -v $PWD/setup_tool.sh:$GALAXY_ROOT_DIR/setup_tool.sh \\\n           -v $PWD/tool_conf.xml:$GALAXY_ROOT_DIR/tool_conf.xml \\\n           -v $PWD/act_qmaster:/var/lib/gridengine/default/common/act_qmaster \\\n           ${GALAXY_CONTAINER} \\\n           $GALAXY_ROOT_DIR/setup_tool.sh\necho \"Wait 30sec\"\nsleep 30\n\necho \"show logs from ${GALAXY_CONTAINER_NAME}\"\ndocker logs ${GALAXY_CONTAINER_NAME}\n\n# Add host setting galaxytest to sgemaster\necho \"Get host info from 
${GALAXY_CONTAINER_HOSTNAME}\"\nSGECLIENT=$(docker exec ${GALAXY_CONTAINER_NAME} cat /etc/hosts | grep ${GALAXY_CONTAINER_HOSTNAME})\necho \"Add host info to sgemaster\"\ndocker exec sgemaster bash -c \"echo ${SGECLIENT} >> /etc/hosts ; /etc/init.d/gridengine-master restart\"\necho \"Output /etc/hosts on sgemaster\"\ndocker exec sgemaster cat /etc/hosts\n\n# Add gridengine client host\necho \"Add submit host ${GALAXY_CONTAINER_HOSTNAME}\"\ndocker exec sgemaster bash -c \"qconf -as ${GALAXY_CONTAINER_HOSTNAME}\"\necho \"Waiting for Galaxy to become ready\"\nif ! docker run --rm --link ${GALAXY_CONTAINER_NAME}:galaxytest \\\n    ${EPHEMERIS_IMAGE} galaxy-wait -g http://galaxytest --timeout ${GALAXY_WAIT_TIMEOUT}; then\n    echo \"Galaxy did not become ready within ${GALAXY_WAIT_TIMEOUT}s.\"\n    docker logs ${GALAXY_CONTAINER_NAME} || true\n    exit 1\nfi\n\necho \"Exec test\"\ndocker run --rm --link galaxytest:galaxytest -v $PWD/test_outputhostname.py:/work/test_outputhostname.py sge_bioblend_test python /work/test_outputhostname.py > out\ngrep sgemaster out\nRET=$?\n\n# remove container\ndocker stop sgemaster || true\ndocker stop galaxytest || true\n\n# Remove images \ndocker rmi sge_master\ndocker rmi sge_bioblend_test\n\nif [ $RET -ne 0 ]; then\n    echo \"Grid Engine test failed\"\n    exit $RET\nfi\n"
  },
  {
    "path": "test/gridengine/test_outputhostname.py",
    "content": "#!/usr/bin/python\nimport time\n\nfrom bioblend.galaxy import GalaxyInstance\ngi = GalaxyInstance('http://galaxytest', key='fakekey')\ngi.histories.create_history()\n# print(gi.tools.get_tool_panel())\nhistory = gi.histories.get_most_recently_used_history()\n# print(dir(history))\nhistory_id = history['id']\n# print(history_id)\ntool_output = gi.tools.run_tool(\n    history_id=history_id,\n    tool_id=\"outputhostname\",\n    tool_inputs={}\n)\n\n# print(tool_output)\n\n# loop until job finish timeout is 30sec\nresult = \"noresult\"\nfor x in range(0, 30):\n    time.sleep(1)\n    show_history = gi.histories.show_history(history_id)\n    if len(show_history['state_ids']['ok']) > 0:\n        dataset_id = show_history['state_ids']['ok'][0]\n        dataset = gi.datasets.show_dataset(dataset_id)\n        result = dataset['peek']\n        break\nprint(result)\n"
  },
  {
    "path": "test/gridengine/tool_conf.xml",
    "content": "<?xml version='1.0' encoding='utf-8'?>\n<toolbox monitor=\"true\">\n  <section id=\"testtool\" name=\"Output Hostname\">\n    <tool file=\"outputhostname/outputhostname.xml\" />\n  </section>\n</toolbox>\n"
  },
  {
    "path": "test/slurm/Dockerfile",
    "content": "FROM ubuntu:24.04\n\nENV DEBIAN_FRONTEND=noninteractive\nENV UV_INSTALL_DIR=/usr/local/bin\n\nRUN apt-get update -qq && apt-get install -y --no-install-recommends \\\n    munge \\\n    python3-psutil supervisor samtools apt-transport-https software-properties-common dirmngr gpg curl sudo gpg-agent && \\\n    add-apt-repository ppa:ubuntu-hpc/slurm-wlm-24.11 && \\\n    apt-get update -qq && \\\n    apt-get install -y slurm-wlm && \\\n    curl -LsSf https://astral.sh/uv/install.sh | sh && \\\n    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - && \\\n    add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\" && \\\n    apt update && \\\n    apt install -y docker-ce && \\\n    cd / && \\\n    ldconfig && \\\n    apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ && \\\n    adduser --disabled-password --gecos \"\" galaxy &&\\\n    mkdir -p /var/log/slurm /tmp/slurm && \\\n    touch /var/log/slurm/slurmctld.log /var/log/slurm/slurmd.log\n\nADD configure_slurm.py /usr/local/bin/configure_slurm.py\nADD munge.conf /etc/default/munge\nRUN service munge start && service munge stop\nADD startup.sh /usr/bin/startup.sh\nADD supervisor_slurm.conf /etc/supervisor/conf.d/slurm.conf\nRUN chmod +x /usr/bin/startup.sh\n#RUN locale-gen en_US.UTF-8 && dpkg-reconfigure locales\nENV GALAXY_DIR=/export/galaxy \\\n    SYMLINK_TARGET=/galaxy \\\n    SLURM_USER_NAME=galaxy \\\n    SLURM_UID=1450 \\\n    SLURM_GID=1450 \\\n    SLURM_PARTITION_NAME=work \\\n    SLURM_CLUSTER_NAME=Cluster \\\n    SLURMD_AUTOSTART=True \\\n    SLURMCTLD_AUTOSTART=True \\\n    SLURM_CONF_PATH=/export/slurm.conf \\\n    MUNGE_KEY_PATH=/export/munge.key\n\nVOLUME [\"/export/\", \"/var/lib/docker\"]\nCMD [\"/usr/bin/startup.sh\"]\n"
  },
  {
    "path": "test/slurm/configure_slurm.py",
    "content": "from socket import gethostname\nfrom string import Template\nfrom os import environ\nimport subprocess\nimport json\n\n\nSLURM_CONFIG_TEMPLATE = '''\n# slurm.conf file generated by configurator.html.\n# Put this file on all nodes of your cluster.\n# See the slurm.conf man page for more information.\n#\nSlurmctldHost=$control_machine\n#SlurmctldAddr=\n#\nAuthType=auth/munge\n#CheckpointType=checkpoint/none\nCryptoType=crypto/munge\nMpiDefault=none\n#PluginDir=\n#PlugStackConfig=\n#PrivateData=jobs\nProctrackType=proctrack/pgid\n#Prolog=\n#PrologSlurmctld=\n#PropagatePrioProcess=0\n#PropagateResourceLimits=\n#PropagateResourceLimitsExcept=\nReturnToService=1\n#SallocDefaultCommand=\nSlurmctldPidFile=/var/run/slurmctld.pid\nSlurmctldPort=6817\nSlurmdPidFile=/var/run/slurmd.pid\nSlurmdPort=6818\nSlurmdSpoolDir=/tmp/slurmd\nSlurmUser=$user\n#SlurmdUser=root\n#SrunEpilog=\n#SrunProlog=\nStateSaveLocation=/tmp/slurm\nSwitchType=switch/none\n#TaskEpilog=\nTaskPlugin=task/none\n#TaskPluginParam=\n#TaskProlog=\nJobAcctGatherType=jobacct_gather/none\nInactiveLimit=0\nKillWait=30\nMinJobAge=300\n#OverTimeLimit=0\nSlurmctldTimeout=120\nSlurmdTimeout=300\n#UnkillableStepTimeout=60\n#VSizeFactor=0\nWaittime=0\nSchedulerType=sched/backfill\nSelectType=select/cons_tres\nSelectTypeParameters=CR_Core_Memory\nAccountingStorageType=accounting_storage/none\n#AccountingStorageUser=\nAccountingStoreFlags=job_comment\nClusterName=$cluster_name\n#DebugFlags=\n#JobCompHost=\n#JobCompLoc=\n#JobCompPass=\n#JobCompPort=\nJobCompType=jobcomp/none\n#JobCompUser=\nJobAcctGatherFrequency=30\nJobAcctGatherType=jobacct_gather/none\nSlurmctldDebug=3\n#SlurmctldLogFile=\nSlurmdDebug=3\n#SlurmdLogFile=\nNodeName=$hostname CPUs=$cpus RealMemory=$memory State=UNKNOWN$topology\nPartitionName=$partition_name Nodes=$nodes Default=YES MaxTime=INFINITE State=UP Shared=YES\n'''\n\nENV_MAP = {\n    \"CPUs\": \"SLURM_CPUS\",\n    \"RealMemory\": \"SLURM_MEMORY\",\n    \"Boards\": 
\"SLURM_BOARDS\",\n    \"SocketsPerBoard\": \"SLURM_SOCKETS_PER_BOARD\",\n    \"CoresPerSocket\": \"SLURM_CORES_PER_SOCKET\",\n    \"ThreadsPerCore\": \"SLURM_THREADS_PER_CORE\",\n}\n\ndef _as_int(value):\n    try:\n        return int(str(value).split()[0])\n    except (TypeError, ValueError):\n        return None\n\ndef _slurmd_status():\n    try:\n        output = subprocess.check_output([\"slurmd\", \"-C\"], stderr=subprocess.DEVNULL).decode(\"utf-8\")\n    except Exception:\n        return {}\n    info = {}\n    for chunk in output.split():\n        if \"=\" in chunk:\n            key, value = chunk.split(\"=\", 1)\n            info[key] = value\n    return info\n\ndef _lscpu_status():\n    try:\n        output = subprocess.check_output([\"lscpu\", \"-J\"], stderr=subprocess.DEVNULL).decode(\"utf-8\")\n        data = json.loads(output)\n    except Exception:\n        return {}\n    fields = {}\n    for entry in data.get(\"lscpu\", []):\n        field = entry.get(\"field\", \"\").strip().strip(\":\")\n        fields[field] = entry.get(\"data\")\n    cpus = _as_int(fields.get(\"CPU(s)\"))\n    sockets = _as_int(fields.get(\"Socket(s)\"))\n    cores = _as_int(fields.get(\"Core(s) per socket\"))\n    threads = _as_int(fields.get(\"Thread(s) per core\"))\n    info = {}\n    if cpus is not None:\n        info[\"CPUs\"] = str(cpus)\n    if sockets is not None:\n        info[\"SocketsPerBoard\"] = str(sockets)\n    if cores is not None:\n        info[\"CoresPerSocket\"] = str(cores)\n    if threads is not None:\n        info[\"ThreadsPerCore\"] = str(threads)\n    info.setdefault(\"Boards\", \"1\")\n    return info\n\ndef _real_memory_mb():\n    try:\n        with open(\"/proc/meminfo\", \"r\") as handle:\n            for line in handle:\n                if line.startswith(\"MemTotal:\"):\n                    parts = line.split()\n                    if len(parts) >= 2:\n                        return int(int(parts[1]) / 1024)\n    except Exception:\n        return 
None\n    return None\n\ndef main():\n    hostname = gethostname()\n    dict_status = _slurmd_status()\n    for key, value in _lscpu_status().items():\n        dict_status.setdefault(key, value)\n    if \"RealMemory\" not in dict_status:\n        real_memory = _real_memory_mb()\n        if real_memory is not None:\n            dict_status[\"RealMemory\"] = str(real_memory)\n    cpus = dict_status.get(\"CPUs\") or \"1\"\n    memory = dict_status.get(\"RealMemory\") or \"1024\"\n    topology_parts = []\n    for key in (\"Boards\", \"SocketsPerBoard\", \"CoresPerSocket\", \"ThreadsPerCore\"):\n        env_key = ENV_MAP.get(key)\n        value = environ.get(env_key) if env_key else None\n        if value is None:\n            value = dict_status.get(key)\n        if value is not None:\n            topology_parts.append(f\" {key}={value}\")\n    template_params = {\n        \"hostname\": hostname,\n        \"nodes\": \",\".join(environ.get('SLURM_NODES', hostname).split(',')),\n        \"cluster_name\": environ.get('SLURM_CLUSTER_NAME', 'Cluster'),\n        \"control_machine\": environ.get('SLURM_CONTROL_MACHINE', hostname),\n        \"user\": environ.get('SLURM_USER_NAME', 'galaxy'),\n        \"cpus\": environ.get(\"SLURM_CPUS\", cpus),\n        \"partition_name\": environ.get('SLURM_PARTITION_NAME', 'debug'),\n        \"memory\": environ.get(\"SLURM_MEMORY\", memory),\n        \"topology\": \"\".join(topology_parts),\n    }\n    config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)\n    open(\"/etc/slurm/slurm.conf\", \"w\").write(config_contents)\n    # Slurm 24.11 supports disabling cgroups to avoid systemd/cgroup requirements in containers.\n    with open(\"/etc/slurm/cgroup.conf\", \"w\") as handle:\n        handle.write(\"CgroupPlugin=disabled\\n\")\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "test/slurm/job_conf.xml",
    "content": "<?xml version=\"1.0\"?>\n<job_conf>\n    <plugins workers=\"2\">\n        <plugin id=\"slurm\" type=\"runner\" load=\"galaxy.jobs.runners.slurm:SlurmJobRunner\">\n            <param id=\"drmaa_library_path\">/usr/lib/slurm-drmaa/lib/libdrmaa.so</param>\n        </plugin>\n        <plugin id=\"local\" type=\"runner\" load=\"galaxy.jobs.runners.local:LocalJobRunner\" workers=\"2\"/>\n    </plugins>\n    <handlers default=\"handlers\">\n        <handler id=\"handler0\" tags=\"handlers\"/>\n        <handler id=\"handler1\" tags=\"handlers\"/>\n    </handlers>\n    <destinations default=\"slurm_cluster\">\n        <destination id=\"slurm_cluster\" runner=\"slurm\">\n            <param id=\"nativeSpecification\">-p work -n 2</param>\n            <param id=\"embed_metadata_in_job\">False</param>\n            <env file=\"/export/galaxy/.venv/bin/activate\" />\n            <param id=\"docker_enabled\" from_environ=\"GALAXY_DOCKER_ENABLED\">False</param>\n            <param id=\"docker_sudo\" from_environ=\"GALAXY_DOCKER_SUDO\">False</param>\n            <!-- The empty volumes from shouldn't affect\n                 Galaxy, set GALAXY_DOCKER_VOLUMES_FROM to use.\n            -->\n            <param id=\"docker_volumes_from\" from_environ=\"GALAXY_DOCKER_VOLUMES_FROM\">galaxy</param>\n          <!-- For a stock Galaxy instance and traditional job runner $defaults will\n               expand out as:\n               $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw\n          -->\n          <param id=\"docker_volumes\" from_environ=\"GALAXY_DOCKER_VOLUMES\">$defaults</param>\n          <param id=\"docker_net\" from_environ=\"GALAXY_DOCKER_NET\">bridge</param>\n          <param id=\"docker_auto_rm\" from_environ=\"GALAXY_DOCKER_AUTO_RM\">True</param>\n          <param id=\"docker_set_user\" from_environ=\"GALAXY_DOCKER_SET_USER\"></param>\n        </destination>\n        <destination id=\"local\" runner=\"local\"/>\n    
</destinations>\n    <tools>\n        <!-- Tools can be configured to use specific destinations or handlers,\n             identified by either the \"id\" or \"tags\" attribute.  If assigned to\n             a tag, a handler or destination that matches that tag will be\n             chosen at random.\n         -->\n        <tool id=\"upload1\" destination=\"local\"/>\n    </tools>\n    <limits>\n    </limits>\n</job_conf>\n"
  },
  {
    "path": "test/slurm/munge.conf",
    "content": "###############################################################################\n# $Id: munge.sysconfig 507 2006-05-11 20:28:55Z dun $\n###############################################################################\n\n##\n# Pass additional command-line options to the daemon.\n##\nOPTIONS=\"--force --key-file /etc/munge/munge.key --num-threads 1\"\n\n##\n# Adjust the scheduling priority of the daemon.\n##\n# NICE=\n"
  },
  {
    "path": "test/slurm/startup.sh",
    "content": "#!/usr/bin/env bash\n\n# Setup the galaxy user UID/GID and pass control on to supervisor\nif id \"$SLURM_USER_NAME\" >/dev/null 2>&1; then\n        echo \"user exists\"\nelse\n        echo \"user does not exist, creating\"\n        useradd -m -d /var/\"$SLURM_USER_NAME\" \"$SLURM_USER_NAME\"\nfi\nusermod -u $SLURM_UID  $SLURM_USER_NAME\ngroupmod -g $SLURM_GID $SLURM_USER_NAME\nif [ ! -f \"$MUNGE_KEY_PATH\" ]\n  then\n    cp /etc/munge/munge.key \"$MUNGE_KEY_PATH\"\nfi\n\nif [ ! -f \"$SLURM_CONF_PATH\" ]\n  then\n    mkdir -p /etc/slurm\n    python3 /usr/local/bin/configure_slurm.py\n    cp /etc/slurm/slurm.conf \"$SLURM_CONF_PATH\"\n    if [ -f /etc/slurm/cgroup.conf ]\n      then\n        cp /etc/slurm/cgroup.conf \"$(dirname \"$SLURM_CONF_PATH\")/cgroup.conf\"\n        rm -f /etc/slurm/cgroup.conf\n    fi\n    rm /etc/slurm/slurm.conf\nfi\nif [ ! -f \"$GALAXY_DIR\"/.venv ]\n  then\n    mkdir -p \"$GALAXY_DIR\"/.venv\n    chown $SLURM_USER_NAME:$SLURM_USER_NAME \"$GALAXY_DIR\"/.venv\n    su $SLURM_USER_NAME -c \\\n        \"GALAXY_DIR=$GALAXY_DIR uv venv \\\"$GALAXY_DIR\\\"/.venv && \\\n        uv pip install --python \\\"$GALAXY_DIR\\\"/.venv/bin/python galaxy-lib\"\nfi\nmkdir -p /tmp/slurmd\nchown $SLURM_USER_NAME /tmp/slurm /tmp/slurmd\nln -s \"$GALAXY_DIR\" \"$SYMLINK_TARGET\"\nln -sf \"$SLURM_CONF_PATH\" /etc/slurm/slurm.conf\nif [ -f \"$(dirname \"$SLURM_CONF_PATH\")/cgroup.conf\" ]\n  then\n    ln -sf \"$(dirname \"$SLURM_CONF_PATH\")/cgroup.conf\" /etc/slurm/cgroup.conf\nfi\nexec /usr/bin/supervisord -n -c /etc/supervisor/supervisord.conf\n"
  },
  {
    "path": "test/slurm/supervisor_slurm.conf",
    "content": "[program:munge]\nuser=root\ncommand=/usr/sbin/munged --key-file=%(ENV_MUNGE_KEY_PATH)s -F --force\n\n[program:slurmctld]\nuser=root\ncommand=/usr/sbin/slurmctld -D -L /var/log/slurm/slurmctld.log -f %(ENV_SLURM_CONF_PATH)s\nautostart       = %(ENV_SLURMCTLD_AUTOSTART)s\nautorestart     = true\npriority        = 200\n\n[program:slurmd]\nuser=root\ncommand=/usr/sbin/slurmd -f %(ENV_SLURM_CONF_PATH)s -D -L /var/log/slurm/slurmd.log\nautostart       = %(ENV_SLURMD_AUTOSTART)s\nautorestart     = true\npriority        = 300\n\n"
  },
  {
    "path": "test/slurm/test.sh",
    "content": "#!/usr/bin/env bash\n\nset -euo pipefail\nset -x\n# Test that jobs run successfully on an external slurm cluster\n\n# We use a temporary directory as an export dir that will hold the shared data between\n# galaxy and slurm:\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nEXPORT=`mktemp --directory -p /var/tmp`\nchmod 777 \"$EXPORT\"\nGALAXY_IMAGE=\"${GALAXY_IMAGE:-galaxy:test}\"\n# Ensure leftover containers from previous runs don't conflict.\ndocker rm -f slurm galaxy-slurm-test >/dev/null 2>&1 || true\n# We build the slurm image\ndocker build -t slurm \"$SCRIPT_DIR\"\n# We fire up a slurm node (with hostname slurm)\ndocker run -d --rm -v \"$EXPORT\":/export -v /sys/fs/cgroup:/sys/fs/cgroup:rw --name slurm \\\n           --hostname slurm \\\n           slurm\n# We start galaxy (without the internal slurm, but with a modified job_conf.xml)\n# and link it to the slurm container (so that galaxy resolves the slurm container's hostname)\ndocker run -d --rm -e \"NONUSE=slurmd,slurmctld\" \\\n   --link slurm --name galaxy-slurm-test -h galaxy \\\n   -p 80:80 -v \"$EXPORT\":/export \"${GALAXY_IMAGE}\"\n# We wait for the creation of the /galaxy/config/ if it does not exist yet\nsleep 180s\n# We restart galaxy\ndocker stop galaxy-slurm-test || true\nfor i in $(seq 1 30); do\n    if ! 
docker ps -a --format '{{.Names}}' | grep -qx galaxy-slurm-test; then\n        break\n    fi\n    sleep 1s\ndone\n\n# We copy the job_conf.xml to the $EXPORT folder\ndocker run --rm -v \"$EXPORT\":/export -v \"$SCRIPT_DIR\":/workspace busybox sh -c \\\n  \"mkdir -p /export/galaxy/config && cp /workspace/job_conf.xml /export/galaxy/config/job_conf.xml && chown 1450:1450 /export/galaxy/config/job_conf.xml\"\n\ndocker run -d --rm -e \"NONUSE=slurmd,slurmctld\" \\\n   --link slurm --name galaxy-slurm-test -h galaxy \\\n   -p 80:80 -v \"$EXPORT\":/export \"${GALAXY_IMAGE}\"\n# Let's submit a job from the galaxy container and check it runs in the slurm container\nsleep 60s\nfor i in $(seq 1 30); do\n    if docker exec galaxy-slurm-test scontrol ping 2>/dev/null | grep -q \"UP\"; then\n        break\n    fi\n    sleep 2s\ndone\ndocker exec galaxy-slurm-test scontrol ping | grep -q \"UP\"\ndocker exec galaxy-slurm-test su - galaxy -c 'srun hostname' | grep slurm\ndocker exec -i galaxy-slurm-test /bin/sh -s <<'EOF' | grep slurm\nset -e\nrm -f /export/drmaa.out /export/drmaa.err\nDRMAA_LIBRARY_PATH=/usr/lib/slurm-drmaa/lib/libdrmaa.so /galaxy_venv/bin/python - <<'PY'\nimport drmaa\n\nwith drmaa.Session() as session:\n    jt = session.createJobTemplate()\n    jt.remoteCommand = \"/bin/hostname\"\n    jt.outputPath = \":\" + \"/export/drmaa.out\"\n    jt.errorPath = \":\" + \"/export/drmaa.err\"\n    jt.nativeSpecification = \"-n 1\"\n    jobid = session.runJob(jt)\n    session.deleteJobTemplate(jt)\n    session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\nwith open(\"/export/drmaa.out\", \"r\") as handle:\n    print(handle.read().strip())\nPY\nEOF\ndocker stop galaxy-slurm-test slurm || true\ndocker rmi slurm || true\n# TODO: Run a galaxy tool and check it runs on the cluster\n"
  }
]