diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..6deef159b
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,14 @@
+[flake8]
+exclude =
+    .venv
+    collections
+    roles/htcondor
+    roles/hxr.monitor-galaxy
+    roles/hxr.monitor-squid
+    roles/hxr.simple-nagios
+    roles/jasonroyle.rabbitmq
+    templates/encoder/yaml_converter.py
+ignore =
+    E203,
+    W503
+docstring-convention = google
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000..30e1e5ea3
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,6 @@
+# Run isort on galaxy_jwd.py
+c573ecd02e0f1ce97e74c21b753faf2467e9a227
+# Run black on galaxy_jwd.py
+e44dc2711a3bb70e62848049f09b449667b13ad1
+# Run flake8 on galaxy_jwd.py
+40095d807803bcc8faa49278ee9904c079313439
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
new file mode 100644
index 000000000..83ac304ba
--- /dev/null
+++ b/.github/workflows/python.yml
@@ -0,0 +1,31 @@
+---
+name: Python formatting
+
+"on": pull_request
+
+jobs:
+  PEP8:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - run: pip install --upgrade pip
+
+      - id: pip_install
+        run: pip install --upgrade isort~=5.0 flake8~=6.0 flake8-docstrings~=1.0
+
+      - name: isort
+        run: isort . --check --diff
+        if: (success() || failure()) && steps.pip_install.conclusion == 'success'
+
+      - name: Black
+        uses: psf/black@stable
+        with:
+          version: "~=23.0"
+          options: "--check --diff"
+          src: "."
+        if: (success() || failure()) && steps.pip_install.conclusion == 'success'
+
+      - name: Flake8
+        run: flake8 .
+        if: (success() || failure()) && steps.pip_install.conclusion == 'success'
diff --git a/.github/workflows/tpv.py b/.github/workflows/tpv.py
new file mode 100755
index 000000000..0df7810a2
--- /dev/null
+++ b/.github/workflows/tpv.py
@@ -0,0 +1,168 @@
+"""Collection of functions to generate Ansible playbooks that template files.
+
+Given an Ansible playbook, this module helps you modify it in order to
+produce another playbook whose sole purpose is templating files.
+"""
+
+import glob
+import os
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Iterable
+
+import yaml
+from jinja2 import Environment, FileSystemLoader, meta
+
+# Keys from plays to be kept in the generated playbooks
+KEEP_KEYS = {
+    "name",
+    "hosts",
+    "vars",
+    "vars_files",
+}
+
+# Set the default editor to "true" to create empty Ansible vaults easily
+os.environ["EDITOR"] = "true"
+
+
+# Factory for the templating tasks executed in each play.
+def templating_task(
+    src: str | Path,
+    dest: str | Path,
+) -> dict:
+    """Generate an Ansible templating task.
+
+    Args:
+        src: Template file.
+        dest: Templated file.
+
+    Returns:
+        Ansible templating task (in the form of a dictionary).
+    """
+    return {
+        "name": f"Template {src}",
+        "template": {
+            "src": str(src),
+            "dest": str(dest),
+        },
+    }
+
+
+def get_variables(path: str | Path) -> set[str]:
+    """Get names of the variables used in a Jinja template.
+
+    Args:
+        path: Path of the Jinja template.
+
+    Returns:
+        Set of variables used in the Jinja template.
+    """
+    path = Path(path)
+    dirname = path.parent
+
+    env = Environment(
+        loader=FileSystemLoader(dirname),
+        extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"],
+    )
+    with open(path, "r") as file:
+        source = file.read()
+    parsed_content = env.parse(source)
+    variables = set(meta.find_undeclared_variables(parsed_content))
+
+    return variables
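A quick, self-contained illustration of `get_variables` (not part of the patch): it parses the template and reports every name the template references but does not itself define, so loop targets are excluded. The template contents and variable names below are made up for the example; it assumes `jinja2` and `jinja2-ansible-filters` are installed, and it loads the module the same way the workflows below do.

    import importlib.util
    import tempfile
    from pathlib import Path

    # Load .github/workflows/tpv.py the same way the tpv.yml workflow does.
    spec = importlib.util.spec_from_file_location("tpv_ci", ".github/workflows/tpv.py")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # Hypothetical template: a plain variable, a filtered variable, and a loop.
    # The loop target "path" is declared by the template itself, so it is not
    # reported as undeclared.
    source = (
        "{{ galaxy_commit_id }}\n"
        "{{ mounts | to_nice_yaml }}\n"
        "{% for path in tpv_config_files %}{{ path }}{% endfor %}\n"
    )

    with tempfile.TemporaryDirectory() as tmp:
        template = Path(tmp) / "example.yml.j2"
        template.write_text(source)
        print(module.get_variables(template))
        # {'galaxy_commit_id', 'mounts', 'tpv_config_files'}  (a set; order varies)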
+ """ + path = Path(path) + dirname = path.parent + + env = Environment( + loader=FileSystemLoader(dirname), + extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"], + ) + source = open(path, "r").read() + parsed_content = env.parse(source) + variables = set(meta.find_undeclared_variables(parsed_content)) + + return variables + + +def make_playbook( + model: str | Path, + templates: Iterable[tuple[str | Path, str | Path]], +) -> Path: + """Generate a playbook from a model. + + Takes a playbook as reference, keeps the skeleton of the playbook (e.g. + variables, and variable files) and replaces the existing tasks with + templating tasks. + + The working directory of the model playbook will be copied to a temporary + directory and in such directory, the newly generated playbook will replace + the model playbook. The function returns the absolute path of the newly + generated playbook. + + Args: + model: Playbook to be taken as reference. The skeleton of the playbook + is kept (keys from KEEP_KEYS, for example the vars and + vars_files), while the existing tasks are stripped out. + templates: Files to be templated, given as tuples where the first + element is the path of the template and the second element the + path of the templated file. + + Returns: + The absolute path of the newly generated playbook. + """ + playbook = Path(model) + dirname = playbook.parent + basename = playbook.name + + # copy working directory to the temporary directory and chdir into it + directory = Path(tempfile.mkdtemp()) / "ansible" + shutil.copytree(dirname, directory) + playbook = directory / basename + dirname = directory + + playbook = yaml.safe_load(open(playbook)) + for i, play in enumerate(playbook): + # filter keys + play = {key: value for key, value in play.items() if key in KEEP_KEYS} + + # replace vaults with empty vaults + vault_string = "$ANSIBLE_VAULT" + for filename in play.get("vars_files", []): + try: + with open(filename, "r") as file: + is_vault = file.read(14) == vault_string + except FileNotFoundError: + is_vault = False + if is_vault: + os.remove(directory / filename) + subprocess.run( + ["ansible-vault", "create", directory / filename] + ) + + # generate a file with dummy variables + dummy_vars = {} + # - determinate what is already defined in group variables + group_vars = set() + for file_path in glob.glob(str(directory / "group_vars" / "*")): + contents = yaml.safe_load(open(file_path)) + group_vars |= set(contents) + # - for vars files + dummy_vars |= { + var: "undefined" + for vars_file in play.get("vars_files", []) + for var in get_variables(vars_file) + if var not in group_vars + } + # - for templates + dummy_vars |= { + var: "undefined" + for src, _ in templates + for var in get_variables(src) + if var not in group_vars + } + # - save generated file + with open(directory / "dummy_vars_file.yml", "w") as file: + yaml.dump(dummy_vars, file) + + # insert file with dummy variables at the top of the vars_files list + play["vars_files"] = play.get("vars_files", []) + play["vars_files"].insert(0, "dummy_vars_file.yml") + + play["tasks"] = [templating_task(src, dest) for src, dest in templates] + + playbook[i] = play + + playbook_path = (directory / basename).absolute() + with open(playbook_path, "w") as file: + yaml.dump(playbook, file) + + return playbook_path diff --git a/.github/workflows/tpv.yml b/.github/workflows/tpv.yml new file mode 100644 index 000000000..bb83c2705 --- /dev/null +++ b/.github/workflows/tpv.yml @@ -0,0 +1,426 @@ +--- +name: Total Perspective Vortex + 
+'on': + pull_request: + paths: + - 'files/galaxy/tpv/**' + push: + branches: + - master + paths: + - 'files/galaxy/tpv/**' + +jobs: + lint: + name: Total Perspective Vortex linter + runs-on: ubuntu-latest + steps: + + - name: Check out the codebase. + uses: actions/checkout@v2 + with: + path: 'infrastructure-playbook' + + - name: Update git submodules. + working-directory: 'infrastructure-playbook' + run: | + git submodule update --init --recursive --remote --checkout + + - name: Set up Python 3. + uses: actions/setup-python@v2 + with: + python-version: '3.x' + cache: 'pip' + + # Install Ansible. + - name: Cache Ansible. + id: cache-ansible + uses: actions/cache@v3 + with: + path: /opt/hostedtoolcache/Python/*/*/lib/python*/site-packages/ansible* + key: ${{ hashFiles('infrastructure-playbook/requirements.txt') }} + - name: Install Ansible (fast when an existing installation was already cached). + working-directory: 'infrastructure-playbook' + run: | + # Install an Ansible version compatible with the version of + # ansible-core specified in requirements.txt for the + # infrastructure-playbook repo. + ANSIBLE_CORE_REQ=$(perl -pe 's/\\\n/ /' requirements.txt | grep ansible-core) + pip3 install --use-feature=fast-deps ansible "$ANSIBLE_CORE_REQ" + - name: Save Ansible cache. + uses: actions/cache/save@v3 + if: ${{ steps.cache-ansible.outputs.cache-hit != 'true' }} + with: + path: /opt/hostedtoolcache/Python/*/*/lib/python*/site-packages/ansible* + key: ${{ hashFiles('infrastructure-playbook/requirements.txt') }} + + # Total Perspective Vortex needs the Galaxy logic, which should be + # installed automatically when running `pip3 install + # total-perspective-vortex[cli]` (the `galaxy-app` package). + # However: + # - `galaxy-app` package on PyPI is outdated (see issue #15999 on + # the Galaxy repo: https://github.com/galaxyproject/galaxy/issues/15999) + # - Ideally the version of Galaxy should exactly match the one running on + # usegalaxy.eu. + # Therefore, we clone Galaxy and add it to the PYTHONPATH. + - name: Get Galaxy repo and commit id. + working-directory: 'infrastructure-playbook' + run: | + # Get the Galaxy repository URL and commit from Ansible variables. + export TMP_FILE=`mktemp` + openssl rand -base64 24 > .vault_password + ansible localhost --connection local \ + --inventory hosts --module-name copy \ + --args "content={{hostvars['sn06.galaxyproject.eu']}} dest=${TMP_FILE}" \ + > /dev/null + export GALAXY_COMMIT_ID=$(cat ${TMP_FILE} | jq -r .galaxy_commit_id) + export GALAXY_REPO=$(cat ${TMP_FILE} | jq -r .galaxy_repo) + rm ${TMP_FILE} + echo $GALAXY_COMMIT_ID > ../galaxy_commit_id + echo $GALAXY_REPO > ../galaxy_repo + - name: Cache Galaxy + id: cache-galaxy + uses: actions/cache@v3 + with: + path: galaxy + key: ${{ hashFiles('galaxy_repo') }}-${{ hashFiles('galaxy_commit_id') }} + - name: Clone Galaxy + if: ${{ steps.cache-galaxy.outputs.cache-hit != 'true' }} + run: | + export GALAXY_COMMIT_ID="$(cat ./galaxy_commit_id)" + export GALAXY_REPO="$(cat ./galaxy_repo)" + # git clone -b $GALAXY_COMMIT_ID --single-branch --depth=1 $GALAXY_REPO galaxy # does not work with commit hashes + git clone $GALAXY_REPO galaxy && cd galaxy && git checkout $GALAXY_COMMIT_ID + - name: Save Galaxy cache. + uses: actions/cache/save@v3 + if: ${{ steps.cache-galaxy.outputs.cache-hit != 'true' }} + with: + path: galaxy + key: ${{ hashFiles('galaxy_repo') }}-${{ hashFiles('galaxy_commit_id') }} + - name: Install Galaxy requirements. 
+ working-directory: 'galaxy' + run: pip install -r requirements.txt + + # Install the Total Perspective Vortex version that should be running on + # usegalaxy.eu + - name: Install Total Perspective Vortex. + working-directory: 'galaxy' + run: | + TPV_REQ=$(perl -pe 's/\\\n/ /' lib/galaxy/dependencies/conditional-requirements.txt | grep total-perspective-vortex) + pip3 install --upgrade "$TPV_REQ" + + - name: Install port of Ansible filters for Jinja (required for the next step). + run: | + pip3 install jinja2-ansible-filters + + - name: Create mounts vars file. + working-directory: 'infrastructure-playbook/mounts' + run: | + make dest/all.yml + + - name: Create a playbook to template the TPV files. + shell: python -u {0} + working-directory: 'infrastructure-playbook' + run: | + import glob + import importlib + import sys + + # import tpv.py + spec = importlib.util.spec_from_file_location('tpv_ci', '.github/workflows/tpv.py') + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + make_playbook = module.make_playbook + + tpv_path = "files/galaxy/tpv/" + templates = tuple( + (file, '.'.join(file.split('.')[:-1])) # remove j2 extension + for file in glob.glob(f"{tpv_path}/*.yaml.j2") + glob.glob(f"{tpv_path}/*.yml.j2") + ) + + playbook = make_playbook('sn06.yml', templates=templates) + with open('../playbook_path', 'w') as file: + file.write(str(playbook)) + + - name: Render TPV configuration files. + run: | + PLAYBOOK="$(cat playbook_path)" + BASENAME="$(basename $PLAYBOOK)" + DIRNAME="$(dirname $PLAYBOOK)" + cd "$DIRNAME" + + shopt -s nullglob + + ansible-playbook --connection=local "$BASENAME" + + - name: Run Total Perspective Vortex linter. + run: | + export PYTHONPATH=$(realpath ./galaxy/lib) + + PLAYBOOK="$(cat playbook_path)" + BASENAME="$(basename $PLAYBOOK)" + DIRNAME="$(dirname $PLAYBOOK)" + cd "$DIRNAME" + + shopt -s nullglob + + for file in files/galaxy/tpv/*.{yml,yaml}; do + echo Running TPV linter on "$file"... + tpv lint $file || exit 1 + done + + dry-run: + name: Total Perspective Vortex dry-run + runs-on: ubuntu-latest + steps: + + - name: Check out the codebase. + uses: actions/checkout@v3 + with: + fetch-depth: ${{ github.event_name == 'pull_request' && 2 || 0 }} + path: 'infrastructure-playbook' + + - name: Update git submodules. + working-directory: 'infrastructure-playbook' + run: | + git submodule update --init --recursive --remote --checkout + + - name: Set up Python 3. + uses: actions/setup-python@v2 + with: + python-version: '3.x' + cache: 'pip' + + # Install Ansible. + - name: Cache Ansible. + id: cache-ansible + uses: actions/cache@v3 + with: + path: /opt/hostedtoolcache/Python/*/*/lib/python*/site-packages/ansible* + key: ${{ hashFiles('infrastructure-playbook/requirements.txt') }} + - name: Install Ansible. + working-directory: 'infrastructure-playbook' + run: | + # Install an Ansible version compatible with the version of + # ansible-core specified in requirements.txt for the + # infrastructure-playbook repo. + ANSIBLE_CORE_REQ=$(perl -pe 's/\\\n/ /' requirements.txt | grep ansible-core) + pip3 install --use-feature=fast-deps ansible "$ANSIBLE_CORE_REQ" + - name: Save Ansible cache. 
+ uses: actions/cache/save@v3 + if: ${{ steps.cache-ansible.outputs.cache-hit != 'true' }} + with: + path: /opt/hostedtoolcache/Python/*/*/lib/python*/site-packages/ansible* + key: ${{ hashFiles('infrastructure-playbook/requirements.txt') }} + + # Total Perspective Vortex needs the Galaxy logic, which should be + # installed automatically when running `pip3 install + # total-perspective-vortex[cli]` (the `galaxy-app` package). + # However: + # - `galaxy-app` package on PyPI is outdated (see issue #15999 on + # the Galaxy repo: https://github.com/galaxyproject/galaxy/issues/15999) + # - Ideally the version of Galaxy should exactly match the one running on + # usegalaxy.eu. + # Therefore, we clone Galaxy and add it to the PYTHONPATH. + - name: Get Galaxy repo and commit id. + id: commits-galaxy + working-directory: 'infrastructure-playbook' + run: | + # Get the Galaxy repository URL and commit from Ansible variables. + export TMP_FILE=`mktemp` + openssl rand -base64 24 > .vault_password + ansible localhost --connection local \ + --inventory hosts --module-name copy \ + --args "content={{hostvars['sn06.galaxyproject.eu']}} dest=${TMP_FILE}" \ + > /dev/null + export GALAXY_COMMIT_ID=$(cat ${TMP_FILE} | jq -r .galaxy_commit_id) + export GALAXY_REPO=$(cat ${TMP_FILE} | jq -r .galaxy_repo) + rm ${TMP_FILE} + echo "commit=$GALAXY_COMMIT_ID" >> $GITHUB_OUTPUT + echo "repo=$GALAXY_REPO" >> $GITHUB_OUTPUT + - name: Cache Galaxy + id: cache-galaxy + uses: actions/cache@v3 + with: + path: galaxy + key: ${{ steps.commits-galaxy.outputs.repo }}-${{ steps.commits-galaxy.outputs.commit }} + - name: Clone Galaxy + if: ${{ steps.cache-galaxy.outputs.cache-hit != 'true' }} + run: | + # git clone -b ${{ steps.commits-galaxy.outputs.commit }} \ + # --single-branch \ + # --depth=1 ${{ steps.commits-galaxy.outputs.repo }} \ + # galaxy # does not work with commit hashes + git clone ${{ steps.commits-galaxy.outputs.repo }} galaxy && cd galaxy && git checkout ${{ steps.commits-galaxy.outputs.commit }} + - name: Save Galaxy cache. + uses: actions/cache/save@v3 + if: ${{ steps.cache-galaxy.outputs.cache-hit != 'true' }} + with: + path: galaxy + key: ${{ steps.commits-galaxy.outputs.repo }}-${{ steps.commits-galaxy.outputs.commit }} + - name: Install Galaxy requirements. + working-directory: 'galaxy' + run: pip install -r requirements.txt + + # Install the Total Perspective Vortex version that should be running on + # usegalaxy.eu + - name: Install Total Perspective Vortex. + working-directory: 'galaxy' + run: | + TPV_REQ=$(perl -pe 's/\\\n/ /' lib/galaxy/dependencies/conditional-requirements.txt | grep total-perspective-vortex) + pip3 install --upgrade "$TPV_REQ" + + - name: Install port of Ansible filters for Jinja. + run: | + pip3 install jinja2-ansible-filters + + - name: Create mounts vars file. + working-directory: 'infrastructure-playbook/mounts' + run: | + make dest/all.yml + + - name: Get commit ids before/after push or pull request. + id: commits-infrastructure-playbook + working-directory: 'infrastructure-playbook' + run: | + set -Eeo pipefail + if ${{ github.event_name == 'pull_request' }}; then + echo "before=$(git rev-parse HEAD^1)" >> $GITHUB_OUTPUT + echo "after=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT + else + echo "before=${{ github.event.before }}" >> $GITHUB_OUTPUT + echo "after=${{ github.event.after }}" >> $GITHUB_OUTPUT + fi + + - name: Create playbooks to template the TPV files. 
+ id: playbooks + shell: python -u {0} + working-directory: 'infrastructure-playbook' + run: | + import glob + import importlib + import os + import subprocess + import sys + + # import make_playbook from tpv.py + spec = importlib.util.spec_from_file_location('tpv_ci', '.github/workflows/tpv.py') + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + make_playbook = module.make_playbook + + tpv_path = 'files/galaxy/tpv/' + templates = tuple( + # TPV configuration files + (file, '.'.join(file.split('.')[:-1])) # remove j2 extension + for file in glob.glob(f'{tpv_path}/*.yaml.j2') + glob.glob(f'{tpv_path}/*.yml.j2') + ) + ( + # job_conf.yml + ('templates/galaxy/config/job_conf.yml.j2', 'templates/galaxy/config/job_conf.yml'), + ) + + # template files based on the new version + playbook = make_playbook('sn06.yml', templates=templates) + with open(os.environ["GITHUB_OUTPUT"], "a") as file: + file.write(f"new={playbook}\n") + + # template files based on the old version + subprocess.run( + ["git", "checkout", "${{ steps.commits-infrastructure-playbook.outputs.before }}"] + ) + playbook = make_playbook('sn06.yml', templates=templates) + with open(os.environ["GITHUB_OUTPUT"], "a") as file: + file.write(f"old={playbook}\n") + + - name: Render TPV configuration files. + run: | + shopt -s nullglob + + PLAYBOOK="${{ steps.playbooks.outputs.old }}" + BASENAME="$(basename $PLAYBOOK)" + DIRNAME="$(dirname $PLAYBOOK)" + cd "$DIRNAME" + ansible-playbook --connection=local "$BASENAME" + + PLAYBOOK="${{ steps.playbooks.outputs.new }}" + BASENAME="$(basename $PLAYBOOK)" + DIRNAME="$(dirname $PLAYBOOK)" + cd "$DIRNAME" + ansible-playbook --connection=local "$BASENAME" + + - name: Change paths of TPV configuration files in job_conf.yml + shell: python -u {0} + run: | + import os + from pathlib import Path + from urllib.parse import urlparse + + import yaml + + repository = Path("${{ steps.playbooks.outputs.new }}").parent + job_conf_path = Path('templates/galaxy/config/job_conf.yml') + tpv_path = Path('files/galaxy/tpv/') + + job_conf = yaml.safe_load(open(repository / job_conf_path, 'r')) + tpv_config_files = job_conf['execution']['environments']['tpv_dispatcher']['tpv_config_files'] + for i, file in enumerate(tpv_config_files): + if urlparse(file).scheme in {'file', ''}: + tpv_config_files[i] = str(repository / tpv_path / Path(file).name) + job_conf['execution']['environments']['tpv_dispatcher']['tpv_config_files'] = tpv_config_files + + yaml.dump(job_conf, open(repository / job_conf_path, 'w')) + + - name: Detect tools that have changed. 
+        id: tools-changed
+        shell: python -u {0}
+        run: |
+          import glob
+          import hashlib
+          import os
+          from pathlib import Path
+
+          import yaml
+
+          playbooks = {
+              'old': Path("${{ steps.playbooks.outputs.old }}"),
+              'new': Path("${{ steps.playbooks.outputs.new }}"),
+          }
+          os.chdir(playbooks['new'].parent)
+
+          tpv_path = Path('files/galaxy/tpv/')
+
+          changed = set()
+          for file in (Path(path) for path in glob.glob(f"{tpv_path}/*.yml") + glob.glob(f"{tpv_path}/*.yaml")):
+              comparison = {}
+              for key, playbook in playbooks.items():
+                  comparison[key] = yaml.safe_load(open(playbooks[key].parent / file, 'r')).get("tools", {})
+                  comparison[key] = {
+                      key: hashlib.sha256(yaml.dump(value, sort_keys=True).encode('utf-8')).hexdigest()
+                      for key, value in comparison[key].items()
+                  }
+              changed |= set(comparison['new']) - set(comparison['old'])
+              changed |= {key for key in set(comparison['new']) & set(comparison['old']) if comparison['new'][key] != comparison['old'][key]}
+
+          with open(os.environ["GITHUB_OUTPUT"], 'a') as file:
+              file.write(f"changed={' '.join(changed)}\n")
+
+      - name: Run Total Perspective Vortex dry-run.
+        env:
+          TOOLS: ${{ steps.tools-changed.outputs.changed }}
+          PLAYBOOK: ${{ steps.playbooks.outputs.new }}
+        run: |
+          set -Eeo pipefail
+          shopt -s nullglob
+
+          export PYTHONPATH=$(realpath ./galaxy/lib)
+
+          BASENAME="$(basename $PLAYBOOK)"
+          DIRNAME="$(dirname $PLAYBOOK)"
+          cd "$DIRNAME"
+
+          IFS=" " read -a tools <<< "$TOOLS"
+          for tool in "${tools[@]}"; do
+              echo Running TPV dry-run for "$tool..."
+              tpv dry-run --job-conf templates/galaxy/config/job_conf.yml --tool "$tool"
+          done
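The change-detection logic embedded in the "Detect tools that have changed" step above is dense; here is a distilled, standalone sketch of the same idea (our naming, not part of the patch): serialize each tool's subtree with `sort_keys=True` so the digest is stable under key reordering, then flag tools that are new or whose digest differs between the old and new renderings.

    import hashlib
    from pathlib import Path

    import yaml


    def changed_tools(old_file: Path, new_file: Path) -> set[str]:
        """Return ids of tools whose TPV section differs between two renderings."""
        digests = {}
        for label, path in (("old", old_file), ("new", new_file)):
            # Each rendered TPV file maps tool ids to their scheduling rules.
            tools = yaml.safe_load(path.read_text()).get("tools", {})
            digests[label] = {
                tool: hashlib.sha256(
                    yaml.dump(spec, sort_keys=True).encode("utf-8")
                ).hexdigest()
                for tool, spec in tools.items()
            }
        # Tools present only in the new rendering count as changed...
        added = set(digests["new"]) - set(digests["old"])
        # ...as do tools whose serialized rules no longer hash the same.
        modified = {
            tool
            for tool in set(digests["new"]) & set(digests["old"])
            if digests["new"][tool] != digests["old"][tool]
        }
        return added | modified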
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..0547b52c9
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "mounts"]
+    path = mounts
+    url = https://github.com/usegalaxy-eu/mounts
diff --git a/README.md b/README.md
index fd565f737..3835561ae 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,9 @@
 Ansible playbook for managing UseGalaxy.EU infrastructure. For the playbook
 managing Galaxy itself, see https://github.com/galaxyproject/usegalaxy-playbook/
 
+## Detailed documentation
+Detailed documentation is available in [usegalaxy-eu/operations](https://github.com/usegalaxy-eu/operations/blob/main/infrastructure_playbook_repo.md).
+
 ## Running Notes
 This probably won't work for your infra.
 We require everything to run on
diff --git a/beacon-import.yml b/beacon-import.yml
index fa615029d..ab9aa69f5 100644
--- a/beacon-import.yml
+++ b/beacon-import.yml
@@ -4,10 +4,25 @@
   hosts:
     - beacon_import
   vars_files:
+    - "secret_group_vars/all.yml"
     - secret_group_vars/beacon.yml
   vars:
+    hostname: beacon-import.galaxyproject.eu
     script_user: beacon
     script_dir: /home/beacon/script
     galaxy_api_url: https://usegalaxy.eu
+  collections:
+    - devsec.hardening
   roles:
+    - role: usegalaxy_eu.handy.os_setup
+      vars:
+        enable_hostname: true
+        enable_powertools: true  # geerlingguy.repo-epel role doesn't enable PowerTools repository
+    - geerlingguy.repo-epel  # Install EPEL repository
+    - usegalaxy-eu.autoupdates  # keep all of our packages up to date
+    - influxdata.chrony
+    - dj-wasabi.telegraf
+    - usegalaxy-eu.dynmotd  # nicer MOTD/welcome message
     - paprikant.beacon-importer
+    - os_hardening
+    - ssh_hardening
diff --git a/bin/check-tool-mem-cpu.py b/bin/check-tool-mem-cpu.py
deleted file mode 100644
index c7fec69c2..000000000
--- a/bin/check-tool-mem-cpu.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-import yaml
-import os
-import sys
-
-D = os.path.dirname(os.path.realpath(os.path.join(__file__, "..")))
-
-jcaas_conf = yaml.safe_load(
-    open(
-        os.path.join(D, "files/galaxy/dynamic_rules/usegalaxy/tool_destinations.yaml"),
-        "r",
-    )
-)
-jcaas_conf2 = {}
-for (k, v) in jcaas_conf.items():
-    jcaas_conf2[k.lower()] = v
-
-
-def get_tool_id(tool_id):
-    if tool_id.count("/") == 0:
-        return tool_id
-
-    if tool_id.count("/") == 5:
-        (server, _, owner, repo, name, version) = tool_id.split("/")
-        return name
-
-    return tool_id
-
-
-max_mem = 0
-max_cpu = 0
-
-
-for v in sys.stdin.read().split("\n"):
-    tool_id = get_tool_id(v).lower().strip()
-
-    if tool_id in jcaas_conf2:
-        tool_conf = jcaas_conf2[tool_id]
-        print(tool_id, tool_conf)
-
-        if tool_conf.get("mem", 4) > max_mem:
-            max_mem = tool_conf.get("mem", 4)
-
-        if tool_conf.get("cores", 4) > max_cpu:
-            max_cpu = tool_conf.get("cores", 4)
-
-print("Maximums: memory=%s cpu=%s" % (max_mem, max_cpu))
diff --git a/bin/diff-before-update b/bin/diff-before-update
index 539796b73..9deb09cdd 100755
--- a/bin/diff-before-update
+++ b/bin/diff-before-update
@@ -19,6 +19,8 @@ fi
 
 if [[ $2 == "emacs" ]]; then
     difftool='emacs-ediff.sh'
+elif [[ $2 == "meld" ]]; then
+    difftool='meld'
 else
     difftool='vimdiff'
 fi
diff --git a/bin/workflow-extract-tools.py b/bin/workflow-extract-tools.py
deleted file mode 100644
index 1bd3d6be5..000000000
--- a/bin/workflow-extract-tools.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-import json
-import sys
-
-
-def get_tool_id(tool_id):
-    if tool_id.count("/") == 0:
-        return tool_id
-
-    if tool_id.count("/") == 5:
-        (server, _, owner, repo, name, version) = tool_id.split("/")
-        return name
-
-    return tool_id
-
-
-def tools_from_wf(data):
-
-    for k, v in data["steps"].items():
-        if v["tool_id"] is None:
-            continue
-
-        if "subworkflow" in v:
-            yield from tools_from_wf(v["subworkflow"])
-        else:
-            yield get_tool_id(v["tool_id"])
-
-
-def obtain():
-    for f in sys.argv[1:]:
-        with open(f, "r") as handle:
-            data = json.load(handle)
-            yield from tools_from_wf(data)
-
-
-for x in obtain():
-    print(x)
diff --git a/celery.yml b/celery.yml
index 3d7f3294c..2399d8233 100644
--- a/celery.yml
+++ b/celery.yml
@@ -6,6 +6,8 @@
   vars_files:
     - "secret_group_vars/all.yml"
     - "secret_group_vars/pulsar.yml"
+    - mounts/mountpoints.yml
+    - mounts/dest/all.yml
   pre_tasks:
     - name: Install Dependencies
       package:
diff --git a/cvmfs.yml b/cvmfs.yml
index
a0e865841..fa4d8ef56 100644 --- a/cvmfs.yml +++ b/cvmfs.yml @@ -5,6 +5,8 @@ hostname: cvmfs1-ufr0.internal.galaxyproject.eu vars_files: - "secret_group_vars/all.yml" + - mounts/mountpoints.yml + - mounts/dest/all.yml collections: - devsec.hardening pre_tasks: @@ -19,6 +21,15 @@ group: root state: link force: true + post_tasks: + - name: Disable SELinux + selinux: + state: disabled + register: + selinux_disabled + # - name: Reboot if SELinux was disabled + # reboot: + # when: selinux_disabled.reboot_required == true roles: # Starting configuration of the operating system - role: usegalaxy_eu.handy.os_setup diff --git a/files/galaxy-test/dynamic_rules/readme.txt b/files/galaxy-test/dynamic_rules/readme.txt deleted file mode 100644 index 85b011f9b..000000000 --- a/files/galaxy-test/dynamic_rules/readme.txt +++ /dev/null @@ -1,5 +0,0 @@ -Rules need to go in subdir otherwise can't be added to galaxy path because -galaxy path demands it is in a submodule - -This file is necessary so the role detects the parent dir to mark for adding -__init__.py diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/blast_destinations.py b/files/galaxy-test/dynamic_rules/usegalaxy/blast_destinations.py deleted file mode 100644 index c8521d63a..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/blast_destinations.py +++ /dev/null @@ -1,51 +0,0 @@ -from galaxy.jobs import JobDestination -import os - - -def blast_destinations( job, tool ): - # Allocate extra time - inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) - inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) - query_file = inp_data[ "query" ].file_name - vmem = 2000 - cores = 6 - - inp_params = dict( [ ( param.name, param.value ) for param in job.parameters ] ) - - blast_type = inp_params.get( "blast_type", 'None' ) - if str(blast_type) == '"dc-megablast"': - # vmem per core and in MB - vmem = 6000 - cores = 5 - - sizeBinMap = {} - binPriorityMap = {} - params = {} - query_file_size = os.path.getsize(query_file) / (1024*1024.0) - - params["nativeSpecification"] = """-l galaxy1_slots=1 -l h_vmem=%sM -pe "pe*" %s -v _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1 """ % (vmem, cores) - - if query_file_size < 5: - params["nativeSpecification"] += " -p -129 " - else: - params["nativeSpecification"] += " -hard -l hblast=1 " - for c, i in enumerate( range(5, 1000, (1000-5)/100) ): - sizeBinMap[i] = c - - - for c, i in enumerate( range(130, 512, (512-130)/100) ): - binPriorityMap[c] = i - - query_bin = 1 - for bound in sorted(sizeBinMap): - if query_file_size > bound: - query_bin = sizeBinMap[bound] - params["nativeSpecification"] += " -p -%s" % binPriorityMap[query_bin] - - params['request_memory'] = vmem / 1024 - params['request_cpus'] = cores - params['requirements'] = '(GalaxyGroup == "compute")' - params['priority'] = 128 - - # return JobDestination(id="blast_dynamic_job_destination", runner="drmaa", params=params) - return JobDestination(id="blast_dynamic_job_destination", runner="condor", params=params) diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/destination_specifications.yaml b/files/galaxy-test/dynamic_rules/usegalaxy/destination_specifications.yaml deleted file mode 100644 index 0e22f165d..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/destination_specifications.yaml +++ /dev/null @@ -1,833 +0,0 @@ ---- -condor: - info: - remote: false - 
scheduler: 'condor' - limits: - cores: 64 - mem: 1000 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute"' - tmp_dir: 'True' - metadata_strategy: 'extended' - -# TBD -condor_extended: - info: - remote: false - scheduler: 'condor' - limits: - cores: 64 - mem: 1000 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute"' - tmp_dir: 'True' - metadata_strategy: 'extended' - -condor_upload: - info: - remote: false - scheduler: 'condor' - limits: - cores: 20 - mem: 10 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - params: - priority: '{PRIORITY}' - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyTraining == false' - rank: 'GalaxyGroup == "upload"' - tmp_dir: 'True' - metadata_strategy: 'extended' - -# needed because of this https://github.com/galaxyproject/tools-iuc/issues/3663 -condor_mothur: - info: - remote: false - scheduler: 'condor' - limits: - cores: 64 - mem: 400 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute_mothur"' - tmp_dir: 'True' - metadata_strategy: 'extended' - -condor_gpu: - info: - remote: false - scheduler: 'condor' - limits: - cores: 8 - mem: 16 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - GPU_AVAILABLE: 1 - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute_gpu"' - request_gpus: '{GPUS}' - tmp_dir: 'True' - metadata_strategy: 'extended' - -condor_singularity: - info: - remote: false - scheduler: 'condor' - limits: - cores: 64 - mem: 1000 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute"' - tmp_dir: 'True' - metadata_strategy: 'extended' - singularity_enabled: true - # can and should be made stricter at some point - singularity_volumes: '$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro' - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/all/centos:8.3.2011' - - -condor_singularity_with_gpu: - info: - remote: false - scheduler: 'condor' - limits: - cores: 8 - mem: 18 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - request_gpus: '{GPUS}' - dependency_resolution: 'none' - require_container: true - requirements: 'GalaxyGroup == "compute_gpu"' 
- tmp_dir: 'True' - metadata_strategy: 'extended' - singularity_enabled: true - singularity_run_extra_arguments: '--nv' - # can and should be made stricter at some point - singularity_volumes: '$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro' - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/all/centos:8.3.2011' - - -condor_singularity_with_conda: - info: - remote: false - scheduler: 'condor' - limits: - cores: 64 - mem: 1000 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyGroup == "compute"' - tmp_dir: 'True' - metadata_strategy: 'extended' - require_container: true - singularity_enabled: true - # can and should be made stricter at some point - singularity_volumes: '$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro' - container_override: - - type: singularity - shell: '/bin/bash' - resolve_dependencies: true - identifier: '/cvmfs/singularity.galaxyproject.org/all/centos:8.3.2011' - -condor_docker_ie: - info: - remote: false - scheduler: 'condor' - limits: - cores: 10 - mem: 50 - env: {} - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' - tmp_dir: 'True' - metadata_strategy: 'extended' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_import/galaxy_user_data/:ro,/data/1/galaxy_import/galaxy_user_data/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/db/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - -condor_docker_ie_interactive: - info: - remote: false - scheduler: 'condor' - limits: - cores: 10 - mem: 50 - env: {} - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' - tmp_dir: 'True' - metadata_strategy: 'extended' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: 
'$working_directory:rw,$job_directory:rw,$tool_directory:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - -condor_docker_ie_interactive_gpu: - info: - remote: false - scheduler: 'condor' - limits: - cores: 8 - mem: 18 - gpus: 1 - env: {} - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - request_gpus: '{GPUS}' - requirements: 'GalaxyDockerHack == True && GalaxyGroup == "compute_gpu"' - tmp_dir: 'True' - metadata_strategy: 'extended' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '$working_directory:rw,$job_directory:rw,$tool_directory:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - -condor_docker: - info: - remote: false - scheduler: 'condor' - limits: - cores: 40 - mem: 1000 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyDockerHack == True && GalaxyGroup == "compute"' - tmp_dir: 'True' - metadata_strategy: 'extended' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_import/galaxy_user_data/:ro,/data/1/galaxy_import/galaxy_user_data/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/db/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - -# TBD -condor_docker_interactive: - info: - remote: false - scheduler: 'condor' - limits: - cores: 40 - mem: 1000 - env: - # this can be removed, as soon as we don't expose this env on cn029 anymore - DOCKER_HOST: '' - params: - priority: -{PRIORITY} - request_cpus: '{PARALLELISATION}' - request_memory: '{MEMORY}' - requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' - tmp_dir: 'True' - metadata_strategy: 'extended' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '$working_directory:rw,$job_directory:rw,$tool_directory:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - -remote_cluster_mq_it01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 16 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_be01: - info: - remote: true - scheduler: 'condor' - limits: 
- cores: 8 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - submit_submit_request_cpus: '{PARALLELISATION}' - submit_submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - dependency_resolution: 'remote' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_de01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 16 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_singularity_2wdtrue_de01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 16 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_docker_de01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 16 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - docker_enabled: true - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - docker_volumes: '$job_directory:ro,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:glibc - docker_sudo: false - docker_set_user: '' - tmp_dir: 'True' - -remote_cluster_mq_de02: - ## our singularity test bed - info: - remote: true - scheduler: 'condor' - limits: - cores: 16 - mem: 31 - env: {} - params: - priority: -{PRIORITY} - submit_submit_request_cpus: '{PARALLELISATION}' - submit_submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - # dependency_resolution: 'remote' --> we should enable it and test the difference, I don't think it has an effect - singularity_enabled: true - singularity_default_container_id: '/data/0/singularity/ubuntu:18.04' - tmp_dir: 'True' - -remote_cluster_mq_de03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 19 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: 
C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_docker_de03: - info: - remote: true - scheduler: 'condor' - nodes: 18 - limits: - cores: 8 - mem: 19 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - docker_enabled: true - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - # can and should be made stricter at some point - docker_volumes: '$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/share/staging/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - docker_run_extra_arguments: '--gpus all' - tmp_dir: 'True' - - -remote_cluster_mq_pt01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_uk01: - info: - remote: true - scheduler: 'condor' - nodes: 40 - limits: - cores: 60 - mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_singularity_uk01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 60 - mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_docker_uk01: - info: - remote: true - scheduler: 'condor' - nodes: 40 - limits: - cores: 60 - mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - params: - priority: 
-{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/share/staging/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:glibc - docker_sudo: false - docker_set_user: '' - docker_run_extra_arguments: '--gpus all' - tmp_dir: 'True' - -remote_cluster_mq_it02: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 7 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_2wdfalse_it03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_2wdtrue_it03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_fr01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: 
true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_fi01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_no01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_es01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 16 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_cz01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 16 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /storage/praha5-elixir/home/galaxyeu/pulsar/singularity_cache - SINGULARITY_TMPDIR: $SCRATCHDIR - TMPDIR: $SCRATCHDIR - TMP: $SCRATCHDIR - TEMP: $SCRATCHDIR - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/storage/praha5-elixir/home/galaxyeu/pulsar/files/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/storage/praha5-elixir/home/galaxyeu/pulsar/ubuntu_20.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/dexseq.py b/files/galaxy-test/dynamic_rules/usegalaxy/dexseq.py deleted file mode 100644 index af598f674..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/dexseq.py +++ /dev/null @@ -1,34 +0,0 @@ -from galaxy.jobs import JobDestination -import os - -def dexseq_memory_mapper( job, tool ): - # Assign admin users' jobs to special 
admin_project. - # Allocate extra time - inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) - inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) - gtf_file = inp_data[ "gtf" ].file_name - vmem = 5200 - cores = 6 - params = {} - gtf_file_size = os.path.getsize(gtf_file) / (1024*1024.0) - if gtf_file_size > 150: - vmem = 30000 - cores = 6 - - # TODO(hxr): fix? - # params["nativeSpecification"] = """ - # -q galaxy1.q,all.q -l galaxy1_slots=1 -l h_vmem=%sM -pe "pe*" %s -v - # _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v - # LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT - # -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1 - # """ % (vmem, cores) - params['request_memory'] = vmem / 1024 - params['request_cpus'] = cores - params['requirements'] = '(GalaxyGroup == "compute")' - params['priority'] = 128 - env = { - '_JAVA_OPTIONS': "-Xmx4G -Xms1G", - } - - return JobDestination(id="dexseq_dynamic_memory_mapping", runner="condor", params=params, env=env) - # return JobDestination(id="dexseq_dynamic_memory_mapping", runner="drmaa", params=params) diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/joint_destinations.yaml b/files/galaxy-test/dynamic_rules/usegalaxy/joint_destinations.yaml deleted file mode 100644 index 2c4dd6719..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/joint_destinations.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -remote_condor_cluster_gpu_docker: - - remote_cluster_mq_docker_uk01 - -remote_condor_cluster_gpu: - - remote_cluster_mq_uk01 - -remote_condor_cluster_singularity: - - remote_cluster_mq_de01 - - remote_cluster_mq_it01 - - remote_cluster_mq_fi01 diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.py b/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.py deleted file mode 100644 index 52848a159..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.py +++ /dev/null @@ -1,458 +0,0 @@ -#!/usr/bin/env python -# usegalaxy.eu sorting hat -""" - - .'lddc,. - 'cxOOOOOOOOOxoc;,... - .:dOOOOOOOOOOOOOOOOOOOOOOOl - .;dOOOOOOOOOOOOOOxcdOOOOOOOkl. - oOOOOOOOOOOOOOOOx, ...... - .xOOkkkOOOOOOOOOk' - .xOOkkkOOOOOOOOO00. - dOOkkkOOOOOOOOOOOOd - cOOkkkOOOOOOOOOOOOOO' - .OOOkkOOOOOOOOOOOOOOOd - dOOkkOOOOOOOOOOOOOOOOO, - .OOOOOOOOOOOOOOOOOOOOOOx - cOOOOOOOOOOOOOOOOOOOOOOO; - kOOOOOOOxddddddddxOOOOOOk. - ..,:cldxdlodxxkkO;'''''''';Okkxxdookxdlc:,.. - .;lxO00000000d;;;;;;;;,'';;;;'',;;;;;;;:k00000000Oxl;. - d0000000000000xl::;;;;;,'''''''',;;;;;::lk0000000000000d - .d00000000000000000OkxxxdoooooooodxxxkO00000000000000000d. - .;lxO00000000000000000000000000000000000000000000Oxl;. - ..,;cloxkOO0000000000000000000000OOkxdlc;,.. - .................. - -"Oh, you may not think I'm pretty, -But don't judge on what you see," - -"For I'm the [Galaxy] Sorting Hat -And I can cap them all." 
- -You might belong in Condor, -Where dwell the slow to compute, - -You might belong in Pulsar, -Far flung and remote, - -Or yet in wise old Singularity, -If you're evil and insecure - ---hexylena -""" -import copy -import os -import yaml - -from galaxy.jobs import JobDestination -from galaxy.jobs.mapper import JobMappingException -from random import sample - - -class DetailsFromYamlFile: - """ - Retrieve details from a yaml file - """ - def __init__(self, yaml_file): - yaml_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), yaml_file) - if os.path.isfile(yaml_file_path): - with open(yaml_file_path, 'r') as handle: - self._conf = yaml.load(handle, Loader=yaml.SafeLoader) - - @property - def conf(self): - return self._conf - - def get(self, first_level_label, second_level_label=None): - for key, value in self._conf.items(): - if key == first_level_label: - if second_level_label is None: - return value - else: - return value.get(second_level_label) - return None - - def get_path(self, label): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), self.get('file_paths', label)) - - - -# Sorting Hat configuration details are defined in this file -SH_CONFIGURATION_FILENAME = 'sorting_hat.yaml' - -sh_conf = DetailsFromYamlFile(SH_CONFIGURATION_FILENAME) -DEFAULT_DESTINATION = sh_conf.get('default_destination') -DEFAULT_TOOL_SPEC = sh_conf.get('default_tool_specification') -FAST_TURNAROUND = sh_conf.get('fast_turnaround') -FDID_PREFIX = sh_conf.get('force_destination_id_prefix') -SPECIAL_TOOLS = sh_conf.get('special_tools') - - -# The default / base specification for the different environments. -SPECIFICATION_PATH = sh_conf.get_path('destination_specifications') -SPECIFICATIONS = DetailsFromYamlFile(SPECIFICATION_PATH).conf - -TOOL_DESTINATION_PATH = sh_conf.get_path('tool_destinations') -TOOL_DESTINATIONS = DetailsFromYamlFile(TOOL_DESTINATION_PATH).conf - -JOINT_DESTINATIONS_PATH = sh_conf.get_path('joint_destinations') -JOINT_DESTINATIONS = DetailsFromYamlFile(JOINT_DESTINATIONS_PATH).conf - - -def assert_permissions(tool_spec, user_email, user_roles): - """ - Permissions testing. - - - default state is to allow everyone to run everything. - - If there is a permissions block, `deny: all` is the default. - - We ONLY support allowing specific users to run something. This DOES NOT - support preventing specific users from running something. - - """ - exception_text = "This tool is temporarily disabled due to internal policy. Please contact us if you have issues." - # If there is no permissions block then it's going to be fine for everyone. - if 'permissions' not in tool_spec: - return - - permissions = tool_spec['permissions'] - - # TODO(hxr): write a custom tool thing linter. - # We'll be extra defensive here since I don't think I trust us to get - # linting right for now. - if len(permissions.keys()) == 0: - raise Exception("JCaaS Configuration error 1") - - # And for typos. - if 'allow' not in permissions: - raise Exception("JCaaS Configuration error 2") - - if 'users' not in permissions['allow'] and 'roles' not in permissions['allow']: - raise Exception("JCaaS Configuration error 3") - # ENDTODO - - # Pull out allowed users and roles, defaulting to empty lists if the keys - # aren't there. - allowed_users = permissions['allow'].get('users', []) - allowed_roles = permissions['allow'].get('roles', []) - - # If the user is on our list, yay, return. 
- if user_email in allowed_users: - return - - # If one of their roles is in our list - if any([user_role in allowed_roles for user_role in user_roles]): - return - - # Auth failure. - raise Exception(exception_text) - - -def get_tool_id(tool_id): - """ - Convert ``toolshed.g2.bx.psu.edu/repos/devteam/column_maker/Add_a_column1/1.1.0`` - to ``Add_a_column`` - - :param str tool_id: a tool id, can be the short kind (e.g. upload1) or the long kind with the full TS path. - - :returns: a short tool ID. - :rtype: str - """ - if tool_id.count('/') == 0: - # E.g. upload1, etc. - return tool_id - - # what about odd ones. - if tool_id.count('/') == 5: - (server, _, owner, repo, name, version) = tool_id.split('/') - return name - - return tool_id - - -def name_it(tool_spec, prefix=FDID_PREFIX): - """ - Create a destination's name using the tool's specification. - Can be also forced to return a specific string - """ - if 'cores' in tool_spec: - name = '%scores_%sG' % (tool_spec.get('cores', 1), tool_spec.get('mem', 4)) - elif len(tool_spec.keys()) == 0 or (len(tool_spec.keys()) == 1 and 'runner' in tool_spec): - name = '%s_default' % tool_spec.get('runner') - else: - name = '%sG_memory' % tool_spec.get('mem', 4) - - if tool_spec.get('tmp', None) == 'large': - name += '_large' - - if 'name' in tool_spec: - name += '_' + tool_spec['name'] - - # Force a replacement of the destination's id - if tool_spec.get('force_destination_id', False): - name = prefix + tool_spec.get('runner') - - return name - - -def _get_limits(destination, dest_spec=SPECIFICATIONS, default_cores=1, default_mem=4, default_gpus=0): - """ - Get destination's limits - """ - limits = {'cores': default_cores, 'mem': default_mem, 'gpus': default_gpus} - limits.update(dest_spec.get(destination).get('limits', {})) - return limits - - -def _weighted_random_sampling(destinations, dest_spec=SPECIFICATIONS): - bunch = [] - for d in destinations: - weight = dest_spec[d].get('nodes', 1) - bunch += [d]*weight - destination = sample(bunch, 1)[0] - return destination - - -def build_spec(tool_spec, dest_spec=SPECIFICATIONS, runner_hint=None): - destination = runner_hint if runner_hint else tool_spec.get('runner') - - if destination not in dest_spec: - if destination in JOINT_DESTINATIONS: - destination = _weighted_random_sampling(JOINT_DESTINATIONS[destination]) - else: - destination = DEFAULT_DESTINATION - - env = dict(dest_spec.get(destination, {'env': {}})['env']) - params = dict(dest_spec.get(destination, {'params': {}})['params']) - tags = {dest_spec.get(destination).get('tags', None)} - - # We define the default memory and cores for all jobs. - tool_memory = tool_spec.get('mem') - tool_cores = tool_spec.get('cores') - tool_gpus = tool_spec.get('gpus') - - # We apply some constraints to these values, to ensure that we do not - # produce unschedulable jobs, requesting more ram/cpu than is available in a - # given location. Currently we clamp those values rather than intelligently - # re-scheduling to a different location due to TaaS constraints. - limits = _get_limits(destination, dest_spec=dest_spec) - tool_memory = min(tool_memory, limits.get('mem')) - tool_cores = min(tool_cores, limits.get('cores')) - tool_gpus = min(tool_gpus, limits.get('gpus')) - - kwargs = { - # Higher numbers are lower priority, like `nice`. 
- 'PRIORITY': tool_spec.get('priority', 128), - 'MEMORY': str(tool_memory) + 'G', - 'MEMORY_MB': int(tool_memory * 1024), - 'PARALLELISATION': tool_cores, - 'NATIVE_SPEC_EXTRA': "", - 'GPUS': tool_gpus, - } - - if 'docker_enabled' in params and params['docker_enabled']: - for k in tool_spec: - if k.startswith('docker'): - params[k] = tool_spec.get(k, '') - - if 'condor' in destination: - if 'requirements' in tool_spec: - params['requirements'] = tool_spec['requirements'] - - if 'rank' in tool_spec: - params['rank'] = tool_spec['rank'] - - if '+Group' in tool_spec: - params['+Group'] = tool_spec['+Group'] - - if 'remote_cluster_mq' in destination: - # specific for condor cluster - if tool_gpus == 0 and 'submit_request_gpus' in params: - del params['submit_request_gpus'] - - # Update env and params from kwargs. - env.update(tool_spec.get('env', {})) - env = {k: str(v).format(**kwargs) for (k, v) in env.items()} - - params.update(tool_spec.get('params', {})) - for (k, v) in params.items(): - if not isinstance(v, list): - params[k] = str(v).format(**kwargs) - else: - params[k] = v - - tags.add(tool_spec.get('tags', None)) - tags.discard(None) - tags = ','.join([x for x in tags if x is not None]) if len(tags) > 0 else None - - if 'condor' in destination: - runner = 'condor' - elif 'remote_cluster_mq' in destination: - # destination label has to follow this convention: - # remote_cluster_mq_feature1_feature2_feature3_pulsarid - runner = "_".join(['pulsar_eu', destination.split('_').pop()]) - else: - runner = 'local' - - env = [dict(name=k, value=v) for (k, v) in env.items()] - return env, params, runner, tags - - -def reroute_to_dedicated(user_roles): - """ - Re-route users to correct destinations. Some users will be part of a role - with dedicated training resources. - """ - # Collect their possible training roles identifiers. - training_roles = [role for role in user_roles if role.startswith('training-')] - if any([role.startswith('training-gcc-') for role in training_roles]): - training_roles.append('training-gcc') - - # No changes to specification. - if len(training_roles) == 0: - # Require that the jobs do not run on these dedicated training machines. - return {'requirements': 'GalaxyGroup == "compute"'} - - # Otherwise, the user does have one or more training roles. - # So we must construct a requirement / ranking expression. - training_expr = " || ".join(['(GalaxyGroup == "%s")' % role for role in training_roles]) - training_labels = '"'+", ".join(['%s' % role for role in training_roles])+'"' - return { - # We require that it does not run on machines that the user is not in the role for. - 'requirements': '(GalaxyGroup == "compute") || (%s)' % training_expr, - # We then rank based on what they *do* have the roles for - '+Group': training_labels, - } - - -def _finalize_tool_spec(tool_id, user_roles, special_tools=SPECIAL_TOOLS, tools_spec=TOOL_DESTINATIONS, memory_scale=1.0): - # Find the 'short' tool ID which is what is used in the .yaml file. - tool = get_tool_id(tool_id) - # Pull the tool specification (i.e. job destination configuration for this tool) - tool_spec = copy.deepcopy(tools_spec.get(tool, {})) - # Update the tool specification with any training resources that are available - tool_spec.update(reroute_to_dedicated(user_roles)) - - # Update the tool specification with default values if they are not present - for s in DEFAULT_TOOL_SPEC: - tool_spec[s] = tool_spec.get(s, DEFAULT_TOOL_SPEC[s]) - - tool_spec['mem'] *= memory_scale - - # Only few tools are truly special. 
- if tool_id in special_tools.get('upload'): - tool_spec = { - 'cores': 1, - 'mem': 0.3, - 'gpus': 0, - 'runner': 'condor_upload', - 'rank': 'GalaxyGroup == "upload"', - 'requirements': 'GalaxyTraining == false', - 'env': { - 'TEMP': '/data/1/galaxy_db/tmp/' - } - } - elif tool_id in special_tools.get('metadata'): - tool_spec = { - 'cores': 1, - 'mem': 0.3, - 'gpus': 0, - 'runner': 'condor_upload', - 'rank': 'GalaxyGroup == "metadata"', - 'requirements': 'GalaxyTraining == false', - } - # These we're running on a specific nodes subset - elif 'interactive_tool_' in tool_id: - tool_spec['requirements'] = 'GalaxyDockerHack == True && GalaxyGroup == "compute"' - - return tool_spec - - -def _gateway(tool_id, user_preferences, user_roles, user_id, user_email, ft=FAST_TURNAROUND, - special_tools=SPECIAL_TOOLS, memory_scale=1.0): - tool_spec = _finalize_tool_spec(tool_id, user_roles, memory_scale=memory_scale) - - # Now build the full spec - - # Use this hint to force a destination (e.g. defined from the user's preferences) - runner_hint = None - if tool_id not in special_tools.get('upload') or tool_id not in special_tools.get('metadata'): - for data_item in user_preferences: - if "distributed_compute|remote_resources" in data_item: - if user_preferences[data_item] != "None": - runner_hint = user_preferences[data_item] - - # Ensure that this tool is permitted to run, otherwise, throw an exception. - assert_permissions(tool_spec, user_email, user_roles) - - env, params, runner, tags = build_spec(tool_spec, runner_hint=runner_hint) - params['accounting_group_user'] = str(user_id) - params['description'] = get_tool_id(tool_id) - - # This is a special case, we're requiring it for faster feedback / turnaround times. - # Fast turnaround can be enabled for all the jobs or per single user adding a user role - # with the label described by 'role_label' key. - ft_enabled = ft.get('enabled') - ft_mode = ft.get('mode') - ft_role_label = ft.get('role_label') - ft_requirements = ft.get('requirements') - - if ft_enabled: - if (ft_mode == 'user_roles' and ft_role_label in user_roles) or ft_mode == 'all_jobs': - params['requirements'] = ft_requirements - - return env, params, runner, tool_spec, tags - - -def gateway(tool_id, user, memory_scale=1.0, next_dest=None): - # And run it. 
- if user: - user_roles = [role.name for role in user.all_roles() if not role.deleted] - user_preferences = user.extra_preferences - email = user.email - user_id = user.id - else: - user_roles = [] - user_preferences = [] - email = '' - user_id = -1 - - if get_tool_id(tool_id).startswith('interactive_tool_') and user_id == -1: - raise JobMappingException("This tool is restricted to registered users, " - "please contact a site administrator") - - try: - env, params, runner, spec, tags = _gateway(tool_id, user_preferences, user_roles, user_id, email, - ft=FAST_TURNAROUND, special_tools=SPECIAL_TOOLS, - memory_scale=memory_scale) - except Exception as e: - return JobMappingException(str(e)) - - resubmit = [] - if next_dest: - resubmit = [{ - 'condition': 'any_failure and attempt <= 3', - 'destination': next_dest - }] - - name = name_it(spec) - return JobDestination( - id=name, - tags=tags, - runner=runner, - params=params, - env=env, - resubmit=resubmit, - ) - - -def gateway_1x(tool_id, user): - return gateway(tool_id, user, memory_scale=1, next_dest='gateway_1_5x') - - -def gateway_1_5x(tool_id, user): - return gateway(tool_id, user, memory_scale=1.5, next_dest='gateway_2x') - - -def gateway_2x(tool_id, user): - return gateway(tool_id, user, memory_scale=2) diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.yaml b/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.yaml deleted file mode 100644 index c85b8da8f..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/sorting_hat.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -# Specify which keys can be used -allowed_keys: - destination_specifications: - - env - - limits - - nodes - - params - - tags - tool_destinations: - - cores - - env - - gpus - - mem - - name - - params - - permissions - - runner - - tags - - tmp - - force_destination_id - - docker_auto_rm - - docker_default_container_id - - docker_set_user - - docker_memory - - docker_run_extra_arguments - - docker_set_user - - docker_sudo - - docker_volumes - -# Set the default destination -default_destination: &dd 'condor' - -# Tool that doesn't specify a job configuration, will run with the following -default_tool_specification: - cores: 1 - mem: 4.0 - gpus: 0 - force_destination_id: False - runner: *dd - -# Enable this if you need a faster feedback / turnaround times. 
-# It implies you have a compute node(s) in a Condor cluster -# with the custom ClassAd defined in the requirements -fast_turnaround: - enabled: False - mode: 'all_jobs' # can be all_jobs/user_roles - role_label: 'fast-turnaround' - requirements: 'GalaxyGroup == "fast-turnaround"' - -# Some file paths -file_paths: - destination_specifications: 'destination_specifications.yaml' # Destination details are defined in this file - tool_destinations: 'tool_destinations.yaml' # Tools execution details are defined in this file - joint_destinations: 'joint_destinations.yaml' # Joint destinations details are defined in this file - -# For some tools, we have to force a static id and this is the prefix -force_destination_id_prefix: 'sh_fdid_' - -special_tools: - upload: - - 'upload1' - - '__DATA_FETCH__' - metadata: - - '__SET_METADATA__' - interactive: - - 'interactive_tool_' diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/tool_destinations.yaml b/files/galaxy-test/dynamic_rules/usegalaxy/tool_destinations.yaml deleted file mode 100644 index 7a23c5734..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/tool_destinations.yaml +++ /dev/null @@ -1,783 +0,0 @@ ---- -__default__: {} - -keras_batch_models: - runner: remote_cluster_mq_de03 - gpus: 1 - env: - CUDA_VISIBLE_DEVICES: 0 -sklearn_train_test_eval: - runner: remote_cluster_mq_de03 - gpus: 1 - env: - CUDA_VISIBLE_DEVICES: 0 - -interactive_tool_jupyter_notebook: {mem: 4, runner: condor_docker} -interactive_tool_ethercalc: {mem: 4, runner: condor_docker} -interactive_tool_pinch: {mem: 4, runner: condor_docker} -interactive_tool_neo4j: {mem: 4, runner: condor_docker} -interactive_tool_hicbrowser: {mem: 4, runner: condor_docker} -interactive_tool_cellxgene: {mem: 4, runner: condor_docker} -interactive_tool_bam_iobio: {mem: 4, runner: condor_docker} -interactive_tool_askomics: {mem: 4, runner: condor_docker} -interactive_tool_rstudio: {mem: 4, runner: condor_docker} -interactive_tool_paraview: {mem: 4, runner: condor_docker} - -samtools_idxstats: {runner: condor_singularity_with_conda} - -# roary needs many many cpus if the number of input files increase -# a more specific function would ne neat -roary: {cores: 24} -maxquant: {mem: 20, cores: 8} -scpipe: {mem: 64, cores: 8} -cardinal_combine: {mem: 92} -cardinal_classification: {mem: 128} -cardinal_filtering: {mem: 30} -cardinal_mz_images: {mem: 20} -cardinal_preprocessing: {mem: 110} -cardinal_quality_report: {mem: 120} -cardinal_segmentations: {mem: 92} -cardinal_spectra_plots: {mem: 32} -cardinal_data_exporter: {mem: 64} -kraken_database_builder: {mem: 200} -heatmap: {mem: 12} -Heatmap: {mem: 12} -msconvert: {runner: condor_docker} -RNAlien: {cores: 12} -slamdunk: {cores: 12} -tombo_resquiggle: {cores: 12, mem: 32} -pilon: - mem: 18 - env: - _JAVA_OPTIONS: -Xmx18G -Xms1G - -cuffmerge: {mem: 8} -bio3d_pca: {mem: 64} -bio3d_rmsd: {mem: 64} -bio3d_rmsf: {mem: 64} - -docking: {runner: remote_cluster_mq_it01} -# prepare_ligands_for_docking: {runner: remote_cluster_mq_it01} -prepare_box: {runner: remote_cluster_mq_it01} - -cds_essential_variability: - env: - COPERNICUS_CDSAPIRC_KEY_FILE: /data/db/data_managers/COPERNICUS_CDSAPIRC_KEY_FILE - -kraken: {mem: 128} - -viz_overlay_moving_and_fixed_image: {mem: 12} -ip_projective_transformation: {mem: 24} -scale_image: {mem: 12} -re_he_maldi_image_registration: {mem: 48} -nanopolish_methylation: {cores: 10, mem: 12} -minimap2: {cores: 8, mem: 20} -flye: {cores: 20, mem: 24} -nanopolish_variants: {cores: 20, mem: 12} -nanopolish_eventalign: {cores: 20, 
mem: 12} -AccurateMassSearch: {cores: 4, mem: 8} -AdditiveSeries: {cores: 20, mem: 12} -augustus: {runner: remote_cluster_mq_be01} -BaselineFilter: {cores: 4, mem: 8} -CONVERTER_bedgraph_to_bigwig: {mem: 8} -CVInspector: {cores: 4, mem: 8} -CompNovo: {cores: 4, mem: 8} -CompNovoCID: {cores: 4, mem: 8} -ConsensusID: {cores: 1, mem: 58} -ConsensusMapNormalizer: {cores: 4, mem: 8} -DeMeanderize: {cores: 4, mem: 8} -Decharger: {cores: 4, mem: 8} -DecoyDatabase: {cores: 4, mem: 8} -Digestor: {cores: 4, mem: 8} -DigestorMotif: {cores: 4, mem: 8} -EICExtractor: {cores: 4, mem: 8} -'EMBOSS: fuzztran39': {mem: 10} -ERPairFinder: {cores: 4, mem: 8} -FFEval: {cores: 4, mem: 8} -FalseDiscoveryRate: {cores: 4, mem: 8} -FeatureFinderCentroided: {cores: 4, mem: 8} -FeatureFinderIsotopeWavelet: {cores: 4, mem: 8} -FeatureFinderMRM: {cores: 4, mem: 8} -FeatureFinderMetabo: {cores: 4, mem: 8} -FeatureFinderMultiplex: {cores: 8, mem: 8} -FeatureFinderSuperHirn: {cores: 4, mem: 8} -FeatureLinkerLabeled: {cores: 4, mem: 8} -FeatureLinkerUnlabeled: {cores: 4, mem: 8} -FeatureLinkerUnlabeledQT: {cores: 4, mem: 8} -FidoAdapter: {cores: 8, mem: 8} -FileConverter: {cores: 4, mem: 8} -FileFilter: {cores: 4, mem: 8} -FileInfo: {cores: 4, mem: 8} -FileMerger: {cores: 4, mem: 16} -HighResPrecursorMassCorrector: {cores: 4, mem: 8} -IDConflictResolver: {cores: 4, mem: 8} -IDEvaluator: {cores: 4, mem: 8} -IDExtractor: {cores: 4, mem: 8} -IDFileConverter: {cores: 4, mem: 8} -IDFilter: {cores: 4, mem: 8} -IDMapper: {cores: 4, mem: 8} -IDMassAccuracy: {cores: 4, mem: 8} -IDMerger: {cores: 1, mem: 30} -IDPosteriorErrorProbability: {cores: 4, mem: 8} -IDRTCalibration: {cores: 4, mem: 8} -IDSplitter: {cores: 4, mem: 8} -ITRAQAnalyzer: {cores: 4, mem: 8} -ImageCreator: {cores: 4, mem: 8} -InclusionExclusionListCreator: {cores: 4, mem: 8} -InternalCalibration: {cores: 4, mem: 8} -IsobaricAnalyzer: {cores: 4, mem: 8} -LabeledEval: {cores: 4, mem: 8} -maker: {cores: 8, mem: 8, runner: remote_cluster_mq_be01} -gmx_md: {runner: remote_cluster_mq_de02} -gmx_merge_topology_files: {runner: remote_cluster_mq_de02} -gmx_em: {runner: remote_cluster_mq_de02} -gmx_nvt: {runner: remote_cluster_mq_de02} -gmx_npt: {runner: remote_cluster_mq_de02} -gmx_setup: {runner: remote_cluster_mq_de02} -gmx_solvate: {runner: remote_cluster_mq_de02} -mdanalysis_hbonds: {runner: remote_cluster_mq_de02} -MRMMapper: {cores: 4, mem: 8} -MRMPairFinder: {cores: 4, mem: 8} -msgfplus: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -MSGFPlusAdapter: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -mass_spectrometry_imaging_segmentations: {mem: 92} -mass_spectrometry_imaging_combine: {mem: 12} -mass_spectrometry_imaging_mzplots: {mem: 24} -msi_ion_images: {mem: 92} -msi_spectra_plot: {mem: 210} -MSSimulator: {cores: 4, mem: 8} -MapAlignerIdentification: {cores: 4, mem: 8} -MapAlignerPoseClustering: {cores: 4, mem: 8} -MapAlignerSpectrum: {cores: 4, mem: 8} -MapAlignmentEvaluation: {cores: 4, mem: 8} -MapNormalizer: {cores: 4, mem: 8} -MapRTTransformer: {cores: 4, mem: 8} -MapStatistics: {cores: 4, mem: 8} -MassCalculator: {cores: 4, mem: 8} -MassTraceExtractor: {cores: 4, mem: 8} -MyriMatchAdapter: {cores: 4, mem: 8} -MzTabExporter: {cores: 4, mem: 8} -QCCalculator: {mem: 8} -OpenSwathWorkflow: {mem: 156} -mira_assembler: {mem: 
24} -mothur_align_check: {cores: 8, env: {TERM: vt100}} -mothur_align_seqs: {cores: 8, env: {TERM: vt100}} -mothur_amova: {cores: 8, env: {TERM: vt100}} -mothur_anosim: {cores: 8, env: {TERM: vt100}} -mothur_bin_seqs: {cores: 8, env: {TERM: vt100}} -mothur_chimera_bellerophon: {cores: 8, env: {TERM: vt100}} -mothur_chimera_ccode: {cores: 8, env: {TERM: vt100}} -mothur_chimera_check: {cores: 8, env: {TERM: vt100}} -mothur_chimera_perseus: {cores: 8, env: {TERM: vt100}} -mothur_chimera_pintail: {cores: 8, env: {TERM: vt100}} -mothur_chimera_slayer: {cores: 8, env: {TERM: vt100}} -mothur_chimera_uchime: {cores: 8, env: {TERM: vt100}} -mothur_chop_seqs: {cores: 8, env: {TERM: vt100}} -mothur_classify_otu: {cores: 8, env: {TERM: vt100}} -mothur_classify_rf: {cores: 8, env: {TERM: vt100}} -mothur_classify_seqs: {cores: 8, mem: 8, env: {TERM: vt100}} -mothur_classify_tree: {cores: 8, env: {TERM: vt100}} -mothur_clearcut: {cores: 8, env: {TERM: vt100}} -mothur_cluster: {cores: 8, env: {TERM: vt100}} -mothur_cluster_classic: {cores: 8, env: {TERM: vt100}} -mothur_cluster_fragments: {cores: 8, env: {TERM: vt100}} -mothur_cluster_split: {cores: 8, env: {TERM: vt100}} -mothur_collect_shared: {cores: 8, env: {TERM: vt100}} -mothur_collect_single: {cores: 8, env: {TERM: vt100}} -mothur_consensus_seqs: {cores: 8, env: {TERM: vt100}} -mothur_cooccurrence: {cores: 8, env: {TERM: vt100}} -mothur_corr_axes: {cores: 8, env: {TERM: vt100}} -mothur_count_groups: {cores: 8, env: {TERM: vt100}} -mothur_count_seqs: {cores: 8, env: {TERM: vt100}} -mothur_create_database: {cores: 8, env: {TERM: vt100}} -mothur_degap_seqs: {cores: 8, env: {TERM: vt100}} -mothur_deunique_seqs: {cores: 8, env: {TERM: vt100}} -mothur_deunique_tree: {cores: 8, env: {TERM: vt100}} -mothur_dist_seqs: {cores: 8, env: {TERM: vt100}} -mothur_dist_shared: {cores: 8, env: {TERM: vt100}} -mothur_fastq_info: {cores: 8, env: {TERM: vt100}} -mothur_filter_seqs: {cores: 8, env: {TERM: vt100}} -mothur_filter_shared: {cores: 8, env: {TERM: vt100}} -mothur_get_communitytype: {cores: 8, env: {TERM: vt100}} -mothur_get_coremicrobiome: {cores: 8, env: {TERM: vt100}} -mothur_get_dists: {cores: 8, env: {TERM: vt100}} -mothur_get_group: {cores: 8, env: {TERM: vt100}} -mothur_get_groups: {cores: 8, env: {TERM: vt100}} -mothur_get_label: {cores: 8, env: {TERM: vt100}} -mothur_get_lineage: {cores: 8, env: {TERM: vt100}} -mothur_get_mimarkspackage: {cores: 8, env: {TERM: vt100}} -mothur_get_otulabels: {cores: 8, env: {TERM: vt100}} -mothur_get_otulist: {cores: 8, env: {TERM: vt100}} -mothur_get_oturep: {cores: 8, env: {TERM: vt100}} -mothur_get_otus: {cores: 8, env: {TERM: vt100}} -mothur_get_rabund: {cores: 8, env: {TERM: vt100}} -mothur_get_relabund: {cores: 8, env: {TERM: vt100}} -mothur_get_sabund: {cores: 8, env: {TERM: vt100}} -mothur_get_seqs: {cores: 8, env: {TERM: vt100}} -mothur_get_sharedseqs: {cores: 8, env: {TERM: vt100}} -mothur_hcluster: {cores: 8, env: {TERM: vt100}} -mothur_heatmap_bin: {cores: 8, env: {TERM: vt100}} -mothur_heatmap_sim: {cores: 8, env: {TERM: vt100}} -mothur_homova: {cores: 8, env: {TERM: vt100}} -mothur_indicator: {cores: 8, env: {TERM: vt100}} -mothur_lefse: {cores: 8, env: {TERM: vt100}} -mothur_libshuff: {cores: 8, env: {TERM: vt100}} -mothur_list_otulabels: {cores: 8, env: {TERM: vt100}} -mothur_list_seqs: {cores: 8, env: {TERM: vt100}} -mothur_make_biom: {cores: 8, env: {TERM: vt100}} -mothur_make_contigs: {cores: 8, env: {TERM: vt100}} -mothur_make_design: {cores: 8, env: {TERM: vt100}} -mothur_make_fastq: {cores: 8, 
env: {TERM: vt100}} -mothur_make_group: {cores: 8, env: {TERM: vt100}} -mothur_make_lefse: {cores: 8, env: {TERM: vt100}} -mothur_make_lookup: {cores: 8, env: {TERM: vt100}} -mothur_make_shared: {cores: 8, env: {TERM: vt100}} -mothur_make_sra: {cores: 8, env: {TERM: vt100}} -mothur_mantel: {cores: 8, env: {TERM: vt100}} -mothur_merge_files: {cores: 8, env: {TERM: vt100}} -mothur_merge_groups: {cores: 8, env: {TERM: vt100}} -mothur_merge_sfffiles: {cores: 8, env: {TERM: vt100}} -mothur_merge_taxsummary: {cores: 8, env: {TERM: vt100}} -mothur_metastats: {cores: 8, env: {TERM: vt100}} -mothur_mimarks_attributes: {cores: 8, env: {TERM: vt100}} -mothur_nmds: {cores: 8, env: {TERM: vt100}} -mothur_normalize_shared: {cores: 8, env: {TERM: vt100}} -mothur_otu_association: {cores: 8, env: {TERM: vt100}} -mothur_otu_hierarchy: {cores: 8, env: {TERM: vt100}} -mothur_pairwise_seqs: {cores: 8, env: {TERM: vt100}} -mothur_parse_list: {cores: 8, env: {TERM: vt100}} -mothur_parsimony: {cores: 8, env: {TERM: vt100}} -mothur_pca: {cores: 8, env: {TERM: vt100}} -mothur_pcoa: {cores: 8, env: {TERM: vt100}} -mothur_pcr_seqs: {cores: 8, env: {TERM: vt100}} -mothur_phylo_diversity: {cores: 8, env: {TERM: vt100}} -mothur_phylotype: {cores: 8, env: {TERM: vt100}} -mothur_pre_cluster: {cores: 8, env: {TERM: vt100}} -mothur_primer_design: {cores: 8, env: {TERM: vt100}} -mothur_rarefaction_shared: {cores: 8, env: {TERM: vt100}} -mothur_rarefaction_single: {cores: 8, env: {TERM: vt100}} -mothur_remove_dists: {cores: 8, env: {TERM: vt100}} -mothur_remove_groups: {cores: 8, env: {TERM: vt100}} -mothur_remove_lineage: {cores: 8, env: {TERM: vt100}} -mothur_remove_otulabels: {cores: 8, env: {TERM: vt100}} -mothur_remove_otus: {cores: 8, env: {TERM: vt100}} -mothur_remove_rare: {cores: 8, env: {TERM: vt100}} -mothur_remove_seqs: {cores: 8, env: {TERM: vt100}} -mothur_reverse_seqs: {cores: 8, env: {TERM: vt100}} -mothur_screen_seqs: {cores: 8, env: {TERM: vt100}} -mothur_sens_spec: {cores: 8, env: {TERM: vt100}} -mothur_seq_error: {cores: 8, env: {TERM: vt100}} -mothur_sffinfo: {cores: 8, env: {TERM: vt100}} -mothur_shhh_flows: {cores: 8, env: {TERM: vt100}} -mothur_shhh_seqs: {cores: 8, env: {TERM: vt100}} -mothur_sort_seqs: {cores: 8, env: {TERM: vt100}} -mothur_split_abund: {cores: 8, env: {TERM: vt100}} -mothur_split_groups: {cores: 8, env: {TERM: vt100}} -mothur_sub_sample: {cores: 8, env: {TERM: vt100}} -mothur_summary_qual: {cores: 8, env: {TERM: vt100}} -mothur_summary_seqs: {cores: 8, env: {TERM: vt100}} -mothur_summary_shared: {cores: 8, env: {TERM: vt100}} -mothur_summary_single: {cores: 8, env: {TERM: vt100}} -mothur_summary_tax: {cores: 8, env: {TERM: vt100}} -mothur_tree_shared: {cores: 8, env: {TERM: vt100}} -mothur_trim_flows: {cores: 8, env: {TERM: vt100}} -mothur_trim_seqs: {cores: 8, env: {TERM: vt100}} -mothur_unifrac_unweighted: {cores: 8, env: {TERM: vt100}} -mothur_unifrac_weighted: {cores: 8, env: {TERM: vt100}} -mothur_unique_seqs: {cores: 8, env: {TERM: vt100}} -mothur_venn: {cores: 8, env: {TERM: vt100}} - -NSPDK_candidateClust: {mem: 32} -NoiseFilterGaussian: {cores: 4, mem: 8} -NoiseFilterSGolay: {cores: 4, mem: 8} -OMSSAAdapter: {cores: 4, mem: 8} -OpenSwathAnalyzer: {cores: 4, mem: 8} -OpenSwathChromatogramExtractor: {cores: 4, mem: 8} -OpenSwathConfidenceScoring: {cores: 4, mem: 8} -OpenSwathDIAPreScoring: {cores: 4, mem: 8} -OpenSwathDecoyGenerator: {cores: 4, mem: 8} -OpenSwathFeatureXMLToTSV: {cores: 4, mem: 8} -OpenSwathRTNormalizer: {cores: 4, mem: 8} -OpenSwathRewriteToFeatureXML: 
{cores: 4, mem: 8} -PTModel: {cores: 4, mem: 8} -PTPredict: {cores: 4, mem: 8} -PeakPickerHiRes: {cores: 4, mem: 8} -PeakPickerWavelet: {cores: 4, mem: 8} -PepNovoAdapter: {cores: 4, mem: 8} -PeptideIndexer: {cores: 4, mem: 8} -PicardASMetrics: {mem: 12} -PicardGCBiasMetrics: {mem: 12} -PicardHsMetrics: {mem: 12} -PicardInsertSize: {mem: 12} -picard_CleanSam: {mem: 12} -PrecursorIonSelector: {cores: 4, mem: 8} -PrecursorMassCorrector: {cores: 4, mem: 8} -ProteinInference: {cores: 4, mem: 8} -ProteinQuantifier: {cores: 4, mem: 8} -ProteinResolver: {cores: 4, mem: 8} -RNPxl: {cores: 4, mem: 8} -RTEvaluation: {cores: 4, mem: 8} -RTModel: {cores: 4, mem: 8} -RTPredict: {cores: 4, mem: 8} -Resampler: {cores: 4, mem: 8} -SeedListGenerator: {cores: 4, mem: 8} -SemanticValidator: {cores: 4, mem: 8} -SequenceCoverageCalculator: {cores: 4, mem: 8} -SpecLibCreator: {cores: 4, mem: 8} -SpecLibSearcher: {cores: 4, mem: 8} -SpectraFilterBernNorm: {cores: 4, mem: 8} -SpectraFilterMarkerMower: {cores: 4, mem: 8} -SpectraFilterNLargest: {cores: 4, mem: 8} -SpectraFilterNormalizer: {cores: 4, mem: 8} -SpectraFilterParentPeakMower: {cores: 4, mem: 8} -SpectraFilterScaler: {cores: 4, mem: 8} -SpectraFilterSqrtMower: {cores: 4, mem: 8} -SpectraFilterThresholdMower: {cores: 4, mem: 8} -SpectraFilterWindowMower: {cores: 4, mem: 8} -SpectraMerger: {cores: 4, mem: 8} -TMTAnalyzer: {cores: 4, mem: 8} -TOFCalibration: {cores: 4, mem: 8} -TextExporter: {cores: 4, mem: 8} -TransformationEvaluation: {cores: 4, mem: 8} -XMLValidator: {cores: 4, mem: 8} -XTandemAdapter: {cores: 4, mem: 8} -abims_xcms_fillPeaks: { mem: 32} -abims_xcms_retcor: {mem: 32} -abims_CAMERA_annotateDiffreport: {mem: 32} -antismash: - cores: 12 - mem: 120 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx96G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -bam-to-wig: {mem: 20} -bamCompare_deepTools: {mem: 10} -bamCorrelate_deepTools: {cores: 12, mem: 120} -bamCoverage_deepTools: {mem: 10} -bamFingerprint: {mem: 10} -bedtools_intersectbed: {mem: 20} -bedtools_intersectbed_bam: {mem: 20} -bedtools_mergebed: {mem: 15} -bedtools_sortbed: {mem: 20} -bedtools_subtractbed: {mem: 8} -bfast_wrapper: {cores: 12, mem: 20} -bg_diamond: {cores: 6, mem: 90} -bg_diamond_makedb: {cores: 12, mem: 40} -bgchem_fragment_merger: {cores: 12, mem: 40} -bigwig_to_bedgraph: {mem: 12} -biosigner: {mem: 12} -bismark_bowtie: {cores: 6, mem: 30, name: bismark, tmp: large} -bismark_bowtie2: {cores: 6, mem: 30, name: bismark, tmp: large} -blast_parser: {mem: 8} -blast2go: {mem: 20} -blockbuster: {mem: 64} -blockclust: {mem: 10} -bowtie2: {cores: 8, mem: 20} -bwa: {cores: 8, mem: 20} -bwa_mem: {cores: 8, mem: 30} -bwa_mem_index_builder_data_manager: {mem: 48} -bwameth_index_builder_data_manager: {mem: 48} -bwameth: {cores: 8, mem: 24} -bwtool-lift: {mem: 80} -canu: {cores: 20, mem: 64} -mass_spectrometry_imaging_preprocessing: {mem: 110} -mass_spectrometry_imaging_ion_images: {mem: 20} -mass_spectrometry_imaging_qc: {mem: 110} -mass_spectrometry_imaging_filtering: {mem: 20} -metaspades: {cores: 10, mem: 250} -megahit: {cores: 10, mem: 110} -charts: {mem: 10} -circgraph: {mem: 10} -computeMatrix: {mem: 80} -correctGCBias: {mem: 10} -cshl_fastx_collapser: {mem: 16} -create_tool_recommendation_model: {mem: 160} -crispr_recognition_tool: {mem: 10} -ctb_np-likeness-calculator: {mem: 12} -ctb_online_data_fetch: {mem: 10} -ctb_openmg: - mem: 20 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -ctb_pubchem_download_as_smiles: {cores: 12, mem: 
20} -cuffcompare: {mem: 10} -cuffdiff: {cores: 6, mem: 40} -cufflinks: {cores: 6, mem: 30} -cuffnorm: {cores: 6, mem: 20} -cuffquant: {cores: 6, mem: 20} -data_manager_gemini_download: {mem: 20} -data_manager_humann2_download: {mem: 25} -data_manager_snpeff_databases: {mem: 12} -data_manager_snpeff_download: {mem: 12} -deeptools_bamCompare: {cores: 12, mem: 12} -deeptools_bamCorrelate: {cores: 12, mem: 90} -deeptools_bamCoverage: {cores: 12, mem: 48} -deeptools_bamFingerprint: {cores: 12, mem: 12} -deeptools_bam_compare: {cores: 12, mem: 24} -deeptools_bam_coverage: {cores: 12, mem: 24} -deeptools_bam_pe_fragmentsize: {cores: 12, mem: 24} -deeptools_bigwigCompare: {cores: 12, mem: 12} -deeptools_bigwigCorrelate: {cores: 12, mem: 40} -deeptools_bigwig_compare: {cores: 12, mem: 24} -deeptools_computeGCBias: {cores: 12, mem: 24} -deeptools_computeMatrix: {cores: 12, mem: 24} -deeptools_compute_gc_bias: {cores: 12, mem: 24} -deeptools_compute_matrix: {cores: 12, mem: 30} -deeptools_correctGCBias: {cores: 12, mem: 24} -deeptools_correct_gc_bias: {cores: 12, mem: 24} -deeptools_heatmapper: {mem: 25} -deeptools_multi_bam_summary: {cores: 12, mem: 24} -deeptools_multi_bigwig_summary: {cores: 12, mem: 40} -deeptools_plot_correlation: {mem: 20} -deeptools_plot_coverage: {mem: 20} -deeptools_plot_fingerprint: {mem: 20} -deeptools_plot_heatmap: {mem: 25} -deeptools_plot_pca: {mem: 20} -deeptools_plot_profile: {mem: 20} -deeptools_profiler: {mem: 20} -deseq2: {mem: 8} -dexseq_count: {mem: 25} -diamond_database_builder: {cores: 12, mem: 90} -dt_profiler: {mem: 10} -eukaryotic_ncbi_submission: {cores: 24, mem: 24} -fastq_dump: {mem: 20} -fastqc: {mem: 20} -featurecounts: {cores: 8, mem: 18} -feebayes: {cores: 12, mem: 12} -flashlfq: - env: - MONO_GC_PARAMS: max-heap-size=2g - mem: 32 -flexbar: {cores: 12, mem: 12} -flexbar_no_split: {cores: 12, mem: 12} -flexbar_split_RR_bcs: {cores: 12, mem: 12} -flexbar_split_RYYR_bcs: {cores: 12, mem: 12} -freebayes: {cores: 12, mem: 12} -gatk2_base_recalibrator: - cores: 12 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx8G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 24 - name: _special -gatk2_depth_of_coverage: {cores: 12, mem: 24} -gatk2_haplotype_caller: {cores: 12, mem: 24} -gatk2_indel_realigner: {mem: 10} -gatk2_print_reads: {cores: 12, mem: 24} -gatk2_realigner_target_creator: {cores: 12, mem: 24} -gatk2_reduce_reads: - cores: 12 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx8G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 24 - name: _special -gatk2_unified_genotyper: {cores: 12, mem: 24} -gatk2_variant_annotator: {cores: 12, mem: 24} -gatk2_variant_apply_recalibration: {cores: 12, mem: 24} -gatk2_variant_combine: {cores: 12, mem: 24} -gatk2_variant_eval: {cores: 12, mem: 24} -gatk2_variant_filtration: {mem: 10} -gatk2_variant_recalibrator: {cores: 12, mem: 24} -gatk2_variant_select: {cores: 12, mem: 24} -gatk2_variant_validate: {cores: 12, mem: 24} -gatk_picard_index_builder: {mem: 12} -gemini_load: {cores: 12, mem: 40} - -# Permissions -#gemini_query: -# permissions: -# allow: -# roles: -# - admin - -ggplot2_heatmap2: {mem: 24} -graphprot_predict_profile: {mem: 8} -# this tool was developed by IGC Bioinformatics Unit and Daniel Sobral from ELIXIR-PT -goenrichment: {runner: remote_cluster_mq_pt01} -hammock_1.0: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 20 - name: java_temp -heatmapper: {mem: 25} -heatmapper_deepTools: {mem: 25} -hicexplorer_hicbuildmatrix: {cores: 10, mem: 120} 
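Each _JAVA_OPTIONS stanza in this deleted file pairs the destination's mem limit with a hand-maintained heap cap (e.g. hammock_1.0 above sets mem: 20 next to -Xmx15G, leaving headroom for non-heap memory). The replacement TPV files later in this diff derive the heap from the entity instead, e.g. _JAVA_OPTIONS: -Xmx{int(mem)}G. A minimal sketch of that evaluation, with an invented memory value:

    # Sketch only: how a templated heap flag tracks the entity's final
    # memory; 20.0 is an invented value, not taken from any tool above.
    mem = 20.0  # GB, after defaults, rules and clamping
    java_options = f"-Xmx{int(mem)}G -Xms1G"
    assert java_options == "-Xmx20G -Xms1G"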
-hicexplorer_hiccorrectmatrix: {mem: 64} -hicexplorer_hiccorrelate: {mem: 20} -hicexplorer_hicfindtads: {mem: 20} -hicexplorer_hicplotmatrix: {mem: 210} -hicexplorer_hicplottads: {mem: 20} -hicexplorer_hicsummatrices: {mem: 65} -hicexplorer_hicpca: {cores: 12, mem: 60} -hicexplorer_hicmergematrixbins: {mem: 80} -hicexplorer_hictransform: {cores: 12, mem: 60} -hicexplorer_hicplotviewpoint: {mem: 12} -hicexplorer_hicaggregatecontacts: {mem: 12} -hicup_mapper: {cores: 6, mem: 24} -hisat: {cores: 12, mem: 20} -hisat2: {cores: 8, mem: 20, runner: condor_singularity_with_conda} -#hisat2: {cores: 8, mem: 20} - -hisat2_index_builder_data_manager: {cores: 12, mem: 180} -hmmer_hmmsearch: {mem: 10} -htseq_count: {mem: 32} -humann2: {cores: 6, mem: 90} -infernal_cmbuild: {cores: 10, mem: 20} -infernal_cmsearch: {cores: 10, mem: 20} -interproscan: - env: - PATH: $PATH:/data/0/interproscan/interproscan-5.36-75.0/ - mem: 40 - cores: 6 -iqtree: {cores: 12} -iterative_map_pipeline: {mem: 60} -je_clip: {cores: 8} -je_demultiplex: {cores: 8, mem: 20} -je_demultiplex_illu: {cores: 8} -je_markdupes: {cores: 8} -join1: {mem: 18} -bamFilter: {runner: remote_cluster_mq_de01} -jq: {runner: remote_cluster_mq_pt01} -kallisto_quant: {cores: 8, mem: 20} -## uha, with the reference.fa file from the history it is not working? -## kallisto_quant: {cores: 8, mem: 20_singularity} -lastz_wrapper_2: {mem: 8} -limma_voom: {mem: 4} -macs2_bdgdiff: {mem: 10} -macs2_callpeak: {mem: 15} -maldi_quant_preprocessing: {mem: 400} -maldi_quant_peak_detection: {mem: 180} -mass_spectrometry_imaging_classification: {mem: 128} -megablast_wrapper: {mem: 20} -meme_dreme: {mem: 16} -meme_fimo: {mem: 4} -merge_pcr_duplicates.py: {mem: 4} -methtools_calling: {cores: 12, mem: 40} -methtools_filter: {mem: 10} -methtools_plot: {cores: 12, mem: 20} -metilene: {cores: 12, mem: 20} -mimodd_varcall: {cores: 6} -minced: {mem: 10} -migmap: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=6G -Xmx90G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 96 - name: java_temp -proteomics_moff: {mem: 20, cores: 6} -morpheus: - env: - MONO_GC_PARAMS: max-heap-size=2g - mem: 64 -msaboot: {mem: 6} -naive_variant_caller: {mem: 20} -ncbi_makeblastdb: {mem: 20} -nspdk_sparse: {mem: 16} -numeric_clustering: {mem: 12} -peakachu: {mem: 16} -peptide_shaker: - cores: 12 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx25G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 32 - name: _special -picard_ARRG: {mem: 12} -picard_AddOrReplaceReadGroups: {mem: 12} -picard_BamIndexStats: {mem: 12} -picard_CASM: {mem: 12} -picard_CollectInsertSizeMetrics: {mem: 12} -picard_CollectRnaSeqMetrics: {mem: 12} -picard_DownsampleSam: {mem: 12} -picard_EstimateLibraryComplexity: {mem: 12} -picard_NormalizeFasta: {mem: 12} -picard_FilterSamReads: - env: - TMP_DIR: $TMPDIR - mem: 12 - tmp: large -picard_FixMateInformation: {mem: 12} -picard_FastqToSam: {mem: 12} -picard_MarkDuplicates: {mem: 12} -picard_MergeSamFiles: {mem: 12} -picard_QualityScoreDistribution: {mem: 12} -picard_ReorderSam: {mem: 12} -picard_ReplaceSamHeader: {mem: 12} -picard_SamToFastq: {mem: 12} -picard_SortSam: - env: - _JAVA_OPTIONS: -Xmx4G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 10 - name: java_temp -picard_index_builder_data_manager: {mem: 12} -piranha: {mem: 15} -preproc: {mem: 10} -prokaryotic_ncbi_submission: {cores: 24, mem: 24} -prokka: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 20 -proteomics_search_msgfplus_1: {mem: 10} -pureclip: {mem: 32, 
cores: 2} -quality_metrics: {mem: 12} -r_correlation_matrix: {mem: 80} -racon: {cores: 4, mem: 12} -rbc_mirdeep2_mapper: {cores: 12, mem: 20} -rcas: {cores: 4, mem: 16} -reactome_pathwaymatcher: - mem: 20 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx17G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -rgEstLibComp: {mem: 12} -rgPicFixMate: {mem: 12} -rgPicardMarkDups: {mem: 12} -rm_spurious_events.py: {mem: 4} -rna_star: {cores: 12, mem: 140} -rna_starsolo: {cores: 4, mem: 40} -rna_star_index_builder_data_manager: - cores: 12 - mem: 100 - params: - local_slots: 6 -rnbeads: {mem: 20} -rsem_calculate_expression: {mem: 16} -rseqc_bam2wig: {cores: 8, mem: 16} -sailfish: {cores: 6, mem: 70} -salmon: {cores: 6, mem: 70} -sam_merge2: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 32 -#samtools_stats: {runner: remote_cluster_mq_de01} -search_gui: - cores: 12 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -secretbt2test: {cores: 12, mem: 20} -segemehl: {mem: 80} -seq_filter_by_mapping: {mem: 8} -shovill: - cores: 4 - mem: 50 - env: - SHOVILL_RAM: 50 -signalp3: {mem: 10} -sklearn_searchcv: {cores: 12, mem: 16} -smooth_running_window: {mem: 32} -snippy: {cores: 4, mem: 12, runner: remote_cluster_mq_au01} -#snippy: {mem: 12} -snpEff: {mem: 12} -snpEff_databases: {mem: 12} -snpEff_download: {mem: 12} -snpEff_get_chr_names: {mem: 12} -snpEff_build_gb: - mem: 48 - env: - _JAVA_OPTIONS: -Xmx48G -Xms1G -snpSift_annotate: {mem: 12} -snpSift_caseControl: {mem: 12} -snpSift_filter: {mem: 18} -snpSift_geneSets: {mem: 12} -snpSift_int: {mem: 12} -spades: {cores: 12, mem: 512} -sshmm: {mem: 16} -structurefold: {mem: 12} -rnaspades: {cores: 12, mem: 90} -stringtie: {mem: 25} -t_coffee: - env: - DIR_4_TCOFFEE: $TMP - TMP_4_TCOFFEE: $TMP - CACHE_4_TCOFFEE: $TMP -tophat2: {cores: 12, mem: 90} -tp_easyjoin_tool: {mem: 12} -tp_multijoin_tool: {mem: 8} -tp_uniq_tool: {mem: 12} -trimmomatic: - cores: 6 - mem: 12 - name: special - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp -trinity: - cores: 4 - mem: 220 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx170G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - name: special -umi_tools_group: {mem: 12} -umi_tools_dedup: {mem: 12} -umi_tools_extract: {mem: 4} -unicycler: - cores: 12 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - TERM: vt100 - mem: 80 - name: special -valet: {cores: 8, mem: 20} -varscan_somatic: {cores: 8, mem: 12} -varscan_mpileup: {mem: 12} -varscan_copynumber: {mem: 12} -velvetg: - cores: 4 - env: - OMP_NUM_THREADS: 4 - OMP_THREAD_LIMIT: 4 - mem: 32 - name: _velvetg -velveth: - cores: 4 - env: - OMP_NUM_THREADS: 4 - OMP_THREAD_LIMIT: 4 - mem: 16 - name: _velveth -vsearch_search: {mem: 80} - - - -# Some admin tools -echo_main_env: - permissions: - allow: - roles: - - admin - -#tp_awk_tool: -# runner: condor_docker -# permissions: -# allow: -# roles: -# - admin - -#tp_replace_in_column: -# permissions: -# allow: -# roles: -# - admin - -# permissions testing. 
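The _test_permissions_* entries below exercise the permission model from sorting_hat's assert_permissions: a tool with no permissions block is open to everyone, while the presence of a block denies by default and only explicitly listed users or roles may run the tool. A standalone sketch of that check (a simplification, not the exact deleted code):

    def is_allowed(tool_spec, user_email, user_roles):
        # No permissions block: everyone may run the tool.
        if "permissions" not in tool_spec:
            return True
        # Otherwise deny by default; allow only listed users or roles.
        allow = tool_spec["permissions"].get("allow", {})
        return (user_email in allow.get("users", [])
                or any(role in allow.get("roles", []) for role in user_roles))

    assert is_allowed({}, "a@example.com", [])
    assert not is_allowed(
        {"permissions": {"allow": {"users": ["b@example.com"]}}},
        "a@example.com", [],
    )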
-_test_permissions_0: {} - -_test_permissions_1: - permissions: - allow: - users: - - b@example.com - -_test_permissions_2: - permissions: - allow: - users: - - a@example.com - roles: - - role-b diff --git a/files/galaxy-test/dynamic_rules/usegalaxy/wig2bigwig.py b/files/galaxy-test/dynamic_rules/usegalaxy/wig2bigwig.py deleted file mode 100644 index a98d0c180..000000000 --- a/files/galaxy-test/dynamic_rules/usegalaxy/wig2bigwig.py +++ /dev/null @@ -1,19 +0,0 @@ -from galaxy.jobs import JobDestination -import os - -def wig_to_bigwig( job, tool ): - # wig_to_bigwig needs a lot of memory if the input file is big - inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) - inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) - wig_file = inp_data[ "input1" ].file_name - wig_file_size = os.path.getsize(wig_file) / (1024*1024.0) - - # according to http://genome.ucsc.edu/goldenpath/help/bigWig.html - # wig2bigwig uses a lot of memory; somewhere on the order of 1.5 times more memory than the uncompressed wiggle input file - required_memory = min(max(wig_file_size * 3.0, 16 * 1024), 250*1024) # our biggest memory node has 256GB memory - params = {} - # params["nativeSpecification"] = """ -q galaxy1.q,all.q -p -128 -l galaxy1_slots=1 -l h_vmem=%sM -v _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1 """ % (required_memory) - params['request_memory'] = required_memory / 1024 - params['requirements'] = '(GalaxyGroup == "compute")' - - return JobDestination(id="wig_to_bigwig_job_destination", runner="condor", params=params) diff --git a/files/galaxy-test/tpv/destinations.yml b/files/galaxy-test/tpv/destinations.yml new file mode 100644 index 000000000..5beff16e1 --- /dev/null +++ b/files/galaxy-test/tpv/destinations.yml @@ -0,0 +1,259 @@ +--- +# NOTE: Use dashes (-) exclusively for tags and underscores (_) exclusively for destinations. 
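Two conventions in the new TPV destination files below are worth spelling out: abstract: true marks a destination that exists only to be inherited from, and params values such as "{cores}" and "{mem}G" are placeholders filled in from the entity's final resources, much like the str(v).format(**kwargs) loop in the deleted sorting_hat build_spec. A rough sketch of the substitution, with invented values (not TPV's actual internals):

    # Rough sketch: placeholder params rendered from an entity's resources.
    params = {"submit_request_cpus": "{cores}", "submit_request_memory": "{mem}G"}
    entity = {"cores": 8, "mem": 31, "gpus": 0}  # invented example values
    rendered = {key: value.format(**entity) for key, value in params.items()}
    assert rendered == {"submit_request_cpus": "8", "submit_request_memory": "31G"}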
+# In Pulsar the parameter is called submit_request_cpus; in plain Condor it is just request_cpus destinations: + ###################### + # BASIC DESTINATIONS # + ###################### + basic_docker_destination: + abstract: true + params: + docker_enabled: true + docker_sudo: false + docker_net: bridge + docker_auto_rm: true + docker_set_user: "" + docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$default,$job_directory:rw,$tool_directory:ro,/cvmfs/data.galaxyproject.org:ro" + require_container: true + submit_request_cpus: "{cores}" + submit_request_memory: "{mem}G" + outputs_to_working_directory: false + container_monitor_result: callback + submit_requirements: "GalaxyDockerHack == True" + + basic_singularity_destination: + abstract: true + params: + submit_request_cpus: "{cores}" + submit_request_memory: "{mem}G" + singularity_enabled: true + singularity_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dnb08/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" + singularity_default_container_id: "/cvmfs/singularity.galaxyproject.org/all/centos:8.3.2011" + + ################################ + # EMBEDDED PULSAR DESTINATIONS # + ################################ + + interactive_pulsar: + inherits: basic_docker_destination + runner: pulsar_embedded + max_accepted_cores: 24 + max_accepted_mem: 128 + scheduling: + accept: + - docker + require: + - interactive + + interactive_pulsar_gpu: + inherits: interactive_pulsar + max_accepted_gpus: 1 + env: + GPU_AVAILABLE: "1" + params: + requirements: 'GalaxyGroup == "compute_gpu"' + scheduling: + require: + - gpu + + embedded_pulsar_docker: + inherits: basic_docker_destination + runner: pulsar_embedded + max_accepted_cores: 24 + max_accepted_mem: 128 + scheduling: + require: + - docker + - embedded-pulsar + + embedded_pulsar_docker_gpu: + inherits: embedded_pulsar_docker + max_accepted_gpus: 1 + env: + GPU_AVAILABLE: "1" + params: + requirements: 'GalaxyGroup == "compute_gpu"' + scheduling: + require: + - gpu + + ####################### + # PULSAR DESTINATIONS # + ####################### + + pulsar_default: # use for remote Pulsar nodes and ALWAYS overwrite the runner.
+ inherits: basic_singularity_destination + abstract: true + runner: pulsar_embedded + env: + LC_ALL: C + SINGULARITY_CACHEDIR: /data/share/var/database/container_cache # On the NFS share on remote Pulsar side + params: + jobs_directory: /data/share/staging + transport: curl + remote_metadata: "false" + metadata_strategy: directory + default_file_action: remote_transfer + rewrite_parameters: "true" + persistence_directory: /data/share/persisted_data + outputs_to_working_directory: "false" + dependency_resolution: "none" + submit_request_cpus: "{cores}" + submit_request_memory: "{mem}" + docker_volumes: "$default,$job_directory:rw,$tool_directory:ro,/cvmfs/data.galaxyproject.org:ro" + scheduling: + require: + - pulsar + + pulsar_mira_tpv: + inherits: pulsar_default + runner: pulsar_mira_runner + max_accepted_cores: 8 + max_accepted_mem: 15 + scheduling: + require: + - mira-pulsar + accept: + - docker + - condor-tpv + - singularity + + pulsar_sanjay_tpv: + inherits: pulsar_default + runner: pulsar_sanjay_runner + max_accepted_cores: 8 + max_accepted_mem: 15 + scheduling: + require: + - sanjay-pulsar + accept: + - docker + - condor-tpv + - singularity + + pulsar_sk01_tpv: + inherits: pulsar_default + runner: pulsar_eu_sk01 + max_accepted_cores: 8 + max_accepted_mem: 16 + scheduling: + require: + - sk-pulsar + accept: + - docker + - condor-tpv + - singularity + + pulsar_it_tpv: + inherits: pulsar_default + runner: pulsar_eu_it01 + max_accepted_cores: 16 + max_accepted_mem: 31 + scheduling: + require: + - it-pulsar + accept: + - docker + - condor-tpv + - singularity + + pulsar_fr01_tpv: + runner: pulsar_eu_fr01 + inherits: pulsar_default + max_accepted_cores: 8 + max_accepted_mem: 63 + scheduling: + require: + - fr-pulsar + accept: + - docker + - condor-tpv + - singularity + + pulsar_be_tpv: + inherits: pulsar_default + runner: pulsar_eu_be01 + max_accepted_cores: 8 + max_accepted_mem: 15 + scheduling: + require: + - be-pulsar + accept: + - docker + - condor-tpv + - singularity + + ############################# + # LOCAL CONDOR DESTINATIONS # + ############################# + + condor_docker: + inherits: basic_docker_destination + runner: condor + max_accepted_cores: 36 + max_accepted_mem: 975 + scheduling: + require: + - docker + + condor_singularity: + inherits: basic_singularity_destination + runner: condor + max_accepted_cores: 24 + max_accepted_mem: 128 + params: + scheduling: + prefer: + - singularity + require: + - offline + + # Generic destination for tools that don't get any params + # and no specified dependency resolution + condor_tpv: + runner: condor + max_accepted_cores: 64 + max_accepted_mem: 1000 + scheduling: + prefer: + - condor-tpv + + condor_singularity_with_conda: + inherits: basic_singularity_destination + runner: condor + max_accepted_cores: 64 + max_accepted_mem: 1000 + params: + container_override: + - type: singularity + shell: "/bin/bash" + resolve_dependencies: true + identifier: "/data/0/singularity_base_images/centos:8.3.2011" + scheduling: + require: + - singularity + - conda + + condor_upload: + runner: condor + max_accepted_cores: 20 + max_accepted_mem: 10 + params: + requirements: "GalaxyTraining == false" + rank: 'GalaxyGroup == "upload"' + scheduling: + require: + - upload + + condor_gpu: + runner: condor + max_accepted_cores: 8 + max_accepted_mem: 16 + max_accepted_gpus: 1 + env: + GPU_AVAILABLE: 1 + params: + requirements: 'GalaxyGroup == "compute_gpu"' + scheduling: + require: + - gpu diff --git 
a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/interactive_tools.yml b/files/galaxy-test/tpv/interactive_tools.yml similarity index 90% rename from files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/interactive_tools.yml rename to files/galaxy-test/tpv/interactive_tools.yml index 09f619b49..a125f01ef 100644 --- a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/interactive_tools.yml +++ b/files/galaxy-test/tpv/interactive_tools.yml @@ -4,15 +4,7 @@ tools: cores: 1 mem: 4 params: - runner: pulsar_embedded - tmp_dir: true - docker_enabled: true docker_volumes: $defaults - docker_sudo: false - docker_net: bridge - docker_auto_rm: true - docker_set_user: '' - require_container: true container_monitor_result: callback submit_requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' scheduling: @@ -24,15 +16,7 @@ tools: cores: 1 mem: 4 params: - runner: pulsar_embedded - tmp_dir: true - docker_enabled: true docker_volumes: $defaults - docker_sudo: false - docker_net: bridge - docker_auto_rm: true - docker_set_user: '' - require_container: true container_monitor_result: callback submit_requirements: 'GalaxyDockerHack == True' rules: @@ -50,6 +34,12 @@ tools: inherits: interactive_tool cores: 1 mem: 4 + interactive_tool_blobtoolkit: + inherits: interactive_tool + cores: 1 + mem: 4 + env: + TEMP: /data/1/galaxy_db/tmp interactive_tool_mgnify_notebook: inherits: interactive_tool cores: 1 @@ -173,9 +163,12 @@ tools: inherits: interactive_tool_gpu gpus: 1 cores: 1 - mem: 8 + mem: 18 params: docker_run_extra_arguments: " --gpus all" + scheduling: + require: + - docker #interactive_tool_jupyter_notebook: # inherits: interactive_tool diff --git a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/roles.yml b/files/galaxy-test/tpv/roles.yml similarity index 100% rename from files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/roles.yml rename to files/galaxy-test/tpv/roles.yml diff --git a/files/galaxy-test/tpv/tool_defaults.yml b/files/galaxy-test/tpv/tool_defaults.yml new file mode 100644 index 000000000..1c5dfe7a5 --- /dev/null +++ b/files/galaxy-test/tpv/tool_defaults.yml @@ -0,0 +1,35 @@ +# ALL tags must be with dashes (-) instead of underscores (_) +global: + default_inherits: default +tools: + default: + cores: 1 + mem: cores * 3.8 + gpus: 0 + env: {} + params: + metadata_strategy: 'extended' + tmp_dir: true + request_cpus: "{cores}" + request_memory: "{mem}G" + submit_request_gpus: "{gpus or 0}" + docker_memory: "{mem}G" + scheduling: + reject: + - offline + rules: + - if: user is not None + execute: | + training_roles = [r.name for r in user.all_roles() if not r.deleted and "training" in r.name] + training_expr = " || ".join(['(GalaxyGroup == "%s")' % role for role in training_roles]) + training_labels = '"' + ", ".join(training_roles) + '"' + entity.params['requirements'] = '(GalaxyGroup == "compute") || (%s)' % training_expr if training_expr else '(GalaxyGroup == "compute")' + entity.params['+Group'] = training_labels + - id: gpu_tools_to_condor_gpu + if: entity.gpus > 0 + scheduling: + require: + - gpu + rank: | + final_destinations = helpers.weighted_random_sampling(candidate_destinations) + final_destinations diff --git a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/tools.yml b/files/galaxy-test/tpv/tools.yml similarity index 65% rename from files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/tools.yml rename to files/galaxy-test/tpv/tools.yml index 95a52dbc3..4fb57dee2 100644 --- 
a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/tools.yml +++ b/files/galaxy-test/tpv/tools.yml @@ -1,33 +1,72 @@ --- # ALL tags must be with dashes (-) instead of underscores (_) -global: - default_inherits: default tools: - default: - cores: 2 - mem: cores * 3.8 + __DATA_FETCH__: + cores: 1 + mem: 3 gpus: 0 - env: {} - params: - tmp_dir: true - request_cpus: "{cores}" - request_memory: "{mem}G" - submit_request_gpus: "{gpus or 0}" - docker_memory: "{mem}G" scheduling: - reject: - - offline - rules: - - if: user is not None - execute: | - training_roles = [r.name for r in user.all_roles() if not r.deleted and "training" in r.name] - training_expr = " || ".join(['(GalaxyGroup == "%s")' % role for role in training_roles]) - training_labels = '"' + ", ".join(training_roles) + '"' - entity.params['requirements'] = '(GalaxyGroup == "compute") || (%s)' % training_expr if training_expr else '(GalaxyGroup == "compute")' - entity.params['+Group'] = training_labels - rank: | - final_destinations = helpers.weighted_random_sampling(candidate_destinations) - final_destinations + require: + - upload + env: + TEMP: /data/1/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/chemteam/gmx_sim/gmx_sim/.*: + gpus: 1 + + toolshed.g2.bx.psu.edu/repos/chemteam/gmx_sim/gmx_sim/2019.1.5.1: + gpus: 1 + + toolshed.g2.bx.psu.edu/repos/chemteam/gmx_sim/gmx_sim/2020.4+galaxy0: + gpus: 1 + + toolshed.g2.bx.psu.edu/repos/bgruening/hifiasm/hifiasm/.*: + # The memory requirement of Hifiasm depends on a wrapper's input + mem: | + from math import ceil + + parameters = {p.name: p.value for p in job.parameters} + parameters = tool.params_from_strings(parameters, app) + + advanced_options = parameters.get("advanced_options", dict()) + + kcov_default = 36 + kcov = advanced_options.get("kcov", kcov_default) + + hg_size = advanced_options.get("hg_size", "") + + value = 0 + if hg_size: + conversion_factors = { + "k": 1000000, + "M": 1000, + "G": 1, + } + conversion_factors = { + key.lower(): value for key, value in conversion_factors.items() + } + suffix = hg_size[-1:].lower() + value = hg_size[:len(hg_size) - 1] + value = value.replace(",", ".") + value = float(value) + # compute hg size in Gb + value = value / conversion_factors[suffix.lower()] + value = ceil(value * (kcov * 2) * 1.75) + + # return the amount of memory needed + value + + keras_train_and_eval: + # Type of compute resource (CPU or GPU) for keras_train_eval tool depends on user's input from its wrapper. + # Default resource is CPU. 
+ gpus: | + parameters = {p.name: p.value for p in job.parameters} + parameters = tool.params_from_strings(parameters, app) + + gpus = int( + parameters.get("__job_resource", dict()).get("gpu", '0') + ) + gpus toolshed.g2.bx.psu.edu/repos/iuc/snippy/snippy/.*: cores: 2 @@ -41,7 +80,13 @@ tools: toolshed.g2.bx.psu.edu/repos/iuc/enasearch_search_data/enasearch_search_data/.*: scheduling: require: - - condor-singularity-with-conda-python2 + - conda + - singularity + + toolshed.g2.bx.psu.edu/repos/galaxy-australia/hifiasm_meta/hifiasm_meta/.*: + cores: 8 + params: + singularity_enabled: True toolshed.g2.bx.psu.edu/repos/rnateam/dewseq/dewseq/.*: cores: 2 @@ -80,6 +125,10 @@ tools: scheduling: require: - docker + toolshed.g2.bx.psu.edu/repos/bgruening/instagraal/instagraal/.*: + inherits: basic_docker_tool + cb_ivr: + inherits: basic_docker_tool basic_illumination: inherits: basic_docker_tool mesmer: @@ -417,25 +466,20 @@ tools: qiime2_core__tools__import: inherits: basic_docker_tool toolshed.g2.bx.psu.edu/repos/iuc/cherri_train/cherri_train/.*: + inherits: basic_docker_tool cores: 10 mem: 90 - scheduling: - require: - - docker toolshed.g2.bx.psu.edu/repos/iuc/cherri_eval/cherri_eval/.*: + inherits: basic_docker_tool cores: 1 mem: 20 - scheduling: - require: - - docker toolshed.g2.bx.psu.edu/repos/nml/metaspades/metaspades/.*: cores: 2 scheduling: accept: - pulsar - - pulsar-training-large - condor-tpv rules: - if: 0.05 <= input_size < 1 @@ -500,14 +544,28 @@ tools: scheduling: prefer: - condor-tpv + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx{int(mem)}G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp rules: - - if: 0.1 <= input_size < 1 + # version < '2.15.1+galaxy0' + - if: helpers.tool_version_lt(tool, '2.15.1+galaxy0') and 0.1 <= input_size < 1 cores: 20 mem: 100 - - if: 1 <= input_size < 2 + - if: helpers.tool_version_lt(tool, '2.15.1+galaxy0') and 1 <= input_size < 2 + cores: 30 + mem: 200 + - if: helpers.tool_version_lt(tool, '2.15.1+galaxy0') and 2 <= input_size < 20 + cores: 60 + mem: 950 + # version >= '2.15.1+galaxy0' + - if: helpers.tool_version_gte(tool, '2.15.1+galaxy0') and 0.1 <= input_size < 1 + cores: 20 + mem: 50 + - if: helpers.tool_version_gte(tool, '2.15.1+galaxy0') and 1 <= input_size < 20 cores: 30 mem: 200 - - if: 2 <= input_size < 30 + # all versions + - if: 20 <= input_size < 30 cores: 60 mem: 950 - if: input_size >= 30 @@ -515,35 +573,37 @@ tools: Too much data, we cannot support such large Trinity assemblies with our backend. Please use another server for your job. 
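The trinity rules above split the resource bands by wrapper version using helpers.tool_version_lt and helpers.tool_version_gte. A sketch of what such helpers can look like, assuming the tool object exposes a PEP 440-compatible version string like "2.15.1+galaxy0" (an assumption; the real helpers may differ):

    # Hypothetical version-gate helpers; not the actual TPV helper module.
    from packaging.version import parse

    def tool_version_lt(tool, threshold: str) -> bool:
        return parse(tool.version) < parse(threshold)

    def tool_version_gte(tool, threshold: str) -> bool:
        return parse(tool.version) >= parse(threshold)

For a tool at exactly 2.15.1+galaxy0 the lt rules evaluate to False and the gte bands apply, so the two rule families never overlap.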
- 'mothur_*': + '.*mothur_.*': cores: 1 mem: 90 params: submit_requirements: 'GalaxyGroup == "compute_mothur"' - docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000000 --env TERM=vt100 + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" docker_default_container_id: centos:8.3.2011 + object_store_id: "files13" scheduling: require: - docker - embedded-pulsar - 'mothur_classify_seqs*': + '.*mothur_classify_seqs.*': cores: 2 mem: 20 params: submit_requirements: 'GalaxyGroup == "compute_mothur"' - docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000000 --env TERM=vt100 + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" docker_default_container_id: centos:8.3.2011 + object_store_id: "files13" scheduling: require: - docker - embedded-pulsar - 'bioext_bam2msa': + '.*bioext_bam2msa.*': params: - docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000000 --env TERM=vt100 + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" docker_default_container_id: centos:8.3.2011 scheduling: @@ -553,10 +613,205 @@ tools: 'last_*': params: - docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000000 --env TERM=vt100 + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" 
docker_default_container_id: centos:8.3.2011 scheduling: require: - docker - embedded-pulsar + + toolshed.g2.bx.psu.edu/repos/bgruening/blobtoolkit/blobtoolkit/.*: + cores: 8 + mem: 20 + inherits: basic_docker_tool + params: + docker_run_extra_arguments: --user 999 + + # 4GB is enough for most of the runs as it seems + toolshed.g2.bx.psu.edu/repos/iuc/purge_dups/purge_dups/.*: + cores: 1 + mem: 6 + + toolshed.g2.bx.psu.edu/repos/devteam/picard/picard_MarkDuplicates/.*: + cores: 8 + mem: 20 + inherits: basic_docker_tool + env: + _JAVA_OPTIONS: -Xmx{int(mem)}G -Xms1G + + toolshed.g2.bx.psu.edu/repos/bgruening/diamond/diamond/.*: + cores: 6 + mem: 90 + rules: + - if: input_size >= 30 + cores: 12 + toolshed.g2.bx.psu.edu/repos/bgruening/xchem_transfs_scoring/xchem_transfs_scoring/.*: + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/bgruening/openduck_run_smd/openduck_run_smd/.*: + env: + docker_set_user: 1000 + docker_run_extra_arguments: '-e "OPENDUCK_GPU_PARAM=$OPENDUCK_GPU_PARAM" --gpus all' + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/bgruening-util/stress_ng/stress_ng/.*: + scheduling: + require: + - singularity + - conda + toolshed.g2.bx.psu.edu/repos/galaxyp/maxquant/maxquant/.*: + scheduling: + require: + - singularity + toolshed.g2.bx.psu.edu/repos/iuc/lumpy_prep/lumpy_prep/.*: + scheduling: + require: + - singularity + - conda + # is there a way to avoid this + ".*pcgr.*": + mem: 16 + cores: 8 + env: + GALAXY_PCGR_DIR: "/data/db/databases/pcgr" + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/iuc/vardict_java/vardict_java/.*: + scheduling: + require: + - singularity + - conda +# Not for Pulsar, or is the file copied? + toolshed.g2.bx.psu.edu/repos/climate/cds_essential_variability/cds_essential_variability/.*: + env: + COPERNICUS_CDSAPIRC_KEY_FILE: /data/db/data_managers/COPERNICUS_CDSAPIRC_KEY_FILE + toolshed.g2.bx.psu.edu/repos/iuc/idr_download_by_ids/idr_download_by_ids/.*: + scheduling: + require: + - singularity + - conda + toolshed.g2.bx.psu.edu/repos/imgteam/overlay_moving_and_fixed_image/ip_viz_overlay_moving_and_fixed_image/.*: + cores: 8 + basic_numba_tool: + env: + NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp + OMP_NUM_THREADS: 4 + OPENBLAS_NUM_THREADS: 4 + MKL_NUM_THREADS: 4 + VECLIB_MAXIMUM_THREADS: 4 + NUMEXPR_NUM_THREADS: 4 + NUMBA_NUM_THREADS: 4 + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_process_scans/dimspy_process_scans/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_replicate_filter/dimspy_replicate_filter/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_align_samples/dimspy_align_samples/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/galaxyp/openms_msgfplusadapter/MSGFPlusAdapter/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + toolshed.g2.bx.psu.edu/repos/iracooke/msgfplus/msgfplus/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + toolshed.g2.bx.psu.edu/repos/bgruening/repeat_masker/repeatmasker_wrapper/.*: + env: + RM_LIB_PATH: "/data/db/databases/dfam/3.4/" + toolshed.g2.bx.psu.edu/repos/bgruening/repeat_masker/repeatmasker_wrapper/4.1.5+galaxy0: + cores: 4 + toolshed.g2.bx.psu.edu/repos/galaxyp/reactome_pathwaymatcher/reactome_pathwaymatcher/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx17G -Xms1G 
-Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/bbtools_callvariants/bbtools_callvariants/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/crs4/prokka/prokka/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/devteam/picard/picard_SortSam/.*: + env: + _JAVA_OPTIONS: -Xmx4G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/peptide_shaker/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=4G -Xmx120G -Xms4G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/migmap/migmap/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=6G -Xmx14G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_cluster_reduce_dimension/scanpy_cluster_reduce_dimension/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_filter/scanpy_filter/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_inspect/scanpy_inspect/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_normalize/scanpy_normalize/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_remove_confounders/scanpy_remove_confounders/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_plot/scanpy_plot/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/devteam/sam_merge/sam_merge2/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/pjbriggs/trimmomatic/trimmomatic/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/.*: + cores: 16 + mem: 90 + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + TERM: vt100 + + toolshed.g2.bx.psu.edu/repos/imgteam/unzip/unzip/.*: + scheduling: + require: + - singularity + + # Also on add_to_tpv_shared_db.yml but without NUMBA_CACHE_DIR + toolshed.g2.bx.psu.edu/repos/iuc/gemini_inheritance/gemini_inheritance/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/chira_map/chira_map/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_merge/chira_merge/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_quantify/chira_quantify/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_extract/chira_extract/.*: + scheduling: + require: + - singularity + - conda diff --git a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/users.yml b/files/galaxy-test/tpv/users.yml similarity index 100% rename from files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/users.yml rename to files/galaxy-test/tpv/users.yml diff --git a/files/galaxy/config/builds.txt b/files/galaxy/config/builds.txt index 8d20ffb69..3ed425b31 100644 --- a/files/galaxy/config/builds.txt +++ b/files/galaxy/config/builds.txt @@ -1,5 +1,6 @@ #Harvested from http://genome.cse.ucsc.edu/cgi-bin/das/dsn ? 
unspecified (?) +hs1 Human Jan. 2022 (T2T CHM13v2.0/hs1) (hs1) hg38 Human Dec. 2013 (GRCh38/hg38) (hg38) hg19 Human Feb. 2009 (GRCh37/hg19) (hg19) hg18 Human Mar. 2006 (NCBI36/hg18) (hg18) @@ -183,6 +184,7 @@ droYak2 D. yakuba Nov. 2005 (WUGSC 7.1/droYak2) (droYak2) droYak1 D. yakuba Apr. 2004 (WUGSC 1.0/droYak1) (droYak1) pelCri1 Dalmatian pelican May 2014 (ASM68737v1/pelCri1) (pelCri1) myoDav1 David's myotis (bat) Aug 2012 (ASM32734v1/myoDav1) (myoDav1) +canFam6 Dog Oct. 2020 (Dog10K_Boxer_Tasha/canFam6) (canFam6) canFam5 Dog May 2019 (UMICH_Zoey_3.1/canFam5) (canFam5) canFam4 Dog Mar. 2020 (UU_Cfam_GSD_1.0/canFam4) (canFam4) canFam3 Dog Sep. 2011 (Broad CanFam3.1/canFam3) (canFam3) @@ -459,7 +461,8 @@ tupBel1 Tree shrew Dec. 2006 (Broad/tupBel1) (tupBel1) melGal5 Turkey Nov. 2014 (Turkey_5.0/melGal5) (melGal5) melGal1 Turkey Dec. 2009 (TGC Turkey_2.01/melGal1) (melGal1) nanGal1 Upper Galilee mountains blind mole rat Jun 2014 (S.galili_v1.0/nanGal1) (nanGal1) -wuhCor1 SARS-CoV-2 Jan. 2020/NC_045512.2 (wuhCor1) +wuhCor1 SARS-CoV-2 Jan. 2020 (NC_045512.2) (wuhCor1) +mpxvRivers Monkeypox virus MPXV-M5312_HM12_Rivers (MT903340.1/GCF_014621545.1) (mpxvRivers) macEug2 Wallaby Sep. 2009 (TWGS Meug_1.1/macEug2) (macEug2) macEug1 Wallaby Nov. 2007 (Baylor 1.0/macEug1) (macEug1) bubBub1 Water buffalo Sep. 2013 (UMD_CASPUR_WB_2.0/bubBub1) (bubBub1) @@ -471,6 +474,7 @@ halAlb1 White-tailed eagle May 2014 (ASM69140v1/halAlb1) (halAlb1) phaLep1 White-tailed tropicbird May 2014 (ASM68728v1/phaLep1) (phaLep1) zonAlb1 White-throated sparrow Apr 2013 (ASM38545v1/zonAlb1) (zonAlb1) bosMut1 Wild yak Oct 2011 (BosGru_v2.0/bosMut1) (bosMut1) +xenTro10 X. tropicalis Nov. 2019 (UCB_Xtro_10.0/xenTro10) (xenTro10) xenTro9 X. tropicalis Jul. 2016 (Xenopus_tropicalis_v9.1/xenTro9) (xenTro9) xenTro7 X. tropicalis Sep. 2012 (JGI 7.0/xenTro7) (xenTro7) xenTro3 X. tropicalis Nov. 
2009 (JGI 4.2/xenTro3) (xenTro3) diff --git a/files/galaxy/config/datatypes_conf.xml b/files/galaxy/config/datatypes_conf.xml index 869c288df..c9ffe643a 100644 --- a/files/galaxy/config/datatypes_conf.xml +++ b/files/galaxy/config/datatypes_conf.xml [hunks @@ -184,7 +184,9 @@, @@ -198,6 +200,7 @@, @@ -305,6 +308,7 @@, @@ -314,13 +318,13 @@, @@ -415,6 +419,7 @@, @@ -458,6 +463,7 @@, @@ -493,6 +499,10 @@, @@ -895,10 +905,6 @@, @@ -925,7 +931,6 @@, @@ -947,6 +952,23 @@: the added/removed XML datatype and sniffer entries were stripped during text extraction and are not recoverable; the diff header of the following file, files/galaxy/dynamic_rules/usegalaxy/destination_specifications.yaml, whose remote_cluster_mq destination entries are removed below, was lost with them] we should enable it and test the difference, I don't think it has an effect - singularity_enabled: true - singularity_default_container_id: '/data/0/singularity/ubuntu:18.04' - tmp_dir: 'True' - -remote_cluster_mq_de03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 19 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_docker_de03: - info: - remote: true - scheduler: 'condor' - nodes: 18 - limits: - cores: 8 - mem: 19 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - docker_enabled: true - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - # can and should be made stricter at some point - docker_volumes: '$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/share/staging/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:ubuntu-14.04 - docker_sudo: false - docker_set_user: '' - docker_run_extra_arguments: '--gpus all' - tmp_dir: 'True' - - -remote_cluster_mq_pt01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_uk01: - info: - remote: true - scheduler: 'condor' - nodes: 40 - limits: - cores: 60 - mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'remote' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_singularity_uk01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 60 -
mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_docker_uk01: - info: - remote: true - scheduler: 'condor' - nodes: 40 - limits: - cores: 60 - mem: 300 - gpus: 1 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - GPU_AVAILABLE: 1 - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - submit_request_gpus: '{GPUS}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - docker_enabled: true - # can and should be made stricter at some point - docker_volumes: '$galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$job_directory:rw,/data/share/staging/:ro' - docker_memory: '{MEMORY}' - docker_auto_rm: true - docker_default_container_id: busybox:glibc - docker_sudo: false - docker_set_user: '' - docker_run_extra_arguments: '--gpus all' - tmp_dir: 'True' - -remote_cluster_mq_it02: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 7 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_2wdfalse_it03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_2wdtrue_it03: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - 
default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_fr01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_fi01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 8 - mem: 31 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - -remote_cluster_mq_no01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 15 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - -remote_cluster_mq_es01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 16 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /data/share/var/database/container_cache - params: - priority: -{PRIORITY} - submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/data/share/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: false - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/cvmfs/singularity.galaxyproject.org/u/b/ubuntu:18.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' - -remote_cluster_mq_cz01: - info: - remote: true - scheduler: 'condor' - limits: - cores: 4 - mem: 16 - env: - GALAXY_MEMORY_MB: '{MEMORY_MB}' - GALAXY_SLOTS: '{PARALLELISATION}' - LC_ALL: C - SINGULARITY_CACHEDIR: /storage/praha5-elixir/home/galaxyeu/pulsar/singularity_cache - SINGULARITY_TMPDIR: $SCRATCHDIR - TMPDIR: $SCRATCHDIR - TMP: $SCRATCHDIR - TEMP: $SCRATCHDIR - params: - priority: -{PRIORITY} - 
submit_request_cpus: '{PARALLELISATION}' - submit_request_memory: '{MEMORY}' - jobs_directory: '/storage/praha5-elixir/home/galaxyeu/pulsar/files/staging' - default_file_action: 'remote_transfer' - dependency_resolution: 'none' - outputs_to_working_directory: true - rewrite_parameters: true - transport: 'curl' - singularity_enabled: true - singularity_default_container_id: '/storage/praha5-elixir/home/galaxyeu/pulsar/ubuntu_20.04' - singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw' diff --git a/files/galaxy/dynamic_rules/usegalaxy/dexseq.py b/files/galaxy/dynamic_rules/usegalaxy/dexseq.py deleted file mode 100644 index af598f674..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/dexseq.py +++ /dev/null @@ -1,34 +0,0 @@ -from galaxy.jobs import JobDestination -import os - -def dexseq_memory_mapper( job, tool ): - # Assign admin users' jobs to special admin_project. - # Allocate extra time - inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) - inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) - gtf_file = inp_data[ "gtf" ].file_name - vmem = 5200 - cores = 6 - params = {} - gtf_file_size = os.path.getsize(gtf_file) / (1024*1024.0) - if gtf_file_size > 150: - vmem = 30000 - cores = 6 - - # TODO(hxr): fix? - # params["nativeSpecification"] = """ - # -q galaxy1.q,all.q -l galaxy1_slots=1 -l h_vmem=%sM -pe "pe*" %s -v - # _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v - # LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT - # -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1 - # """ % (vmem, cores) - params['request_memory'] = vmem / 1024 - params['request_cpus'] = cores - params['requirements'] = '(GalaxyGroup == "compute")' - params['priority'] = 128 - env = { - '_JAVA_OPTIONS': "-Xmx4G -Xms1G", - } - - return JobDestination(id="dexseq_dynamic_memory_mapping", runner="condor", params=params, env=env) - # return JobDestination(id="dexseq_dynamic_memory_mapping", runner="drmaa", params=params) diff --git a/files/galaxy/dynamic_rules/usegalaxy/joint_destinations.yaml b/files/galaxy/dynamic_rules/usegalaxy/joint_destinations.yaml deleted file mode 100644 index 2c4dd6719..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/joint_destinations.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -remote_condor_cluster_gpu_docker: - - remote_cluster_mq_docker_uk01 - -remote_condor_cluster_gpu: - - remote_cluster_mq_uk01 - -remote_condor_cluster_singularity: - - remote_cluster_mq_de01 - - remote_cluster_mq_it01 - - remote_cluster_mq_fi01 diff --git a/files/galaxy/dynamic_rules/usegalaxy/sorting_hat.py b/files/galaxy/dynamic_rules/usegalaxy/sorting_hat.py deleted file mode 100644 index bedc7f430..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/sorting_hat.py +++ /dev/null @@ -1,644 +0,0 @@ -#!/usr/bin/env python -# usegalaxy.eu sorting hat -""" - - .'lddc,. - 'cxOOOOOOOOOxoc;,... - .:dOOOOOOOOOOOOOOOOOOOOOOOl - .;dOOOOOOOOOOOOOOxcdOOOOOOOkl. - oOOOOOOOOOOOOOOOx, ...... - .xOOkkkOOOOOOOOOk' - .xOOkkkOOOOOOOOO00. - dOOkkkOOOOOOOOOOOOd - cOOkkkOOOOOOOOOOOOOO' - .OOOkkOOOOOOOOOOOOOOOd - dOOkkOOOOOOOOOOOOOOOOO, - .OOOOOOOOOOOOOOOOOOOOOOx - cOOOOOOOOOOOOOOOOOOOOOOO; - kOOOOOOOxddddddddxOOOOOOk. - ..,:cldxdlodxxkkO;'''''''';Okkxxdookxdlc:,.. - .;lxO00000000d;;;;;;;;,'';;;;'',;;;;;;;:k00000000Oxl;. - d0000000000000xl::;;;;;,'''''''',;;;;;::lk0000000000000d - .d00000000000000000OkxxxdoooooooodxxxkO00000000000000000d. - .;lxO00000000000000000000000000000000000000000000Oxl;. 
- ..,;cloxkOO0000000000000000000000OOkxdlc;,.. - .................. - -"Oh, you may not think I'm pretty, -But don't judge on what you see," - -"For I'm the [Galaxy] Sorting Hat -And I can cap them all." - -You might belong in Condor, -Where dwell the slow to compute, - -You might belong in Pulsar, -Far flung and remote, - -Or yet in wise old Singularity, -If you're evil and insecure - ---hexylena -""" -from galaxy.jobs import JobDestination -from galaxy.jobs.mapper import JobMappingException -from random import sample - -import copy -import math -import os -import yaml - -# Maximum resources -CONDOR_MAX_CORES = 40 -CONDOR_MAX_MEM = 1000 - -# The default / base specification for the different environments. -SPECIFICATION_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'destination_specifications.yaml') -with open(SPECIFICATION_PATH, 'r') as handle: - SPECIFICATIONS = yaml.load(handle, Loader=yaml.SafeLoader) - -TOOL_DESTINATION_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tool_destinations.yaml') -with open(TOOL_DESTINATION_PATH, 'r') as handle: - TOOL_DESTINATIONS = yaml.load(handle, Loader=yaml.SafeLoader) - -DEFAULT_DESTINATION = 'condor' -DEFAULT_TOOL_SPEC = { - 'cores': 1, - 'mem': 4.0, - 'gpus': 0, - 'force_destination_id': False, - 'runner': DEFAULT_DESTINATION -} - -TOOL_DESTINATION_ALLOWED_KEYS = ['cores', 'env', 'gpus', 'mem', 'name', 'nativeSpecExtra', - 'params', 'permissions', 'runner', 'tags', 'tmp', 'force_destination_id', - 'docker_auto_rm', 'docker_default_container_id', 'docker_set_user', - 'docker_memory', 'docker_run_extra_arguments', 'docker_set_user', - 'docker_sudo', 'docker_volumes'] - -SPECIFICATION_ALLOWED_KEYS = ['env', 'limits', 'params', 'tags', 'nodes'] - -FDID_PREFIX = 'sh_fdid_' - -JOINT_DESTINATIONS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'joint_destinations.yaml') -with open(JOINT_DESTINATIONS_PATH, 'r') as handle: - JOINT_DESTINATIONS = yaml.load(handle, Loader=yaml.SafeLoader) - - -def assert_permissions(tool_spec, user_email, user_roles): - """ - Permissions testing. - - - default state is to allow everyone to run everything. - - If there is a permissions block, `deny: all` is the default. - - We ONLY support allowing specific users to run something. This DOES NOT - support preventing specific users from running something. - - """ - exception_text = "This tool is temporarily disabled due to internal policy. Please contact us if you have issues." - # If there is no permissions block then it's going to be fine for everyone. - if 'permissions' not in tool_spec: - return - - permissions = tool_spec['permissions'] - - # TODO(hxr): write a custom tool thing linter. - # We'll be extra defensive here since I don't think I trust us to get - # linting right for now. - if len(permissions.keys()) == 0: - raise Exception("JCaaS Configuration error 1") - - # And for typos. - if 'allow' not in permissions: - raise Exception("JCaaS Configuration error 2") - - if 'users' not in permissions['allow'] and 'roles' not in permissions['allow']: - raise Exception("JCaaS Configuration error 3") - # ENDTODO - - # Pull out allowed users and roles, defaulting to empty lists if the keys - # aren't there. - allowed_users = permissions['allow'].get('users', []) - allowed_roles = permissions['allow'].get('roles', []) - - # If the user is on our list, yay, return. 
- if user_email in allowed_users: - return - - # If one of their roles is in our list - if any([user_role in allowed_roles for user_role in user_roles]): - return - - # Auth failure. - raise Exception(exception_text) - - -def change_object_store_dependent_on_user(params, user_roles): - """ - Different roles can have their own storage. Here we overwrite the object store based on user associated roles. - Example: A user belongs to the role 'dataplant'. Those users own dedicated storage that they include into Galaxy. - Here, we change the 'object_store_id' based in the role 'dataplant'. - """ - if 'dataplant' in user_roles: - params['object_store_id'] = 'dataplant01' - # test new storage engines - if 'storage-test' in user_roles: - params['object_store_id'] = 's3_netapp01' - return params - - -def get_tool_id(tool_id): - """ - Convert ``toolshed.g2.bx.psu.edu/repos/devteam/column_maker/Add_a_column1/1.1.0`` - to ``Add_a_column`` - - :param str tool_id: a tool id, can be the short kind (e.g. upload1) or the long kind with the full TS path. - - :returns: a short tool ID. - :rtype: str - """ - if tool_id.count('/') == 0: - # E.g. upload1, etc. - return tool_id - - # what about odd ones. - if tool_id.count('/') == 5: - (server, _, owner, repo, name, version) = tool_id.split('/') - return name - - return tool_id - - -def name_it(tool_spec, prefix=FDID_PREFIX): - if 'cores' in tool_spec: - name = '%scores_%sG' % (tool_spec.get('cores', 1), tool_spec.get('mem', 4)) - elif len(tool_spec.keys()) == 0 or (len(tool_spec.keys()) == 1 and 'runner' in tool_spec): - name = '%s_default' % tool_spec.get('runner') - else: - name = '%sG_memory' % tool_spec.get('mem', 4) - - if tool_spec.get('tmp', None) == 'large': - name += '_large' - - if 'name' in tool_spec: - name += '_' + tool_spec['name'] - - # Force a replacement of the destination's id - if tool_spec.get('force_destination_id', False): - name = prefix + tool_spec.get('runner') - - return name - - -def _get_limits(destination, dest_spec=SPECIFICATIONS, default_cores=1, default_mem=4, default_gpus=0): - limits = {'cores': default_cores, 'mem': default_mem, 'gpus': default_gpus} - limits.update(dest_spec.get(destination).get('limits', {})) - return limits - - -def _weighted_random_sampling(destinations, dest_spec=SPECIFICATIONS): - bunch = [] - for d in destinations: - weight = SPECIFICATIONS[d].get('nodes', 1) - bunch += [d]*weight - destination = sample(bunch, 1)[0] - return destination - - -def build_spec(tool_spec, dest_spec=SPECIFICATIONS, runner_hint=None): - destination = runner_hint if runner_hint else tool_spec.get('runner') - - if destination not in dest_spec: - if destination in JOINT_DESTINATIONS: - destination = _weighted_random_sampling(JOINT_DESTINATIONS[destination]) - else: - destination = DEFAULT_DESTINATION - - env = dict(dest_spec.get(destination, {'env': {}})['env']) - params = dict(dest_spec.get(destination, {'params': {}})['params']) - tags = {dest_spec.get(destination).get('tags', None)} - - # A dictionary that stores the "raw" details that went into the template. - raw_allocation_details = {} - - # We define the default memory and cores for all jobs. This is - # semi-internal, and may not be properly propagated to the end tool - tool_memory = tool_spec.get('mem', 4) - tool_cores = tool_spec.get('cores', 1) - tool_gpus = tool_spec.get('gpus', 0) - - # We apply some constraints to these values, to ensure that we do not - # produce unschedulable jobs, requesting more ram/cpu than is available in a - # given location. 
Currently, we clamp those values rather than intelligently - # re-scheduling to a different location due to TaaS constraints. - limits = _get_limits(destination, dest_spec=dest_spec) - tool_memory = min(tool_memory, limits.get('mem')) - tool_cores = min(tool_cores, limits.get('cores')) - tool_gpus = min(tool_gpus, limits.get('gpus')) - - kwargs = { - # Higher numbers are lower priority, like `nice`. - 'PRIORITY': tool_spec.get('priority', 128), - 'MEMORY': str(tool_memory) + 'G', - 'MEMORY_MB': int(tool_memory * 1024), - 'PARALLELISATION': tool_cores, - 'NATIVE_SPEC_EXTRA': "", - 'GPUS': tool_gpus, - } - - if 'docker_enabled' in params and params['docker_enabled']: - for k in tool_spec: - if k.startswith('docker'): - params[k] = tool_spec.get(k, '') - - # Allow more human-friendly specification - if 'nativeSpecification' in params: - params['nativeSpecification'] = params['nativeSpecification'].replace('\n', ' ').strip() - - # We have some destination specific kwargs. `nativeSpecExtra` and `tmp` are only defined for SGE - if 'condor' in destination: - if 'cores' in tool_spec: - # kwargs['PARALLELISATION'] = tool_cores - raw_allocation_details['cpu'] = tool_cores - else: - del params['request_cpus'] - - if 'mem' in tool_spec: - raw_allocation_details['mem'] = tool_memory - - if 'requirements' in tool_spec: - params['requirements'] = tool_spec['requirements'] - - if 'rank' in tool_spec: - params['rank'] = tool_spec['rank'] - - if '+Group' in tool_spec: - params['+Group'] = tool_spec['+Group'] - - if 'remote_cluster_mq' in destination: - # specif for condor cluster - if tool_gpus == 0 and 'submit_request_gpus' in params: - del params['submit_request_gpus'] - - # Update env and params from kwargs. - env.update(tool_spec.get('env', {})) - env = {k: str(v).format(**kwargs) for (k, v) in env.items()} - params.update(tool_spec.get('params', {})) - for (k, v) in params.items(): - if not isinstance(v, list): - params[k] = str(v).format(**kwargs) - else: - params[k] = v - - tags.add(tool_spec.get('tags', None)) - tags.discard(None) - tags = ','.join([x for x in tags if x is not None]) if len(tags) > 0 else None - - if destination == 'sge': - runner = 'drmaa' - elif 'condor' in destination: - runner = 'condor' - elif 'remote_cluster_mq' in destination: - # destination label has to follow this convention: - # remote_cluster_mq_feature1_feature2_feature3_pulsarid - runner = "_".join(['pulsar_eu', destination.split('_').pop()]) - else: - runner = 'local' - - env = [dict(name=k, value=v) for (k, v) in env.items()] - return env, params, runner, raw_allocation_details, tags - - -def get_training_roles(user_roles): - training_roles = [role for role in user_roles if role.startswith('training-')] - if any([role.startswith('training-gcc-') for role in training_roles]): - training_roles.append('training-gcc') - return training_roles - - -def reroute_to_dedicated(tool_spec, user_roles): - """ - Re-route users to correct destinations. Some users will be part of a role - with dedicated training resources. - """ - # Collect their possible training roles identifiers. - training_roles = get_training_roles(user_roles) - - # No changes to specification. - if len(training_roles) == 0: - # Require that the jobs do not run on these dedicated training machines. - return {'requirements': 'GalaxyGroup == "compute"'} - - # Otherwise, the user does have one or more training roles. - # So we must construct a requirement / ranking expression. 
- training_expr = " || ".join(['(GalaxyGroup == "%s")' % role for role in training_roles]) - training_labels = '"'+", ".join(['%s' % role for role in training_roles])+'"' - return { - # We require that it does not run on machines that the user is not in the role for. - 'requirements': '(GalaxyGroup == "compute") || (%s)' % training_expr, - # We then rank based on what they *do* have the roles for - '+Group': training_labels, - } - - -def _finalize_tool_spec(tool_id, user_roles, tools_spec=TOOL_DESTINATIONS, memory_scale=1.0): - # Find the 'short' tool ID which is what is used in the .yaml file. - tool = get_tool_id(tool_id) - # Pull the tool specification (i.e. job destination configuration for this tool) - tool_spec = copy.deepcopy(tools_spec.get(tool, {})) - # Update the tool specification with any training resources that are available - tool_spec.update(reroute_to_dedicated(tool_spec, user_roles)) - - # Update the tool specification with default values if not specified - for s in DEFAULT_TOOL_SPEC: - tool_spec[s] = tool_spec.get(s, DEFAULT_TOOL_SPEC[s]) - - tool_spec['mem'] *= memory_scale - - # Only two tools are truly special. - if tool_id in ('upload1', '__DATA_FETCH__'): - tool_spec = { - 'mem': 3, - 'runner': 'condor_upload', - 'rank': 'GalaxyGroup == "upload"', - 'requirements': 'GalaxyTraining == false', - 'env': { - 'TEMP': '/data/1/galaxy_db/tmp/' - } - } - elif tool_id == '__SET_METADATA__': - tool_spec = { - 'mem': 1, - 'runner': 'condor_upload', - 'rank': 'GalaxyGroup == "metadata"', - 'requirements': 'GalaxyTraining == false', - } - # These we're running on a specific subset - elif tool in ('interactive_tool_ml_jupyter_notebook', 'gmx_sim', 'instagraal'): - tool_spec['requirements'] = 'GalaxyGroup == "compute_gpu"' - elif 'interactive_tool_' in tool_id: - tool_spec['requirements'] = 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' - elif tool in ('run_jupyter_job', 'deepvariant', 'msconvert', 'glassgo', 'bionano_scaffold', 'mitohifi'): - tool_spec['requirements'] = 'GalaxyDockerHack == True && GalaxyGroup == "compute"' - elif 'mothur' in tool: - if len(get_training_roles(user_roles)) == 0: - tool_spec['requirements'] = 'GalaxyGroup == "compute_mothur"' - - return tool_spec - - -def convert_to(tool_spec, runner): - tool_spec['runner'] = runner - - if runner == 'sge': - # sge doesn't accept non-ints - tool_spec['mem'] = int(math.ceil(tool_spec['mem'])) - - return tool_spec - - -def _gateway(tool_id, user_preferences, user_roles, user_id, user_email, memory_scale=1.0): - tool_spec = _finalize_tool_spec(tool_id, user_roles, memory_scale=memory_scale) - - # Now build the full spec - runner_hint = None - - if tool_id not in ('upload1', '__DATA_FETCH__', '__SET_METADATA__'): - # hints = [x for x in user_roles if x.startswith('destination-')] - # if len(hints) > 0: - # runner_hint = hints[0].replace('destination-pulsar-', 'remote_cluster_mq_') - for data_item in user_preferences: - if "distributed_compute|remote_resources" in data_item: - if user_preferences[data_item] != "None": - runner_hint = user_preferences[data_item] - - # Ensure that this tool is permitted to run, otherwise, throw an exception. - assert_permissions(tool_spec, user_email, user_roles) - - env, params, runner, _, tags = build_spec(tool_spec, runner_hint=runner_hint) - params['accounting_group_user'] = str(user_id) - params['description'] = get_tool_id(tool_id) - - # This is a special case, we're requiring it for faster feedback / turnaround times. 
- if 'training-hard-limits' in user_roles: - params['requirements'] = 'GalaxyGroup == "training-hard-limits"' - - return env, params, runner, tool_spec, tags - - -def _special_case(param_dict, tool_id, user_id, user_roles): - """" - Takes care of tools with special cases - """ - if get_tool_id(tool_id).startswith('interactive_tool_') and user_id == -1: - raise JobMappingException("This tool is restricted to registered users, " - "please contact a site administrator at https://gitter.im/usegalaxy-eu/Lobby") - - if get_tool_id(tool_id).startswith('interactive_tool_ml') and 'interactive-tool-ml-jupyter-notebook' not in user_roles: - raise JobMappingException("This tool is restricted to authorized users, " - "please contact a site administrator at https://gitter.im/usegalaxy-eu/Lobby") - - if get_tool_id(tool_id).startswith('gmx_sim'): - md_steps_limit = 1000000 - if 'md_steps' in param_dict['sets']['mdp']: - if param_dict['sets']['mdp']['md_steps'] > md_steps_limit and 'gmx_sim_powerusers' not in user_roles: - raise JobMappingException("this tool's configuration has exceeded a computational limit, " - "please contact a site administrator at https://gitter.im/usegalaxy-eu/Lobby") - - return - - -def gateway(tool_id, user, memory_scale=1.0, next_dest=None): - if user: - user_roles = [role.name for role in user.all_roles() if not role.deleted] - user_preferences = user.extra_preferences - email = user.email - user_id = user.id - else: - user_roles = [] - user_preferences = [] - email = '' - user_id = -1 - - try: - env, params, runner, spec, tags = _gateway(tool_id, user_preferences, user_roles, user_id, email, - memory_scale=memory_scale) - except Exception as e: - return JobMappingException(str(e)) - - resubmit = [] - if next_dest: - resubmit = [{ - 'condition': 'any_failure and attempt <= 3', - 'destination': next_dest - }] - - name = name_it(spec) - params = change_object_store_dependent_on_user(params, user_roles) - return JobDestination( - id=name, - tags=tags, - runner=runner, - params=params, - env=env, - resubmit=resubmit, - ) - - -def gateway_1x(tool_id, user): - return gateway(tool_id, user, memory_scale=1, next_dest='gateway_1_5x') - - -def gateway_1_5x(tool_id, user): - return gateway(tool_id, user, memory_scale=1.5, next_dest='gateway_2x') - - -def gateway_2x(tool_id, user): - return gateway(tool_id, user, memory_scale=2) - - -def gateway_checkpoint(app, job, tool, user): - """ - These are tools that have to be blocked before starting to run, if a particular condition arise. - If not, reroute to gateway single run. 
- """ - param_dict = dict([(p.name, p.value) for p in job.parameters]) - param_dict = tool.params_from_strings(param_dict, app) - tool_id = tool.id - if user: - user_roles = [role.name for role in user.all_roles() if not role.deleted] - user_id = user.id - else: - user_roles = [] - user_id = -1 - - _special_case(param_dict, tool_id, user_id, user_roles) - - return gateway(tool_id, user) - - -def _compute_memory_for_hifiasm(param_dict): - computed_memory = 0 - converter = { - 'g': 1, - 'G': 1, - 'm': 1000, - 'M': 1000, - 'k': 1000000, - 'K': 1000000 - } - kcov = 36 - if 'advanced_options' in param_dict: - if 'kcov' in param_dict['advanced_options']: - kcov = param_dict['advanced_options']['kcov'] - if 'hg_size' in param_dict['advanced_options']: - hg_size = param_dict['advanced_options']['hg_size'] - if len(hg_size) > 1: - hg_size_suffix = hg_size[-1:] - hg_size_value = float(hg_size[:len(hg_size)-1].replace(",", ".")) - # (len*(kmercov*2) * 1.75 - hg_size_value_in_Gb = hg_size_value / converter[hg_size_suffix] - computed_memory = math.ceil(hg_size_value_in_Gb*(kcov*2)*1.75) - - return computed_memory - - -def gateway_for_hifiasm(app, job, tool, user, next_dest=None): - """" - The memory requirement of Hifiasm depends on a wrapper's input - """ - param_dict = dict([(p.name, p.value) for p in job.parameters]) - param_dict = tool.params_from_strings(param_dict, app) - tool_id = tool.id - if user: - user_roles = [role.name for role in user.all_roles() if not role.deleted] - user_preferences = user.extra_preferences - email = user.email - user_id = user.id - else: - user_roles = [] - user_preferences = [] - email = '' - user_id = -1 - - try: - env, params, runner, spec, tags = _gateway(tool_id, user_preferences, user_roles, user_id, email) - except Exception as e: - return JobMappingException(str(e)) - - limits = _get_limits(runner) - request_memory = min(max(_compute_memory_for_hifiasm(param_dict), spec['mem']), limits.get('mem')) - params['request_memory'] = "{}{}".format(request_memory, 'G') - - resubmit = [] - if next_dest: - resubmit = [{ - 'condition': 'any_failure and attempt <= 3', - 'destination': next_dest - }] - - spec['mem'] = request_memory - name = name_it(spec) - return JobDestination( - id=name, - tags=tags, - runner=runner, - params=params, - env=env, - resubmit=resubmit, - ) - - -def gateway_for_keras_train_eval(app, job, tool, user, next_dest=None): - """" - Type of compute resource (CPU or GPU) for keras_train_eval tool depends on user's input from its wrapper. - Default resource is CPU. 
- """ - param_dict = dict([(p.name, p.value) for p in job.parameters]) - param_dict = tool.params_from_strings(param_dict, app) - tool_id = tool.id - if user: - user_roles = [role.name for role in user.all_roles() if not role.deleted] - user_preferences = user.extra_preferences - email = user.email - user_id = user.id - else: - user_roles = [] - user_preferences = [] - email = '' - user_id = -1 - - # get default job destination parameters - try: - env, params, runner, spec, tags = _gateway(tool_id, user_preferences, user_roles, user_id, email) - except Exception as e: - return JobMappingException(str(e)) - - # set up to resubmit job in case of failure - resubmit = [] - if next_dest: - resubmit = [{ - 'condition': 'any_failure and attempt <= 3', - 'destination': next_dest - }] - name = name_it(spec) - - # assign dynamic runner based on user's input from tool wrapper - if '__job_resource' in param_dict: - if 'gpu' in param_dict['__job_resource']: - if param_dict['__job_resource']['gpu'] == '1': - params['requirements'] = 'GalaxyGroup == "compute_gpu"' - params['request_gpus'] = 1 - # env.append({'name': 'GPU_AVAILABLE', 'value': '1'}) - - # create dynamic destination rule - return JobDestination( - id=name, - tags=tags, - runner=runner, - params=params, - env=env, - resubmit=resubmit, - ) diff --git a/files/galaxy/dynamic_rules/usegalaxy/tool_destinations.yaml b/files/galaxy/dynamic_rules/usegalaxy/tool_destinations.yaml deleted file mode 100644 index e5fae4a8b..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/tool_destinations.yaml +++ /dev/null @@ -1,1287 +0,0 @@ ---- -__default__: {} - -upload1: - cores: 1 - mem: 3 - gpus: 0 - runner: condor_upload - env: - TEMP: /data/1/galaxy_db/tmp - -__DATA_FETCH__: - cores: 1 - mem: 3 - gpus: 0 - runner: condor_upload - env: - TEMP: /data/1/galaxy_db/tmp - -__SET_METADATA__: - cores: 1 - mem: 1 - gpus: 0 - runner: condor - params: - requirements: "GalaxyTraining == false" - rank: 'GalaxyGroup == "metadata"' - -keras_train_and_eval: - cores: 4 - env: - CUDA_VISIBLE_DEVICES: 0 - -xchem_transfs_scoring: - runner: remote_condor_cluster_gpu_docker - gpus: 1 - cores: 1 - env: - CUDA_VISIBLE_DEVICES: 0 - -xchem_pose_scoring: - runner: remote_condor_cluster_gpu_docker - gpus: 1 - cores: 1 - env: - CUDA_VISIBLE_DEVICES: 0 - -openduck_run_smd: - runner: remote_condor_cluster_gpu_docker - gpus: 1 - cores: 1 - env: - CUDA_VISIBLE_DEVICES: 0 - OPENDUCK_GPU_PARAM: "--gpu-id 1" - docker_set_user: 1000 - docker_run_extra_arguments: '-e "OPENDUCK_GPU_PARAM=$OPENDUCK_GPU_PARAM" --gpus all' - -sklearn_searchcv: {mem: 12, cores: 10} - -sklearn_model_validation: {mem: 12, cores: 10} - -# These are interactive and get restricted volumes -interactive_tool_jupyter_notebook: - mem: 4 - runner: condor_docker_ie_interactive - env: - HOME: /home/jovyan - force_destination_id: true - -instagraal: - runner: condor_docker_gpu - force_destination_id: true - gpus: 1 - cores: 1 - mem: 30 - docker_run_extra_arguments: " --gpus all" - -interactive_tool_pangeo_notebook: - mem: 4 - runner: condor_docker_ie_interactive - env: - HOME: /home/jovyan - force_destination_id: true - -interactive_tool_climate_notebook: - mem: 4 - runner: condor_docker_ie_interactive - env: - HOME: /home/jovyan - force_destination_id: true - -interactive_tool_ml_jupyter_notebook: - runner: condor_docker_ie_interactive_gpu - force_destination_id: true - gpus: 1 - cores: 1 - mem: 8 - docker_run_extra_arguments: " --gpus all" - -# this tool is only used from within another Jupyter notebook -run_jupyter_job: - 
runner: condor_docker - cores: 8 - mem: 32 - -#interactive_tool_rstudio: {mem: 8, cores: 2, runner: condor_docker_ie_interactive, force_destination_id: true} -#interactive_tool_pyiron: {mem: 8, cores: 1, runner: condor_docker_ie_interactive, force_destination_id: true} -#interactive_tool_guacamole_desktop: {mem: 8, cores: 2, runner: condor_docker_ie_interactive, force_destination_id: true} -#interactive_tool_panoply: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_vrm_editor: {mem: 4, cores: 1, runner: condor_docker_ie_interactive, force_destination_id: true} -# -## These are not -#interactive_tool_openrefine: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_audiolabeler: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_ethercalc: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_geoexplorer: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_radiant: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_higlass: {mem: 20, cores: 5, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_phinch: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_neo4j: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_hicbrowser: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_cellxgene: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_bam_iobio: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_vcf_iobio: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_askomics: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_wilson: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_wallace: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_paraview: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_simtext_app: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_isee: {mem: 4, runner: condor_docker_ie, force_destination_id: true} -#interactive_tool_metashark: {mem: 4, cores: 1, runner: condor_docker_ie, force_destination_id: true} - -transdecoder: {cores: 8, mem: 8} -zerone: {runner: remote_cluster_mq_it01} -basil: {runner: remote_cluster_mq_it01} -#goenrichment: {runner: remote_cluster_mq_be01} -#circos: {mem: 10, runner: remote_cluster_mq_pt01} -circos: {mem: 10} - -# docking: {runner: remote_cluster_mq_uk01} -# prepare_ligands_for_docking: {runner: remote_cluster_mq_it01} -# prepare_box: {runner: remote_cluster_mq_uk01} - -stress_ng: {runner: condor_singularity_with_conda} -fasterq_dump: {cores: 8} - -busco: {cores: 16, mem: 80} -filtlong: {mem: 50} -orfipy: {cores: 8, mem: 8} -nanoplot: {cores: 12, mem: 32} -# roary needs many many cpus if the number of input files increase -# a more specific function would ne neat -roary: {cores: 24} -tgsgapcloser: {cores: 16, mem: 24} -alevin: {mem: 92, cores: 8} -kc-align: {cores: 3} -maxquant: {mem: 60, cores: 8, runner: condor_singularity} -deepvariant: {mem: 10, runner: condor_singularity} -scpipe: {mem: 64, cores: 8} -cardinal_combine: {mem: 92} -cardinal_classification: {mem: 90} -cardinal_filtering: {mem: 60} -cardinal_mz_images: {mem: 20} -cardinal_preprocessing: {cores: 4, mem: 110} -cardinal_quality_report: {mem: 180} 
-cardinal_segmentations: {mem: 92} -cardinal_spectra_plots: {mem: 32} -cardinal_data_exporter: {mem: 200} -kraken_database_builder: {mem: 200} -heatmap: {mem: 12} -Heatmap: {mem: 12} -mitos2: {mem: 12} -feelnc: {mem: 12} -merqury: {mem: 12} -miniasm: {mem: 32} -quast: {cores: 10, mem: 12} -jellyfish: {mem: 12} -medaka_consensus_pipeline: {cores: 12, mem: 12} -shasta: {cores: 10, mem: 12} -msconvert: {runner: condor_docker} -## should be dynamically set, or lowered back to 250 -lumpy_sv: {mem: 8} -lumpy_prep: - runner: condor_singularity_with_conda - mem: 8 - -pharmcat: - runner: condor_docker - mem: 4 - env: - _JAVA_OPTIONS: -Xmx4G -Xms1G - -bionano_scaffold: - runner: condor_docker - mem: 250 - cores: 24 - -mitohifi: - runner: condor_docker - mem: 16 - cores: 8 - -pcgr: - runner: condor_docker - mem: 16 - cores: 8 - env: - GALAXY_PCGR_DIR: "/data/db/databases/pcgr" - -RNAlien: {cores: 10} -slamdunk: {cores: 10} -tombo_resquiggle: {cores: 10, mem: 32} -tombo_detect_modifications: {cores: 10, mem: 32} -pangolin: {cores: 8} -pilon: - mem: 32 - env: - _JAVA_OPTIONS: -Xmx32G -Xms1G - -encyclopedia_prosit_csv_to_library: - mem: 40 - env: - _JAVA_OPTIONS: -Xmx40G -Xms1G - -vardict_java: - runner: condor_singularity_with_conda - mem: 128 - cores: 2 - env: - _JAVA_OPTIONS: -Xmx128G -Xms1G - VARDICT_CHUNKSIZE: 1000000 - -paralyzer: - mem: 8 - env: - _JAVA_OPTIONS: -Xmx8G -Xms1G - -cuffmerge: {mem: 8} -CruxAdapter: {mem: 8} -bio3d_pca: {mem: 64} -bio3d_rmsd: {mem: 90} -bio3d_rmsf: {mem: 90} -rxdock_sort_filter: {mem: 90} -rdock_sort_filter: {mem: 90} - -alphafold: - cores: 10 - mem: 32 - runner: condor_singularity - env: - ALPHAFOLD_DB: /data/db/databases/alphafold_databases - ALPHAFOLD_USE_GPU: "False" - -cds_essential_variability: - env: - COPERNICUS_CDSAPIRC_KEY_FILE: /data/db/data_managers/COPERNICUS_CDSAPIRC_KEY_FILE - -idr_download_by_ids: {mem: 24, runner: condor_singularity_with_conda} -kraken: {mem: 90} -kraken2: {mem: 64} -ont_fast5_api_compress_fast5: {cores: 8} -viz_overlay_moving_and_fixed_image: {mem: 12} -ip_projective_transformation: {mem: 24} -scale_image: {mem: 12} -re_he_maldi_image_registration: {mem: 48} -minimap2: {cores: 8, mem: 32} -winnowmap: {cores: 8, mem: 32} -flye: {cores: 20, mem: 60} -msstats: {mem: 24} -msstatstmt: {mem: 24} -xarray_select: {mem: 12} -datamash_transpose: {mem: 8} -dimspy_process_scans: - cores: 4 - mem: 12 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -dimspy_replicate_filter: - cores: 4 - mem: 12 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -dimspy_align_samples: - cores: 4 - mem: 12 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -dimspy_blank_filter: {mem: 12} -dimspy_sample_filter: {mem: 12} -dimspy_missing_values_sample_filter: {mem: 12} -drep_dereplicate: {cores: 6, mem: 12} -nanopolish_methylation: {cores: 5, mem: 12} -nanopolish_variants: {cores: 5, mem: 12} -nanopolish_eventalign: {cores: 5, mem: 12} -nanopolishcomp_eventaligncollapse: {cores: 10, mem: 12} -nanocompore_sampcomp: {cores: 9, mem: 48} -salsa: {cores: 1, mem: 12} -AccurateMassSearch: {cores: 4, mem: 8} -AdditiveSeries: {cores: 20, mem: 12} -# 
augustus: {runner: remote_cluster_mq_be01} -BaselineFilter: {cores: 4, mem: 8} -CONVERTER_bedgraph_to_bigwig: {mem: 8} -CVInspector: {cores: 4, mem: 8} -CompNovo: {cores: 4, mem: 8} -CompNovoCID: {cores: 4, mem: 8} -ConsensusID: {cores: 1, mem: 58} -ConsensusMapNormalizer: {cores: 4, mem: 8} -DeMeanderize: {cores: 4, mem: 8} -Decharger: {cores: 4, mem: 8} -DecoyDatabase: {cores: 4, mem: 8} -Digestor: {cores: 4, mem: 8} -DigestorMotif: {cores: 4, mem: 8} -EICExtractor: {cores: 4, mem: 8} -"EMBOSS: fuzztran39": {mem: 10} -ERPairFinder: {cores: 4, mem: 8} -FFEval: {cores: 4, mem: 8} -FalseDiscoveryRate: {cores: 4, mem: 8} -FeatureFinderCentroided: {cores: 4, mem: 8} -FeatureFinderIsotopeWavelet: {cores: 4, mem: 8} -FeatureFinderMRM: {cores: 4, mem: 8} -FeatureFinderMetabo: {cores: 4, mem: 8} -FeatureFinderMultiplex: {cores: 8, mem: 8} -FeatureFinderSuperHirn: {cores: 4, mem: 8} -FeatureLinkerLabeled: {cores: 4, mem: 8} -FeatureLinkerUnlabeled: {cores: 4, mem: 8} -FeatureLinkerUnlabeledQT: {cores: 4, mem: 8} -FidoAdapter: {cores: 8, mem: 8} -FileConverter: {cores: 4, mem: 8} -FileFilter: {cores: 4, mem: 8} -FileInfo: {cores: 4, mem: 8} -FileMerger: {cores: 4, mem: 16} -HighResPrecursorMassCorrector: {cores: 4, mem: 8} -IDConflictResolver: {cores: 4, mem: 8} -IDEvaluator: {cores: 4, mem: 8} -IDExtractor: {cores: 4, mem: 8} -IDFileConverter: {cores: 4, mem: 8} -IDFilter: {cores: 4, mem: 8} -IDMapper: {cores: 4, mem: 8} -IDMassAccuracy: {cores: 4, mem: 8} -IDMerger: {cores: 1, mem: 30} -IDPosteriorErrorProbability: {cores: 4, mem: 8} -IDRTCalibration: {cores: 4, mem: 8} -IDSplitter: {cores: 4, mem: 8} -ITRAQAnalyzer: {cores: 4, mem: 8} -ImageCreator: {cores: 4, mem: 8} -InclusionExclusionListCreator: {cores: 4, mem: 8} -InternalCalibration: {cores: 4, mem: 8} -IsobaricAnalyzer: {cores: 4, mem: 8} -LabeledEval: {cores: 4, mem: 8} -#maker: {cores: 8, mem: 8, runner: remote_cluster_mq_fr01} -maker: {cores: 12, mem: 12} -funannotate_predict: {cores: 12, mem: 12} -fastp: {cores: 4} -gmx_sim: {cores: 8, mem: 8, gpus: 1, runner: remote_cluster_mq_uk01} -gmx_em: {cores: 16, mem: 8} -gmx_fep: {cores: 16, mem: 8} -gsc_scran_normalize: {cores:1: mem:20} - -# gmx_md: {runner: remote_cluster_mq_de02} -# gmx_merge_topology_files: {runner: remote_cluster_mq_de02} -# gmx_em: {runner: remote_cluster_mq_de02} -# gmx_nvt: {runner: remote_cluster_mq_de02} -# gmx_npt: {runner: remote_cluster_mq_de02} -# gmx_setup: {runner: remote_cluster_mq_de02} -# gmx_solvate: {runner: remote_cluster_mq_de02} -# mdanalysis_hbonds: {runner: remote_cluster_mq_de02} -mdanalysis_extract_rmsd: {cores: 4, mem: 16} -MRMMapper: {cores: 4, mem: 8} -MRMPairFinder: {cores: 4, mem: 8} -msgfplus: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -MSGFPlusAdapter: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -medaka_variant: {mem: 48} -repeatmasker_wrapper: - cores: 8 - env: - RM_LIB_PATH: "/data/db/databases/dfam/3.4/" -repeatmodeler: {cores: 8} -mass_spectrometry_imaging_segmentations: {mem: 92} -mass_spectrometry_imaging_combine: {mem: 12} -mass_spectrometry_imaging_mzplots: {mem: 24} -msi_ion_images: {mem: 92} -msi_spectra_plot: {mem: 210} -MSSimulator: {cores: 4, mem: 8} -MapAlignerIdentification: {cores: 4, mem: 8} -MapAlignerPoseClustering: {cores: 4, mem: 8} -MapAlignerSpectrum: {cores: 4, mem: 8} 
-MapAlignmentEvaluation: {cores: 4, mem: 8} -MapNormalizer: {cores: 4, mem: 8} -MapRTTransformer: {cores: 4, mem: 8} -MapStatistics: {cores: 4, mem: 8} -MassCalculator: {cores: 4, mem: 8} -MassTraceExtractor: {cores: 4, mem: 8} -MyriMatchAdapter: {cores: 4, mem: 8} -MzTabExporter: {cores: 4, mem: 8} -QCCalculator: {mem: 8} -OpenSwathWorkflow: {mem: 156} -mira_assembler: {mem: 24} -meryl: {cores: 24, mem: 130} -mothur_align_check: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_align_seqs: {cores: 2, mem: 90, env: {TERM: vt100}} -mothur_amova: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_anosim: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_bin_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_chimera_bellerophon: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_ccode: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_check: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_perseus: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_pintail: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_slayer: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chimera_uchime: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_chop_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_classify_otu: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_classify_rf: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_classify_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_classify_tree: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_clearcut: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_cluster: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_cluster_classic: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_cluster_fragments: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_cluster_split: {cores: 1, mem: 90, env: {TERM: vt100}} -mothur_collect_shared: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_collect_single: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_consensus_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_cooccurrence: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_corr_axes: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_count_groups: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_count_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_create_database: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_degap_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_deunique_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_deunique_tree: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_dist_seqs: - {cores: 2, mem: 20, env: {TERM: vt100}, runner: condor_singularity} -mothur_dist_shared: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_fastq_info: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_filter_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_filter_shared: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_communitytype: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_get_coremicrobiome: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_dists: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_group: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_groups: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_label: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_lineage: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_mimarkspackage: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_otulabels: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_otulist: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_oturep: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_otus: {cores: 1, mem: 20, env: {TERM: vt100}} 
-mothur_get_rabund: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_relabund: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_sabund: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_get_sharedseqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_hcluster: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_heatmap_bin: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_heatmap_sim: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_homova: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_indicator: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_lefse: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_libshuff: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_list_otulabels: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_list_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_biom: {cores: 1, mem: 90, env: {TERM: vt100}} -mothur_make_contigs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_make_design: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_fastq: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_group: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_lefse: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_lookup: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_shared: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_make_sra: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_mantel: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_merge_files: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_merge_groups: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_merge_sfffiles: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_merge_taxsummary: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_metastats: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_mimarks_attributes: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_nmds: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_normalize_shared: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_otu_association: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_otu_hierarchy: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_pairwise_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_parse_list: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_parsimony: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_pca: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_pcoa: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_pcr_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_phylo_diversity: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_phylotype: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_pre_cluster: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_primer_design: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_rarefaction_shared: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_rarefaction_single: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_remove_dists: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_groups: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_lineage: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_otulabels: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_otus: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_rare: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_remove_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_reverse_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_screen_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_sens_spec: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_seq_error: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_sffinfo: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_shhh_flows: 
{cores: 2, mem: 20, env: {TERM: vt100}} -mothur_shhh_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_sort_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_split_abund: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_split_groups: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_sub_sample: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_summary_qual: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_summary_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_summary_shared: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_summary_single: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_summary_tax: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_tree_shared: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_trim_flows: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_trim_seqs: {cores: 2, mem: 20, env: {TERM: vt100}} -mothur_unifrac_unweighted: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_unifrac_weighted: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_unique_seqs: {cores: 1, mem: 20, env: {TERM: vt100}} -mothur_venn: {cores: 1, mem: 20, env: {TERM: vt100}} -mz_to_sqlite: - mem: 16 - env: - _JAVA_OPTIONS: -Xmx16G -Xms1G -NSPDK_candidateClust: {mem: 32} -novoplasty: {mem: 40} -NoiseFilterGaussian: {cores: 4, mem: 8} -NoiseFilterSGolay: {cores: 4, mem: 8} -OMSSAAdapter: {cores: 4, mem: 8} -OpenSwathAnalyzer: {cores: 4, mem: 8} -OpenSwathChromatogramExtractor: {cores: 4, mem: 8} -OpenSwathConfidenceScoring: {cores: 4, mem: 8} -OpenSwathDIAPreScoring: {cores: 4, mem: 8} -OpenSwathDecoyGenerator: {cores: 4, mem: 8} -OpenSwathFeatureXMLToTSV: {cores: 4, mem: 8} -OpenSwathRTNormalizer: {cores: 4, mem: 8} -OpenSwathRewriteToFeatureXML: {cores: 4, mem: 8} -PTModel: {cores: 4, mem: 8} -PTPredict: {cores: 4, mem: 8} -PeakPickerHiRes: {cores: 4, mem: 8} -PeakPickerWavelet: {cores: 4, mem: 8} -PepNovoAdapter: {cores: 4, mem: 8} -PeptideIndexer: {cores: 4, mem: 8} -pepquery: - mem: 8 - env: - _JAVA_OPTIONS: -Xmx8G -Xms1G -PicardASMetrics: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -PicardGCBiasMetrics: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -PicardHsMetrics: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -PicardInsertSize: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -picard_CleanSam: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -AssayGeneratorMetabo: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -PrecursorIonSelector: {cores: 4, mem: 8} -PrecursorMassCorrector: {cores: 4, mem: 8} -ProteinInference: {cores: 4, mem: 8} -ProteinQuantifier: {cores: 4, mem: 8} -ProteinResolver: {cores: 4, mem: 8} -RNPxl: {cores: 4, mem: 8} -RTEvaluation: {cores: 4, mem: 8} -RTModel: {cores: 4, mem: 8} -RTPredict: {cores: 4, mem: 8} -Resampler: {cores: 4, mem: 8} -sopra_wpc: {cores: 4, mem: 16} -stacks2_denovomap: {cores: 4, mem: 16} -stacks_clonefilter: {mem: 8} -stacks2_clonefilter: {mem: 8} -SeedListGenerator: {cores: 4, mem: 8} -SemanticValidator: {cores: 4, mem: 8} -SequenceCoverageCalculator: {cores: 4, mem: 8} -SpecLibCreator: {cores: 4, mem: 8} -SpecLibSearcher: {cores: 4, mem: 8} -SpectraFilterBernNorm: {cores: 4, mem: 8} -SpectraFilterMarkerMower: {cores: 4, mem: 8} -SpectraFilterNLargest: {cores: 4, mem: 8} -SpectraFilterNormalizer: {cores: 4, mem: 8} -SpectraFilterParentPeakMower: {cores: 4, mem: 8} -SpectraFilterScaler: {cores: 4, mem: 8} -SpectraFilterSqrtMower: {cores: 4, mem: 8} -SpectraFilterThresholdMower: {cores: 4, mem: 8} -SpectraFilterWindowMower: {cores: 4, mem: 8} -SpectraMerger: {cores: 4, mem: 8} -TMTAnalyzer: {cores: 4, mem: 8} -TOFCalibration: {cores: 4, mem: 
8} -TextExporter: {cores: 4, mem: 8} -TransformationEvaluation: {cores: 4, mem: 8} -XMLValidator: {cores: 4, mem: 8} -XTandemAdapter: {cores: 4, mem: 8} -racon: {cores: 16, mem: 36} -abims_xcms_fillPeaks: {mem: 32} -abims_xcms_retcor: {mem: 32} -abims_CAMERA_annotateDiffreport: {mem: 32} -abyss-pe: {cores: 20, mem: 70} -anndata_export: {cores: 4} -anndata_import: {mem: 200} -anndata_inspect: {cores: 4} -anndata_manipulate: {cores: 4} -antismash: - cores: 10 - mem: 90 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx96G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -bam-to-wig: {mem: 20} -bamCompare_deepTools: {mem: 10} -bamCorrelate_deepTools: {cores: 10, mem: 90} -bamCoverage_deepTools: {mem: 10} -bamFingerprint: {mem: 10} -bedtools_intersectbed: {mem: 40} -bedtools_intersectbed_bam: {mem: 40} -bedtools_mergebed: {mem: 15} -bedtools_sortbed: {mem: 20} -bedtools_subtractbed: {mem: 8} -bfast_wrapper: {cores: 10, mem: 20} -bg_diamond: {cores: 6, mem: 90} -bg_diamond_makedb: {cores: 10, mem: 40} -bgchem_fragment_merger: {cores: 10, mem: 40} -bigwig_to_bedgraph: {mem: 12} -biosigner: {mem: 12} -bismark_bowtie: {cores: 6, mem: 30, name: bismark, tmp: large} -bismark_bowtie2: {cores: 6, mem: 30, name: bismark, tmp: large} -bismark_methylation_extractor: {cores: 4, mem: 12} -blast2go: {mem: 20} -blast_parser: {mem: 8} -blockbuster: {mem: 64} -blockclust: {mem: 10} -bowtie2: {cores: 8, mem: 20} -bwa: {cores: 8, mem: 20} -bwa_mem: {cores: 8, mem: 30} -bwa_mem2: {cores: 8, mem: 30} -bwa_mem_index_builder_data_manager: {cores: 12, mem: 48} -bwameth_index_builder_data_manager: {cores: 12, mem: 92} -kallisto_index_builder_data_manager: {cores: 12, mem: 92} -salmon_index_builder_data_manager: {cores: 12, mem: 92} -data_manager_metaphlan_download: {cores: 12, mem: 92} -data_manager_funannotate: {cores: 1, mem: 64} -data_manager_build_kraken2_database: {cores: 1, mem: 64} -bwameth: {cores: 8, mem: 24} -bwtool-lift: {mem: 80} -cat_contigs: {mem: 24} ## Diamond in the background -cat_bins: {mem: 24} ## Diamond in the background -cherri_eval: {cores: 1, mem: 20} -cherri_train: {cores: 1, mem: 40} -chira_map: {cores: 10, mem: 80, runner: condor_singularity_with_conda} -chira_merge: {cores: 1, mem: 60, runner: condor_singularity_with_conda} -chira_quantify: {cores: 1, mem: 60, runner: condor_singularity_with_conda} -chira_extract: {cores: 24, mem: 100, runner: condor_singularity_with_conda} -canu: {cores: 20, mem: 92} -mass_spectrometry_imaging_preprocessing: {mem: 110} -mass_spectrometry_imaging_ion_images: {mem: 20} -mass_spectrometry_imaging_qc: {mem: 110} -mass_spectrometry_imaging_filtering: {mem: 20} -metaspades: {cores: 10, mem: 250} -megahit: {cores: 10, mem: 80} -megan_daa_meganizer: - mem: 40 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx40G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -megan_daa2rma: - mem: 40 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx40G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -megan_sam2rma: - mem: 40 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx40G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -maxbin2: {cores: 10, mem: 48} -metabat2: {cores: 10, mem: 48} -concoct: {cores: 10, mem: 48} -charts: {mem: 10} -circgraph: {mem: 10} -computeMatrix: {mem: 90} -correctGCBias: {mem: 10} -cshl_fastx_collapser: {mem: 16} -create_tool_recommendation_model: {cores: 16, mem: 160} -crispr_recognition_tool: {mem: 10} -ctb_np-likeness-calculator: {mem: 12} -ctb_online_data_fetch: {mem: 10} -ctb_openmg: - mem: 20 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G 
-Djava.io.tmpdir=/data/2/galaxy_db/tmp -ctb_pubchem_download_as_smiles: {cores: 10, mem: 20} -cuffcompare: {mem: 10} -cuffdiff: {cores: 6, mem: 40} -cufflinks: {cores: 6, mem: 30} -cuffnorm: {cores: 6, mem: 20} -cuffquant: {cores: 6, mem: 20} -data_manager_gemini_download: {mem: 20} -data_manager_humann2_download: {mem: 25} -data_manager_snpeff_databases: {mem: 12} -data_manager_snpeff_download: {mem: 12} -deeptools_bamCompare: {cores: 10, mem: 12} -deeptools_bamCorrelate: {cores: 10, mem: 90} -deeptools_bamCoverage: {cores: 10, mem: 48} -deeptools_bamFingerprint: {cores: 10, mem: 12} -deeptools_bam_compare: {cores: 10, mem: 24} -deeptools_bam_coverage: {cores: 10, mem: 24} -deeptools_bam_pe_fragmentsize: {cores: 10, mem: 24} -deeptools_bigwigCompare: {cores: 10, mem: 12} -deeptools_bigwigCorrelate: {cores: 10, mem: 40} -deeptools_bigwig_compare: {cores: 10, mem: 24} -deeptools_computeGCBias: {cores: 10, mem: 24} -deeptools_computeMatrix: {cores: 10, mem: 40} -deeptools_compute_gc_bias: {cores: 10, mem: 24} -deeptools_compute_matrix: {cores: 10, mem: 30} -deeptools_correctGCBias: {cores: 10, mem: 24} -deeptools_correct_gc_bias: {cores: 10, mem: 24} -deeptools_heatmapper: {mem: 25} -deeptools_multi_bam_summary: {cores: 10, mem: 24} -deeptools_multi_bigwig_summary: {cores: 10, mem: 40} -deeptools_plot_correlation: {mem: 20} -deeptools_plot_coverage: {mem: 20} -deeptools_plot_fingerprint: {mem: 20} -deeptools_plot_heatmap: {mem: 25} -deeptools_plot_pca: {mem: 20} -deeptools_plot_profile: {mem: 20} -deeptools_profiler: {mem: 20} -deseq2: {mem: 8} -dexseq_count: {mem: 25} -dexseq: {cores: 8} -diamond_database_builder: {cores: 10, mem: 90} -dt_profiler: {mem: 10} -eggnog_mapper: {cores: 8, mem: 12} -eukaryotic_ncbi_submission: {cores: 24, mem: 24} -fastq_dump: {mem: 20} -fastqc: {cores: 8} -featurecounts: {cores: 8, mem: 18} -feebayes: {cores: 10, mem: 12} -flashlfq: - env: - MONO_GC_PARAMS: max-heap-size=90g - mem: 90 -flexbar: {cores: 10, mem: 12} -flexbar_no_split: {cores: 10, mem: 12} -flexbar_split_RR_bcs: {cores: 10, mem: 12} -flexbar_split_RYYR_bcs: {cores: 10, mem: 12} -freebayes: {cores: 10, mem: 90} -gatk2_base_recalibrator: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx8G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 24 - name: _special -gatk2_depth_of_coverage: {cores: 10, mem: 24} -gatk2_haplotype_caller: {cores: 10, mem: 24} -gatk2_indel_realigner: {mem: 10} -gatk2_print_reads: {cores: 10, mem: 24} -gatk2_realigner_target_creator: {cores: 10, mem: 24} -gatk2_reduce_reads: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx8G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 24 - name: _special -gatk2_unified_genotyper: {cores: 10, mem: 24} -gatk2_variant_annotator: {cores: 10, mem: 24} -gatk2_variant_apply_recalibration: {cores: 10, mem: 24} -gatk2_variant_combine: {cores: 10, mem: 24} -gatk2_variant_eval: {cores: 10, mem: 24} -gatk2_variant_filtration: {mem: 10} -gatk2_variant_recalibrator: {cores: 10, mem: 24} -gatk2_variant_select: {cores: 10, mem: 24} -gatk2_variant_validate: {cores: 10, mem: 24} -gatk_picard_index_builder: {mem: 12} -gemini_load: {cores: 10, mem: 40} - -gemini_inheritance: - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -# Permissions -#gemini_query: -# permissions: -# allow: -# roles: -# - admin - -ggplot2_heatmap2: {mem: 24} -glassgo: {mem: 4, runner: condor_docker} 
-graphprot_predict_profile: {mem: 16} -# this tool was developed by IGC Bioinformatics Unit and Daniel Sobral from ELIXIR-PT -# goenrichment: {runner: remote_cluster_mq_pt01} -hammock_1.0: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 20 - name: java_temp -heatmapper: {mem: 25} -heatmapper_deepTools: {mem: 25} -hicexplorer_hicbuildmatrix: {cores: 10, mem: 160} -hicexplorer_hiccorrectmatrix: {mem: 64} -hicexplorer_hiccorrelate: {mem: 20} -hicexplorer_hicfindtads: {mem: 20} -hicexplorer_hicplotmatrix: {mem: 64} -hicexplorer_hicplottads: {mem: 64} -hicexplorer_hicsummatrices: {mem: 64} -hicexplorer_hicpca: {cores: 10, mem: 150} -hicexplorer_hicmergematrixbins: {mem: 80} -hicexplorer_hictransform: {cores: 10, mem: 60} -hicexplorer_hicplotviewpoint: {mem: 12} -hicexplorer_hicaggregatecontacts: {mem: 12} -hicexplorer_chicaggregatestatistic: {cores: 20, mem: 60} -hicexplorer_chicdifferentialtest: {cores: 20, mem: 60} -hicexplorer_chicplotviewpoint: {cores: 20, mem: 60} -hicexplorer_chicqualitycontrol: {cores: 20, mem: 60} -hicexplorer_chicsignificantinteractions: {cores: 20, mem: 60} -hicexplorer_chicviewpoint: {cores: 20, mem: 60} -hicexplorer_chicviewpointbackgroundmodel: {cores: 20, mem: 60} - -schicexplorer_schicdemultiplex: {cores: 5, mem: 64} -schicexplorer_schicadjustmatrix: {cores: 5, mem: 64} -schicexplorer_schiccluster: {cores: 20, mem: 64} -schicexplorer_schiccorrectmatrices: {cores: 20, mem: 110} -schicexplorer_schiccreatebulkmatrix: {cores: 10, mem: 64} -schicexplorer_schicclustercomparments: {cores: 20, mem: 64} -schicexplorer_schicclusterminhash: {cores: 20, mem: 240} -schicexplorer_schicclustersvl: {cores: 20, mem: 20} -schicexplorer_schicconsensusmatrices: {cores: 20, mem: 64} -schicexplorer_schicinfo: {cores: 1, mem: 8} -schicexplorer_schicmergematrixbins: {cores: 20, mem: 64} -schicexplorer_schicmergetoscool: {cores: 20, mem: 64} -schicexplorer_schicnormalize: {cores: 20, mem: 64} -schicexplorer_schicplotclusterprofiles: {cores: 20, mem: 64} -schicexplorer_schicplotconsensusmatrices: {cores: 10, mem: 32} -schicexplorer_schicqualitycontrol: {cores: 20, mem: 64} - -gubbins: {cores: 6} -hicup_mapper: {cores: 6, mem: 24} -hifiasm: {cores: 20, mem: 120} -hisat: {cores: 10, mem: 20} - -# It seems that we are running out of temp space according to error reports from 21.03.2021 -# [E::hts_open_format] Failed to open file "/var/lib/condor/execute/dir_2954912.0000.bam" : No such file or directory -# FATAL: Unable to handle docker://quay.io/biocontainers/mulled-v2-b570fc8a7b25c6a733660cda7e105007b53ac501:26fa90e5bca490ccf5ba8db752fa36fd5834ef00-0 uri: while building SIF from layers: packer failed to pack: while unpacking tmpfs: error unpacking rootfs: unpack layer: unpack entry: usr/local/bin/hisat2-build-l: unpack to regular file: short write: write /var/lib/condor/execute/dir_2147833/build-temp-943357231/rootfs/usr/local/bin/hisat2-build-l: no space left on device -#hisat2: {cores: 8, mem: 20} -hisat2: {cores: 8, mem: 20} - -hisat2_index_builder_data_manager: {cores: 10, mem: 180} -hmmer_hmmsearch: {mem: 10} -htseq_count: {mem: 32} -humann2: {cores: 6, mem: 90} -humann: {cores: 6, mem: 90} - -hyphy_busted: {cores: 20, mem: 10} -hyphy_bgm: {cores: 20, mem: 10} -hyphy_gard: {cores: 20, mem: 10} -hyphy_absrel: {cores: 20, mem: 10} -hyphy_fubar: {cores: 20, mem: 10} -hyphy_relax: {cores: 20, mem: 10} -hyphy_fade: {cores: 20, mem: 10} -hyphy_sm19: {cores: 20, mem: 10} -hyphy_meme: {cores: 20, mem: 10} -hyphy_slac: {cores: 20, mem: 10} 
-hyphy_fel: {cores: 20, mem: 10} -hyphy_prime: {cores: 20} -hypo: {cores: 8} - -infernal_cmbuild: {cores: 10, mem: 20} -infernal_cmsearch: {cores: 10, mem: 20} -interproscan: - #env: - # PATH: $PATH:/data/0/interproscan/interproscan-5.36-75.0/ - mem: 40 - cores: 6 -iqtree: {cores: 10} -iterative_map_pipeline: {mem: 60} -je_clip: {cores: 8} -je_demultiplex: {cores: 8, mem: 20} -je_demultiplex_illu: {cores: 8} -je_markdupes: {cores: 8} -join1: {mem: 18} -#jq: {runner: condor_singularity_with_conda} -kallisto_quant: {cores: 8, mem: 20} -## uha, with the reference.fa file from the history it is not working? -## kallisto_quant: {cores: 8, mem: 20_singularity} -lastz_wrapper_2: {mem: 8} -limma_voom: {mem: 4} -lofreq_call: {cores: 2, mem: 8} -lotus2: {cores: 4, mem: 8} -khmer_abundance_distribution_single: {cores: 8, mem: 8} -macs2_bdgdiff: {mem: 10} -macs2_callpeak: {mem: 15} -maldi_quant_preprocessing: {mem: 400} -maldi_quant_peak_detection: {mem: 180} -mass_spectrometry_imaging_classification: {mem: 90} -megablast_wrapper: {mem: 20} -meme_dreme: {mem: 16} -meme_fimo: {mem: 4} -merge_pcr_duplicates.py: {mem: 4} -methtools_calling: {cores: 10, mem: 40} -methtools_filter: {mem: 10} -methtools_plot: {cores: 10, mem: 20} -metilene: {cores: 10, mem: 20} -mimodd_varcall: {cores: 6} -minced: {mem: 10} -migmap: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=6G -Xmx14G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 16 - name: java_temp -proteomics_moff: {mem: 20, cores: 6} -metagene_annotator: {mem: 16} -morpheus: - env: - MONO_GC_PARAMS: max-heap-size=2g - mem: 64 -msaboot: {mem: 6} -naive_variant_caller: {mem: 20} -ncbi_blastp_wrapper: - { - cores: 10, - mem: 40, - runner: condor_intensive_jobs, - force_destination_id: true, - } -ncbi_blastn_wrapper: {cores: 10, mem: 40} -ncbi_tblastn_wrapper: {cores: 10, mem: 40} -ncbi_blastx_wrapper: {cores: 10, mem: 40} -ncbi_makeblastdb: {mem: 20} -nspdk_sparse: {mem: 16} -numeric_clustering: {mem: 12} -peakachu: {mem: 16} -peptide_shaker: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=4G -Xmx120G -Xms4G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 90 - name: _special -picard_ARRG: {mem: 12} -picard_AddOrReplaceReadGroups: {mem: 12} -picard_BamIndexStats: {mem: 12} -picard_CASM: {mem: 12} -picard_CollectInsertSizeMetrics: {mem: 12} -picard_CollectRnaSeqMetrics: {mem: 12} -picard_DownsampleSam: {mem: 12} -picard_EstimateLibraryComplexity: {mem: 12} -picard_NormalizeFasta: {mem: 12} -picard_FilterSamReads: - env: - TMP_DIR: $TMPDIR - mem: 12 - tmp: large -picard_FixMateInformation: {mem: 12} -picard_FastqToSam: {mem: 12} -picard_MarkDuplicates: - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 12 - name: java_temp -picard_MergeSamFiles: {mem: 12} -picard_QualityScoreDistribution: {mem: 12} -picard_ReorderSam: {mem: 12} -picard_ReplaceSamHeader: {mem: 12} -picard_SamToFastq: {mem: 12} -picard_SortSam: - env: - _JAVA_OPTIONS: -Xmx4G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 10 - name: java_temp -picard_index_builder_data_manager: {mem: 12} -pileometh: {cores: 1, mem: 24} -piranha: {mem: 15} -plotly_regression_performance_plots: {mem: 8} -porechop: {mem: 40} -preproc: {mem: 10} -prokaryotic_ncbi_submission: {cores: 24, mem: 24} -prokka: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 20 - cores: 8 -bbtools_callvariants: - mem: 15 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp 
-proteomics_search_msgfplus_1: {mem: 10} -pyprophet_score: {mem: 400, cores: 1} -pureclip: {mem: 32, cores: 2} -qualimap_bamqc: {mem: 24} -qualimap_rnaseq: {mem: 12} -quality_metrics: {mem: 12} -r_correlation_matrix: {mem: 80} -rbc_mirdeep2_mapper: {cores: 10, mem: 20} -rcas: {cores: 4, mem: 16} -ribotaper_ribosome_profiling: {cores: 12, mem: 16} -reactome_pathwaymatcher: - mem: 20 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx17G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -rgEstLibComp: {mem: 12} -rgPicFixMate: {mem: 12} -rgPicardMarkDups: - mem: 12 - env: - _JAVA_OPTIONS: -Xmx12G -Xms1G -rm_spurious_events.py: {mem: 4} -rna_star: {cores: 10, mem: 50} -rna_starsolo: {cores: 4, mem: 40} -rna_star_index_builder_data_manager: - cores: 10 - mem: 300 - params: - local_slots: 6 -rnbeads: {mem: 20} -rsem_calculate_expression: {mem: 16} -rseqc_bam2wig: {cores: 8, mem: 16} -sailfish: {cores: 6, mem: 70} -salmon: {cores: 6, mem: 70} -scanpy_cluster_reduce_dimension: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -scanpy_filter: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -scanpy_inspect: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -scanpy_normalize: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -scanpy_remove_confounders: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -scanpy_plot: - cores: 4 - env: - NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp - OMP_NUM_THREADS: 4 - OPENBLAS_NUM_THREADS: 4 - MKL_NUM_THREADS: 4 - VECLIB_MAXIMUM_THREADS: 4 - NUMEXPR_NUM_THREADS: 4 - NUMBA_NUM_THREADS: 4 - -sam_merge2: - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp - mem: 32 -# samtools_stats: {runner: remote_cluster_mq_de01} -samtools_sort: {cores: 10, mem: 10} -search_gui: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - mem: 12 - name: special -secretbt2test: {cores: 10, mem: 20} -segemehl: {mem: 80} -seq_filter_by_mapping: {mem: 8} -SiriusAdapter: - mem: 16 - env: - _JAVA_OPTIONS: -Xmx16G -Xms1G -shovill: - cores: 4 - mem: 50 - env: - SHOVILL_RAM: 50 -signalp3: {mem: 10} -smooth_running_window: {mem: 32} -snippy: {mem: 12} -snpEff: {mem: 12} -snpEff_databases: {mem: 12} -snpEff_download: {mem: 12} -snpEff_get_chr_names: {mem: 12} -snpEff_build_gb: - mem: 48 - env: - _JAVA_OPTIONS: -Xmx48G -Xms1G -snpSift_annotate: {mem: 12} -snpSift_caseControl: {mem: 12} -snpSift_filter: {mem: 18} -snpSift_geneSets: {mem: 12} -snpSift_int: {mem: 12} -spades: {cores: 20, mem: 330} -spyboat: {cores: 8, mem: 4} -sshmm: {mem: 16} -structurefold: {mem: 12} -rnaspades: {cores: 10, mem: 250} -spades_biosyntheticspades: {cores: 10, mem: 450} -spades_coronaspades: {cores: 10, mem: 250} -spades_metaplasmidspades: 
{cores: 10, mem: 330} -spades_metaviralspades: {cores: 10, mem: 250} -spades_plasmidspades: {cores: 10, mem: 250} -spades_rnaviralspades: {cores: 10, mem: 250} -strelka_germline: {cores: 8, mem: 12} -strelka_somatic: {cores: 8, mem: 12} -stringtie: {mem: 25} -t_coffee: - mem: 90 - env: - DIR_4_TCOFFEE: $TMP - TMP_4_TCOFFEE: $TMP - CACHE_4_TCOFFEE: $TMP -tophat2: {cores: 10, mem: 90} -tp_easyjoin_tool: {mem: 12} -tp_multijoin_tool: {mem: 8} -tp_uniq_tool: {mem: 12} -trimmomatic: - cores: 6 - mem: 12 - name: special - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp -trinity: - cores: 4 - mem: 512 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx170G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - name: special -umi_tools_group: {mem: 12} -umi_tools_dedup: {mem: 90} -umi_tools_extract: {mem: 4} -unicycler: - cores: 10 - env: - _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp - TERM: vt100 - mem: 90 - name: special -unzip: {runner: condor_singularity} -wig_to_bigWig: {mem: 10} -valet: {cores: 8, mem: 20} -varscan_somatic: {cores: 8, mem: 12} -varscan_mpileup: {mem: 12} -varscan_copynumber: {mem: 12} -velvetg: - cores: 4 - env: - OMP_NUM_THREADS: 4 - OMP_THREAD_LIMIT: 4 - mem: 32 - name: _velvetg -velveth: - cores: 4 - env: - OMP_NUM_THREADS: 4 - OMP_THREAD_LIMIT: 4 - mem: 16 - name: _velveth -vsearch_search: {mem: 80} -vsearch_alignment: {cores: 16, mem: 24} -yahs: {mem: 100} - -# Climate tool -cesm: {cores: 16, mem: 128} - -# Ecology tools -srs_preprocess_s2: {mem: 16} - -# Some admin tools -echo_main_env: - permissions: - allow: - roles: - - admin - -#tp_awk_tool: -# runner: condor_docker -# permissions: -# allow: -# roles: -# - admin - -#tp_replace_in_column: -# permissions: -# allow: -# roles: -# - admin - -# permissions testing. 
-_test_permissions_0: {} - -_test_permissions_1: - permissions: - allow: - users: - - b@example.com - -_test_permissions_2: - permissions: - allow: - users: - - a@example.com - roles: - - role-b diff --git a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/destinations.yml b/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/destinations.yml deleted file mode 100644 index f07fbe08f..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/total_perspective_vortex/destinations.yml +++ /dev/null @@ -1,93 +0,0 @@ ---- -# ALL tags must be with dashes (-) instead of underscores (_) -destinations: - interactive_pulsar: - runner: pulsar_embedded - cores: 24 # arbritrary max - mem: 128 # arbritrary max - params: - tmp_dir: true - docker_enabled: true - docker_sudo: false - docker_net: bridge - docker_auto_rm: true - docker_set_user: "" - require_container: true - outputs_to_working_directory: false - container_monitor_result: callback - submit_requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' - scheduling: - prefer: - - docker - - interactive - - embedded_pulsar_docker: - runner: pulsar_embedded - params: - tmp_dir: true - docker_enabled: true - #docker_volumes: $defaults - docker_sudo: false - docker_net: bridge - docker_auto_rm: true - docker_set_user: "" - require_container: true - outputs_to_working_directory: false - container_monitor_result: callback - submit_requirements: 'GalaxyDockerHack == True' - scheduling: - prefer: - - docker - - embedded-pulsar - - condor_singularity: - runner: condor - cores: 24 # arbritrary max - mem: 128 # arbritrary max - params: - singularity_enabled: true - singularity_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" - singularity_default_container_id: "/cvmfs/singularity.galaxyproject.org/all/centos:8.3.2011" - tmp_dir: true - requirements: 'GalaxyGroup == "compute"' - metadata_strategy: "extended" - scheduling: - prefer: - - singularity - - condor_tpv: - runner: condor - cores: 64 - mem: 1000 - scheduling: - accept: - - condor-tpv - - pulsar_mira_tpv: - cores: 2 - mem: 4 - scheduling: - require: - - mira-pulsar - - docker - - pulsar_it_tpv: - cores: 16 - mem: 31 - scheduling: - require: - - pulsar - - pulsar_be_tpv: - cores: 8 - mem: 15 - scheduling: - require: - - pulsar - - condor_singularity_with_conda_python2: - cores: 64 - mem: 1000 - scheduling: - require: - - condor-singularity-with-conda-python2 diff --git a/files/galaxy/dynamic_rules/usegalaxy/wig2bigwig.py b/files/galaxy/dynamic_rules/usegalaxy/wig2bigwig.py deleted file mode 100644 index a98d0c180..000000000 --- a/files/galaxy/dynamic_rules/usegalaxy/wig2bigwig.py +++ /dev/null @@ -1,19 +0,0 @@ -from galaxy.jobs import JobDestination -import os - -def wig_to_bigwig( job, tool ): - # wig_to_bigwig needs a lot of memory if the input file is big - inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) - inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) - wig_file = inp_data[ "input1" ].file_name - wig_file_size = 
os.path.getsize(wig_file) / (1024*1024.0) - - # according to http://genome.ucsc.edu/goldenpath/help/bigWig.html - # wig2bigwig uses a lot of memory; somewhere on the order of 1.5 times more memory than the uncompressed wiggle input file - required_memory = min(max(wig_file_size * 3.0, 16 * 1024), 250*1024) # our biggest memory node has 256GB memory - params = {} - # params["nativeSpecification"] = """ -q galaxy1.q,all.q -p -128 -l galaxy1_slots=1 -l h_vmem=%sM -v _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1 """ % (required_memory) - params['request_memory'] = required_memory / 1024 - params['requirements'] = '(GalaxyGroup == "compute")' - - return JobDestination(id="wig_to_bigwig_job_destination", runner="condor", params=params) diff --git a/files/galaxy/gxadmin/gxadmin-local b/files/galaxy/gxadmin/gxadmin-local deleted file mode 100644 index c6220b256..000000000 --- a/files/galaxy/gxadmin/gxadmin-local +++ /dev/null @@ -1,8 +0,0 @@ -local_cu() { ## : Shows active users in last 10 minutes - handle_help "$@" <<-EOF - cu unique sorts the IP adresses from gunicorns log, using "GET /history/current_history_json" - and prints it in influx line format - EOF - - echo "active_users,timespan=last_10_min users=$(journalctl -u galaxy-gunicorn@*.service --since "10 minutes ago" | grep "/history/current_history_json" | awk "{print \$11}" | sort -u | wc -l)" -} diff --git a/files/galaxy/tpv/destinations.yml.j2 b/files/galaxy/tpv/destinations.yml.j2 new file mode 100644 index 000000000..81fa43c87 --- /dev/null +++ b/files/galaxy/tpv/destinations.yml.j2 @@ -0,0 +1,374 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +--- +# NOTE: Use dashes (-) exclusively for tags and underscores (_) exclusively for destinations. 
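The template below assembles its docker_volumes / singularity_volumes strings with Jinja loops over a `dnb` volume inventory, and the `#jinja2: trim_blocks: True, lstrip_blocks: True` header above makes the `{% ... %}` lines disappear without leaving stray whitespace inside the quoted YAML string. A minimal, self-contained sketch of that expansion (the `dnb` entries here are made-up placeholders, not the real volume inventory supplied by the playbook's vars):

# Minimal sketch of how the volume-list loops below render; the dnb
# inventory here is a made-up placeholder, not the real volume layout.
from jinja2 import Environment

TEMPLATE = (
    'docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,\n'
    "  {% for vol in dnb.values() %}\n"
    "  {% if vol.name not in ['db'] %}\n"
    "  {{ vol.path }}/galaxy_db/:{{ vol.docker_perm }},\n"
    "  {% endif %}\n"
    "  {% endfor %}\n"
    '  {{ dnb.db.path }}/:{{ dnb.db.docker_perm }}"'
)

# Same options as set by the "#jinja2:" header above.
env = Environment(trim_blocks=True, lstrip_blocks=True)
dnb = {
    "0": {"name": "0", "path": "/data/0", "docker_perm": "ro"},
    "db": {"name": "db", "path": "/data/db", "docker_perm": "ro"},
}
print(env.from_string(TEMPLATE).render(dnb=dnb))
# docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,
#   /data/0/galaxy_db/:ro,
#   /data/db/:ro"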
+# The parameter is called submit_request_cpus in Pulsar; in plain condor it is only request_cpus.
+destinations:
+  ######################
+  # BASIC DESTINATIONS #
+  ######################
+  basic_docker_destination:
+    abstract: true
+    params:
+      docker_enabled: true
+      docker_sudo: false
+      docker_net: bridge
+      docker_auto_rm: true
+      docker_set_user: ""
+      # $defaults expands to "$galaxy_root:ro,$tool_directory:ro,$job_directory:ro,$working_directory:rw,$default_file_path:rw"
+      docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,
+        $defaults,
+        {% for vol in dnb.values() %}
+        {% if vol.name not in ['db', 'dnb04', 'dnb-ds01', 'dnb-ds02', '5'] %}
+        {{ vol.path }}/galaxy_db/:{{ vol.docker_perm }},
+        {% endif %}
+        {% endfor %}
+        {{ dnb.5.path }}/galaxy_import/galaxy_user_data/:{{ dnb.5.docker_perm }},
+        {{ dnb.db.path }}/:{{ dnb.db.docker_perm }},
+        {{ cvmfs.data.path }}:{{ cvmfs.data.docker_perm }}"
+      require_container: true
+      submit_request_cpus: "{cores}"
+      submit_request_memory: "{mem}G"
+      outputs_to_working_directory: false
+      container_monitor_result: callback
+      submit_requirements: "GalaxyDockerHack == True"
+
+  basic_singularity_destination:
+    abstract: true
+    params:
+      submit_request_cpus: "{cores}"
+      submit_request_memory: "{mem}G"
+      singularity_enabled: true
+      singularity_volumes: "$_CONDOR_SCRATCH_DIR:rw,
+        $job_directory:rw,
+        $tool_directory:ro,
+        $job_directory/outputs:rw,
+        $working_directory:rw,
+        {% for vol in dnb.values() %}{% if vol.name not in ['5', 'dnb04', 'dnb-ds01', 'dnb-ds02', 'db'] %}
+        {{ vol.path }}/galaxy_db/:{{ vol.docker_perm }},
+        {% endif %}
+        {% endfor %}
+        {{ dnb.5.path }}/galaxy_import/galaxy_user_data/:{{ dnb.5.docker_perm }},
+        {{ dnb.db.path }}/:{{ dnb.db.docker_perm }},
+        {{ tools.tools.path }}/:{{ tools.tools.docker_perm }}"
+      singularity_default_container_id: "{{ cvmfs.singularity.path }}/all/centos:8.3.2011"
+
+  ################################
+  # EMBEDDED PULSAR DESTINATIONS #
+  ################################
+
+  interactive_pulsar:
+    inherits: basic_docker_destination
+    runner: pulsar_embedded
+    max_accepted_cores: 24
+    max_accepted_mem: 128
+    min_accepted_gpus: 0
+    max_accepted_gpus: 0
+    scheduling:
+      accept:
+        - docker
+      require:
+        - interactive
+
+  interactive_pulsar_gpu:
+    inherits: interactive_pulsar
+    min_accepted_gpus: 1
+    max_accepted_gpus: 1
+    env:
+      GPU_AVAILABLE: "1"
+    params:
+      requirements: 'GalaxyGroup == "compute_gpu"'
+
+  interactive_pulsar_rstudio_poweruser:
+    inherits: interactive_pulsar
+    min_accepted_gpus: 0
+    max_accepted_gpus: 0
+    scheduling:
+      require:
+        - rstudio-poweruser
+
+  embedded_pulsar_docker:
+    inherits: basic_docker_destination
+    runner: pulsar_embedded
+    max_accepted_cores: 24
+    max_accepted_mem: 128
+    min_accepted_gpus: 0
+    max_accepted_gpus: 0
+    scheduling:
+      require:
+        - docker
+        - embedded-pulsar
+
+  embedded_pulsar_docker_gpu:
+    inherits: embedded_pulsar_docker
+    min_accepted_gpus: 1
+    max_accepted_gpus: 1
+    env:
+      GPU_AVAILABLE: "1"
+    params:
+      requirements: 'GalaxyGroup == "compute_gpu"'
+
+  #######################
+  # PULSAR DESTINATIONS #
+  #######################
+
+  pulsar_default: # use for remote Pulsar nodes and ALWAYS overwrite the runner.
+ abstract: true + runner: pulsar_embedded + env: + LC_ALL: C + SINGULARITY_CACHEDIR: /data/share/var/database/container_cache # On the NFS share on remote Pulsar side + params: + jobs_directory: /data/share/staging + transport: curl + remote_metadata: "false" + metadata_strategy: directory + default_file_action: remote_transfer + rewrite_parameters: "true" + persistence_directory: /data/share/persisted_data + outputs_to_working_directory: "false" + dependency_resolution: "none" + submit_request_cpus: "{cores}" + submit_request_memory: "{mem}" + docker_volumes: "$job_directory:rw, + $tool_directory:ro, + $job_directory/outputs:rw, + $working_directory:rw, + {{ cvmfs.data.path }}:{{ cvmfs.data.docker_perm }}" + singularity_volumes: "$job_directory:rw, + $tool_directory:ro, + $job_directory/outputs:rw, + $working_directory:rw, + {{ cvmfs.data.path }}:{{ cvmfs.data.docker_perm }}" + singularity_enabled: true + singularity_default_container_id: "{{ cvmfs.singularity.path }}/all/centos:8.3.2011" + scheduling: + accept: + - pulsar + - conda + - singularity + - docker + - condor-tpv + + pulsar_mira_tpv: + inherits: pulsar_default + runner: pulsar_mira_runner + max_accepted_cores: 8 + max_accepted_mem: 15 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - mira-pulsar + + pulsar_sanjay_tpv: + inherits: pulsar_default + runner: pulsar_sanjay_runner + max_accepted_cores: 8 + max_accepted_mem: 15 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - sanjay-pulsar + + pulsar_sk01_tpv: + inherits: pulsar_default + runner: pulsar_eu_sk01 + max_accepted_cores: 8 + max_accepted_mem: 16 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - sk-pulsar + + pulsar_it_tpv: + inherits: pulsar_default + runner: pulsar_eu_it01 + max_accepted_cores: 16 + max_accepted_mem: 31 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - it-pulsar + + pulsar_it02_tpv: + inherits: pulsar_default + runner: pulsar_eu_it02 + max_accepted_cores: 16 + max_accepted_mem: 31 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + params: + singularity_volumes: '$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/cvmfs/data.galaxyproject.org:ro' + scheduling: + require: + - it02-pulsar + + pulsar_fr01_tpv: + runner: pulsar_eu_fr01 + inherits: pulsar_default + max_accepted_cores: 8 + max_accepted_mem: 63 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - fr-pulsar + + pulsar_be_tpv: + inherits: pulsar_default + runner: pulsar_eu_be01 + max_accepted_cores: 8 + max_accepted_mem: 15 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - be-pulsar + + pulsar_cz01_tpv: + inherits: pulsar_default + runner: pulsar_eu_cz01 + max_accepted_cores: 32 + max_accepted_mem: 128 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + env: + LC_ALL: C + SINGULARITY_CACHEDIR: "/storage/praha5-elixir/home/galaxyeu/singularity" + SINGULARITY_TMPDIR: "/storage/praha5-elixir/home/galaxyeu/singularity/tmp" + TMPDIR: "$SCRATCHDIR" + TMP: "$SCRATCHDIR" + TEMP: "$SCRATCHDIR" + params: + jobs_directory: "/storage/praha5-elixir/home/galaxyeu/pulsar-eu/files/staging" + persistence_directory: "/storage/praha5-elixir/home/galaxyeu/pulsar-eu/files/persistent" + singularity_volumes: 
"$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/cvmfs/data.galaxyproject.org:ro,$SCRATCHDIR,/storage/praha5-elixir/home/galaxyeu:/home/galaxyeu,/cvmfs/data.galaxyproject.org/managed/:/data/db/data_managers/:ro" + scheduling: + require: + - cz-pulsar + + pulsar_cz02_tpv: + inherits: pulsar_default + runner: pulsar_eu_cz02 + max_accepted_cores: 32 + max_accepted_mem: 128 + min_accepted_gpus: 1 + max_accepted_gpus: 1 + env: + LC_ALL: C + SINGULARITY_CACHEDIR: "/storage/praha5-elixir/home/galaxyeu/singularity" + SINGULARITY_TMPDIR: "/storage/praha5-elixir/home/galaxyeu/singularity/tmp" + TMPDIR: "$SCRATCHDIR" + TMP: "$SCRATCHDIR" + TEMP: "$SCRATCHDIR" + ALPHAFOLD_DB: "/storage/brno11-elixir/projects/alphafold/alphafold.db-2.3.1" + GPU_AVAILABLE: "1" + ALPHAFOLD_USE_GPU: "True" + params: + jobs_directory: "/storage/praha5-elixir/home/galaxyeu/pulsar-eu/files/staging" + persistence_directory: "/storage/praha5-elixir/home/galaxyeu/pulsar-eu/files/persistent" + singularity_volumes: "$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/cvmfs/data.galaxyproject.org:ro,$SCRATCHDIR,/storage/praha5-elixir/home/galaxyeu:/home/galaxyeu,$ALPHAFOLD_DB:/data:ro" + singularity_run_extra_arguments: "--nv" + scheduling: + require: + - cz-pulsar + + ############################# + # LOCAL CONDOR DESTINATIONS # + ############################# + + condor_docker: + inherits: basic_docker_destination + runner: condor + max_accepted_cores: 36 + max_accepted_mem: 975 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + require: + - docker + + condor_singularity: + inherits: basic_singularity_destination + runner: condor + max_accepted_cores: 24 + max_accepted_mem: 128 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + params: + scheduling: + require: + - singularity + + # Generic destination for tools that don't get any params + # and no specified dependency resolution + condor_tpv: + runner: condor + max_accepted_cores: 64 + max_accepted_mem: 1000 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + scheduling: + prefer: + - condor-tpv + + condor_singularity_with_conda: + inherits: basic_singularity_destination + runner: condor + max_accepted_cores: 64 + max_accepted_mem: 1000 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + params: + container_override: + - type: singularity + shell: "/bin/bash" + resolve_dependencies: true + identifier: "{{ dnb.0.path }}/singularity_base_images/centos:8.3.2011" + scheduling: + require: + - singularity + - conda + + condor_upload: + runner: condor + max_accepted_cores: 20 + max_accepted_mem: 10 + min_accepted_gpus: 0 + max_accepted_gpus: 0 + params: + requirements: "GalaxyTraining == false" + rank: 'GalaxyGroup == "upload"' + scheduling: + require: + - upload + + condor_gpu: + runner: condor + max_accepted_cores: 14 + max_accepted_mem: 37 + min_accepted_gpus: 1 + max_accepted_gpus: 1 + env: + GPU_AVAILABLE: 1 + params: + requirements: 'GalaxyGroup == "compute_gpu"' + + condor_docker_gpu: + inherits: basic_docker_destination + # shorter than inheriting from condor_gpu + runner: condor + max_accepted_cores: 14 + max_accepted_mem: 37 + min_accepted_gpus: 1 + max_accepted_gpus: 1 + scheduling: + require: + - docker + env: + GPU_AVAILABLE: 1 + params: + requirements: 'GalaxyGroup == "compute_gpu"' diff --git a/files/galaxy/tpv/interactive_tools.yml b/files/galaxy/tpv/interactive_tools.yml new file mode 100644 index 000000000..012f42345 --- /dev/null +++ b/files/galaxy/tpv/interactive_tools.yml @@ -0,0 +1,209 @@ +--- 
+tools: + interactive_tool: + cores: 1 + mem: 4 + params: + docker_volumes: $defaults + container_monitor_result: callback + submit_requirements: 'GalaxyDockerHack == True && GalaxyGroup == "interactive"' + scheduling: + require: + - docker + - interactive + + interactive_tool_gpu: + cores: 1 + mem: 4 + params: + docker_volumes: $defaults + container_monitor_result: callback + submit_requirements: 'GalaxyDockerHack == True' + rules: + - if: user and 'gpu_access_validated' in [role.name for role in user.all_roles() if not role.deleted] + scheduling: + require: + - docker + - interactive + - if: | + not user or not any([ role for role in user.all_roles() if (role.name in ['gpu_access_validated'] and not role.deleted) ]) + fail: | + This tool has restricted access. Please request access by visiting https://usegalaxy.eu/gpu-request. + + + interactive_tool_divand: + inherits: interactive_tool + cores: 1 + mem: 4 + params: + docker_run_extra_arguments: " --user 999 " + interactive_tool_tadviewer: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_hdfview: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_pampa: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_metashrimps: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_odv: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_scoop3_argo: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_pavian: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_blobtoolkit: + inherits: interactive_tool + cores: 1 + mem: 4 + env: + TEMP: /data/1/galaxy_db/tmp + interactive_tool_mgnify_notebook: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_openrefine: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_audiolabeler: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_ethercalc: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_geoexplorer: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_radiant: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_higlass: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_phinch: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_neo4j: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_hicbrowser: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_cellxgene: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_bam_iobio: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_vcf_iobio: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_askomics: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_wilson: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_wallace: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_paraview: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_simtext_app: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_isee: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_metashark: + inherits: interactive_tool + cores: 1 + mem: 4 + interactive_tool_rstudio: + inherits: interactive_tool + cores: 2 + mem: 8 + interactive_tool_pyiron: + inherits: interactive_tool + cores: 1 + mem: 8 + interactive_tool_guacamole_desktop: + inherits: interactive_tool + cores: 2 + mem: 8 + interactive_tool_panoply: + inherits: interactive_tool + cores: 1 + mem: 4 + 
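The interactive_tool_gpu entry above gates GPU-enabled interactive tools on the gpu_access_validated role; TPV evaluates such `if:` blocks as Python expressions against the job context. A rough illustration of that boolean logic, using hand-rolled stand-ins for Galaxy's user and role objects (the dataclasses below are placeholders, not the real models), before the list of interactive tool entries continues:

# Stand-ins for the Galaxy user/role objects that TPV rule expressions
# (like the one in interactive_tool_gpu above) are evaluated against.
from dataclasses import dataclass, field

@dataclass
class Role:
    name: str
    deleted: bool = False

@dataclass
class User:
    roles: list = field(default_factory=list)

    def all_roles(self):
        return self.roles

def gpu_access_allowed(user):
    # Mirrors the rule condition: user and 'gpu_access_validated' in
    # [role.name for role in user.all_roles() if not role.deleted]
    return bool(user) and "gpu_access_validated" in [
        role.name for role in user.all_roles() if not role.deleted
    ]

print(gpu_access_allowed(None))                                 # False -> the "fail" rule fires
print(gpu_access_allowed(User([Role("gpu_access_validated")]))) # True  -> docker + interactive required
print(gpu_access_allowed(User([Role("gpu_access_validated", deleted=True)])))  # False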
interactive_tool_vrm_editor: + inherits: interactive_tool + cores: 1 + mem: 4 + + interactive_tool_jupyter_notebook: + inherits: interactive_tool + mem: 4 + env: + HOME: /home/jovyan + + interactive_tool_pangeo_notebook: + inherits: interactive_tool + mem: 4 + env: + HOME: /home/jovyan + + interactive_tool_climate_notebook: + inherits: interactive_tool + mem: 4 + env: + HOME: /home/jovyan + + interactive_tool_ml_jupyter_notebook: + inherits: interactive_tool_gpu + gpus: 1 + cores: 1 + mem: 8 + params: + docker_run_extra_arguments: " --gpus all" + scheduling: + require: + - docker + + #interactive_tool_jupyter_notebook: + # inherits: interactive_tool + # rules: + # - if: | + # not user or not 'trusted_for_ITs' in [role.name for role in user.all_roles() if not role.deleted] + # fail: "Interactive tool jupyter not available for user" # TODO: update placeholder text diff --git a/files/galaxy/tpv/roles.yml b/files/galaxy/tpv/roles.yml new file mode 100644 index 000000000..ca698221d --- /dev/null +++ b/files/galaxy/tpv/roles.yml @@ -0,0 +1,21 @@ +--- +roles: + dataplant*: + params: + object_store_id: "dataplant01" + storage-test*: + params: + object_store_id: "s3_netapp01" + + rstudio-poweruser*: + rules: + - id: rstudio_poweruser + if: | + 'interactive_tool_rstudio' in tool.id + scheduling: + require: + - docker + - interactive + - rstudio-poweruser + mem: 100 + cores: 2 diff --git a/files/galaxy/tpv/tool_defaults.yml b/files/galaxy/tpv/tool_defaults.yml new file mode 100644 index 000000000..8e12981d5 --- /dev/null +++ b/files/galaxy/tpv/tool_defaults.yml @@ -0,0 +1,49 @@ +--- +# ALL tags must be with dashes (-) instead of underscores (_) +global: + default_inherits: default +tools: + default: + cores: 1 + mem: cores * 3.8 + gpus: 0 + env: + GALAXY_MEMORY_MB: "{int(mem * 1024)}" # set 5/2023 might be moved to runner or tool wrappers, related to Galaxy issue 15952 + params: + metadata_strategy: 'extended' + tmp_dir: true + request_cpus: "{cores}" + request_memory: "{mem}G" + submit_request_gpus: "{gpus or 0}" + docker_memory: "{mem}G" + description: "{tool.id if not tool.id.count('/') == 5 else tool.id.split('/')[4]}" + scheduling: + reject: + - offline + rules: + - if: user is not None + execute: | + training_roles = [r.name for r in user.all_roles() if not r.deleted and "training" in r.name] + training_expr = " || ".join(['(GalaxyGroup == "%s")' % role for role in training_roles]) + training_labels = '"' + ", ".join(training_roles) + '"' + entity.params['requirements'] = '(GalaxyGroup == "compute") || (%s)' % training_expr if training_expr else '(GalaxyGroup == "compute")' + entity.params['+Group'] = training_labels + entity.params['accounting_group_user'] = str(user.id) + - id: remote_resources + if: user is not None + execute: | + from tpv.core.entities import TagSetManager + pulsar_tag = None + user_preferences = user.extra_preferences + for data_item in user_preferences: + if "distributed_compute|remote_resources" in data_item: + if user_preferences[data_item] != "None": + pulsar_tag = user_preferences[data_item] + new_tag = {'require':[pulsar_tag]} if pulsar_tag else {} + entity.tpv_tags = entity.tpv_tags.combine(TagSetManager.from_dict(new_tag)) + if pulsar_tag == 'cz-pulsar' and helpers.tool_version_gte(tool, '2.3.1') and 'alphafold2' in tool.id: + entity.gpus = 1 + + rank: | + final_destinations = helpers.weighted_random_sampling(candidate_destinations) + final_destinations diff --git a/files/galaxy/tpv/tools.yml b/files/galaxy/tpv/tools.yml new file mode 100644 index 
000000000..0b85d16c2 --- /dev/null +++ b/files/galaxy/tpv/tools.yml @@ -0,0 +1,979 @@ +--- +# ALL tags must be with dashes (-) instead of underscores (_) +tools: + basic_gpu_resource_param_tool: + # Type of compute resource (CPU or GPU) for the tool depends on user's input from its wrapper. + # Default resource is CPU. + rules: + - id: resource_params_gpu + if: | + param_dict = job.get_param_values(app) + param_dict.get('__job_resource', {}).get('__job_resource__select') == 'yes' + gpus: int(job.get_param_values(app)['__job_resource']['gpu']) + + __DATA_FETCH__: + cores: 1 + mem: 3 + gpus: 0 + scheduling: + require: + - upload + env: + TEMP: /data/1/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/chemteam/gmx_sim/gmx_sim/.*: + inherits: basic_gpu_resource_param_tool + + toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_.*/deeptools_.*/.*: + rules: + - id: deeptools_singularity + if: | + # versions without singularity container available + no_container = { + '2.0.1.0', + '2.1.0.0', + '2.2.2.0', # a container is available but it is broken + '2.2.3.0', + '2.5.0.0', + '3.0.1.0', + '3.0.2.0', + } + all(not helpers.tool_version_eq(tool, version) for version in no_container) + scheduling: + require: + - singularity + + toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_bam_compare/deeptools_bam_compare/.*: + rules: + - id: deeptools_singularity_bam_compare + if: | + # versions working in conda but not in singularity + conda_only_versions = { + '2.4.1.0', + } + any(helpers.tool_version_eq(tool, version) for version in conda_only_versions) + # remove singularity tag + execute: | + from tpv.core.entities import TagSetManager, TagType + entity.tpv_tags = TagSetManager([ + tag for tag in entity.tpv_tags.tags + if not (tag.value == 'singularity' and tag.tag_type == TagType.REQUIRE) + ]) + + toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_bam_coverage/deeptools_bam_coverage/.*: + rules: + - id: deeptools_singularity_bam_coverage + if: | + # versions working in conda but not in singularity + conda_only_versions = { + '2.4.1.0', + } + any(helpers.tool_version_eq(tool, version) for version in conda_only_versions) + # remove singularity tag + execute: | + from tpv.core.entities import TagSetManager, TagType + entity.tpv_tags = TagSetManager([ + tag for tag in entity.tpv_tags.tags + if not (tag.value == 'singularity' and tag.tag_type == TagType.REQUIRE) + ]) + + toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_plot_coverage/deeptools_plot_coverage/.*: + rules: + - id: deeptools_singularity_plot_coverage + if: | + # versions working in conda but not in singularity + conda_only_versions = { + '2.4.1.0', + } + any(helpers.tool_version_eq(tool, version) for version in conda_only_versions) + # remove singularity tag + execute: | + from tpv.core.entities import TagSetManager, TagType + entity.tpv_tags = TagSetManager([ + tag for tag in entity.tpv_tags.tags + if not (tag.value == 'singularity' and tag.tag_type == TagType.REQUIRE) + ]) + + toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_plot_profile/deeptools_plot_profile/.*: + rules: + - id: deeptools_singularity_plot_profile + if: | + # versions working in conda but not in singularity + conda_only_versions = { + '3.1.2.0.0', + } + any(helpers.tool_version_eq(tool, version) for version in conda_only_versions) + # remove singularity tag + execute: | + from tpv.core.entities import TagSetManager, TagType + entity.tpv_tags = TagSetManager([ + tag for tag in entity.tpv_tags.tags + if not (tag.value == 'singularity' and tag.tag_type == TagType.REQUIRE) + ]) + + 
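The hifiasm entry just below derives its memory request from the wrapper's hg_size (estimated haploid genome size, e.g. "100m" or "3g") and kcov parameters. As a worked check of that arithmetic, here is a standalone restatement of the `mem:` expression (illustrative only; TPV evaluates the inline block itself, and the function name is invented):

# Standalone restatement of the hifiasm `mem:` expression below:
# hg_size is converted to Gb, then scaled by (2 * kcov) * 1.75.
from math import ceil

def hifiasm_mem_gb(hg_size: str, kcov: int = 36) -> int:
    conversion_factors = {"k": 1000000, "m": 1000, "g": 1}  # suffix -> divisor to reach Gb
    suffix = hg_size[-1:].lower()
    value = float(hg_size[:-1].replace(",", "."))
    value = value / conversion_factors[suffix]  # genome size in Gb
    return ceil(value * (kcov * 2) * 1.75)      # memory request in GB

print(hifiasm_mem_gb("100m"))  # 0.1 Gb genome, default kcov=36 -> 13
print(hifiasm_mem_gb("3g"))    # 3 Gb genome -> 378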
toolshed.g2.bx.psu.edu/repos/bgruening/hifiasm/hifiasm/.*:
+    rules:
+      - id: hifiasm_memory
+        # The memory requirement of Hifiasm depends on the wrapper's input
+        if: |
+          parameters = {p.name: p.value for p in job.parameters}
+          parameters = tool.params_from_strings(parameters, app)
+
+          advanced_options = parameters.get("advanced_options", dict())
+          hg_size = advanced_options.get("hg_size", "")
+
+          bool(hg_size)
+        mem: |
+          from math import ceil
+
+          parameters = {p.name: p.value for p in job.parameters}
+          parameters = tool.params_from_strings(parameters, app)
+
+          advanced_options = parameters.get("advanced_options", dict())
+
+          kcov_default = 36
+          kcov = advanced_options.get("kcov", kcov_default)
+
+          hg_size = advanced_options.get("hg_size", "")
+
+          value = 0
+          if hg_size:
+              conversion_factors = {
+                  "k": 1000000,
+                  "M": 1000,
+                  "G": 1,
+              }
+              conversion_factors = {
+                  key.lower(): value for key, value in conversion_factors.items()
+              }
+              suffix = hg_size[-1:].lower()
+              value = hg_size[:len(hg_size) - 1]
+              value = value.replace(",", ".")
+              value = float(value)
+              # compute hg size in Gb
+              value = value / conversion_factors[suffix.lower()]
+              value = ceil(value * (kcov * 2) * 1.75)
+
+          # return the amount of memory needed
+          value
+
+  toolshed.g2.bx.psu.edu/repos/bgruening/keras_train_and_eval/keras_train_and_eval/.*:
+    inherits: basic_gpu_resource_param_tool
+
+  toolshed.g2.bx.psu.edu/repos/iuc/snippy/snippy/.*:
+    cores: 2
+    scheduling:
+      require:
+        - condor-tpv
+    rules:
+      - if: input_size >= 0.015
+        cores: 14
+
+  toolshed.g2.bx.psu.edu/repos/iuc/enasearch_search_data/enasearch_search_data/.*:
+    scheduling:
+      require:
+        - conda
+        - singularity
+
+  toolshed.g2.bx.psu.edu/repos/galaxy-australia/hifiasm_meta/hifiasm_meta/.*:
+    cores: 8
+    params:
+      singularity_enabled: True
+
+  toolshed.g2.bx.psu.edu/repos/rnateam/dewseq/dewseq/.*:
+    cores: 2
+    mem: 40
+    scheduling:
+      prefer:
+        - condor-tpv
+
+  toolshed.g2.bx.psu.edu/repos/iuc/fasta_stats/fasta-stats/.*:
+    rules:
+      - if: input_size >= 0.01
+        cores: 3
+
+  toolshed.g2.bx.psu.edu/repos/rnateam/htseq-clip/htseq-clip/.*:
+    cores: 4
+    mem: 16
+    scheduling:
+      prefer:
+        - condor-tpv
+
+  toolshed.g2.bx.psu.edu/repos/iuc/sleuth/sleuth/.*:
+    cores: 4
+    mem: 16
+
+  toolshed.g2.bx.psu.edu/repos/iuc/rnaquast/rna_quast/.*:
+    cores: 12
+    mem: 40
+
+  toolshed.g2.bx.psu.edu/repos/galaxy-australia/alphafold2/alphafold/.*:
+    cores: 10
+    mem: 32
+    rules:
+      - if: helpers.tool_version_eq(tool, '2.0.0+galaxy1')
+        # The version number of alphafold may not match the version number of the tool in this case. The alphafold
+        # version number should in fact be newer than 2.1.0 and older than 2.1.1. Check the links below to verify this
+        # claim:
+        # - https://github.com/usegalaxy-au/tools-au/blob/fae57866fda74c85d405a2db03a82ebfdaed6070/tools/alphafold/docker/Dockerfile#L7
+        # - https://github.com/usegalaxy-au/tools-au/tree/fae57866fda74c85d405a2db03a82ebfdaed6070/tools/alphafold/docker
+        # - https://github.com/usegalaxy-au/tools-au/commit/aa7c146a02df64fcaa8ef03a89a76012227f35d6
+        # - https://github.com/deepmind/alphafold/blob/be37a41d6f83e4145bd4912cbe8bf6a24af80c29/setup.py#L21
+        # However, neither 2.1.0 nor 2.1.1 is supposed to be able to use the GPU during the relaxation step, yet
+        # there are CUDA errors in the tool tests when no GPU is available. Verify the GPU use claim below:
+        # - https://github.com/deepmind/alphafold/blob/be37a41d6f83e4145bd4912cbe8bf6a24af80c29/alphafold/relax/amber_minimize.py#L93
+        # There does indeed seem to have been a mishap when packaging this version of the tool. See the link below.
+        # - https://github.com/usegalaxy-au/tools-au/commit/06dd35df4064c0c3c1272957e46c0df59d24c7fe
+        # Regardless of how things came to be this way, this version of the tool needs a GPU (and the
+        # requirement cannot be disabled, because `ALPHAFOLD_USE_GPU` is not declared in alphafold.xml). Thus we set
+        # gpus to one.
+        gpus: 1
+      - if: helpers.tool_version_eq(tool, '2.0.0+galaxy2')
+        # The same story as above applies to this tool version.
+        # - https://github.com/usegalaxy-au/tools-au/blob/78302ce1d79058f37b24c7b395de450f42631260/tools/alphafold/alphafold.xml#L52
+        gpus: 1
+      - if: helpers.tool_version_eq(tool, '2.1.2+galaxy0')
+        # This version of alphafold already allows controlling whether the GPU should be used during the relaxation
+        # step, but the tool developers added `ALPHAFOLD_USE_GPU` in the next tool revision (2.1.2+galaxy1). GPU use
+        # is still mandatory.
+        # - https://github.com/usegalaxy-au/tools-au/commit/6352c107873cf824d83bfe06b368523624746de7
+        gpus: 1
+      - if: helpers.tool_version_lt(tool, '2.3')
+        params:
+          singularity_run_extra_arguments: "--env ALPHAFOLD_DB=/data/db/databases/alphafold_databases/2.2/,ALPHAFOLD_USE_GPU=False"
+      - if: helpers.tool_version_gte(tool, '2.3') and helpers.tool_version_lt(tool, '2.3.1+galaxy2')
+        params:
+          singularity_run_extra_arguments: "--env ALPHAFOLD_DB=/data/db/databases/alphafold_databases/2.3/,ALPHAFOLD_USE_GPU=False"
+      - if: helpers.tool_version_gte(tool, '2.3.1+galaxy2')
+        params:
+          singularity_run_extra_arguments: "--env ALPHAFOLD_DB=/data/db/databases/alphafold_databases,ALPHAFOLD_USE_GPU=False"
+    scheduling:
+      require:
+        - singularity
+
+
+  basic_docker_tool:
+    scheduling:
+      require:
+        - docker
+  toolshed.g2.bx.psu.edu/repos/bgruening/instagraal/instagraal/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/ecology/cb_ivr/cb_ivr/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/basic_illumination/basic_illumination/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/goeckslab/mesmer/mesmer/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/quantification/quantification/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/s3segmenter/s3segmenter/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/coreograph/unet_coreograph/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/unmicst/unmicst/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/perssond/ashlar/ashlar/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__alignment__mafft/qiime2__alignment__mafft/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__alignment__mafft_add/qiime2__alignment__mafft_add/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__alignment__mask/qiime2__alignment__mask/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__composition__add_pseudocount/qiime2__composition__add_pseudocount/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__composition__ancom/qiime2__composition__ancom/.*:
+    inherits: basic_docker_tool
+  toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__composition__ancombc/qiime2__composition__ancombc/.*:
+    inherits:
basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__composition__tabulate/qiime2__composition__tabulate/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__cutadapt__demux_paired/qiime2__cutadapt__demux_paired/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__cutadapt__demux_single/qiime2__cutadapt__demux_single/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__cutadapt__trim_paired/qiime2__cutadapt__trim_paired/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__cutadapt__trim_single/qiime2__cutadapt__trim_single/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__dada2__denoise_ccs/qiime2__dada2__denoise_ccs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__dada2__denoise_paired/qiime2__dada2__denoise_paired/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__dada2__denoise_pyro/qiime2__dada2__denoise_pyro/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__dada2__denoise_single/qiime2__dada2__denoise_single/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__deblur__denoise_16S/qiime2__deblur__denoise_16S/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__deblur__denoise_other/qiime2__deblur__denoise_other/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__deblur__visualize_stats/qiime2__deblur__visualize_stats/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__emp_paired/qiime2__demux__emp_paired/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__emp_single/qiime2__demux__emp_single/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__filter_samples/qiime2__demux__filter_samples/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__subsample_paired/qiime2__demux__subsample_paired/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__subsample_single/qiime2__demux__subsample_single/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__demux__summarize/qiime2__demux__summarize/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__adonis/qiime2__diversity__adonis/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__alpha/qiime2__diversity__alpha/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__alpha_correlation/qiime2__diversity__alpha_correlation/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__alpha_group_significance/qiime2__diversity__alpha_group_significance/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__alpha_phylogenetic/qiime2__diversity__alpha_phylogenetic/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__alpha_rarefaction/qiime2__diversity__alpha_rarefaction/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__beta/qiime2__diversity__beta/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__beta_correlation/qiime2__diversity__beta_correlation/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__beta_group_significance/qiime2__diversity__beta_group_significance/.*: 
+ inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__beta_phylogenetic/qiime2__diversity__beta_phylogenetic/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__beta_rarefaction/qiime2__diversity__beta_rarefaction/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__bioenv/qiime2__diversity__bioenv/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__core_metrics/qiime2__diversity__core_metrics/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__core_metrics_phylogenetic/qiime2__diversity__core_metrics_phylogenetic/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__filter_distance_matrix/qiime2__diversity__filter_distance_matrix/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__mantel/qiime2__diversity__mantel/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__pcoa/qiime2__diversity__pcoa/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__pcoa_biplot/qiime2__diversity__pcoa_biplot/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__procrustes_analysis/qiime2__diversity__procrustes_analysis/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__tsne/qiime2__diversity__tsne/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity__umap/qiime2__diversity__umap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__alpha_passthrough/qiime2__diversity_lib__alpha_passthrough/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__beta_passthrough/qiime2__diversity_lib__beta_passthrough/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__beta_phylogenetic_meta_passthrough/qiime2__diversity_lib__beta_phylogenetic_meta_passthrough/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__beta_phylogenetic_passthrough/qiime2__diversity_lib__beta_phylogenetic_passthrough/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__bray_curtis/qiime2__diversity_lib__bray_curtis/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__faith_pd/qiime2__diversity_lib__faith_pd/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__jaccard/qiime2__diversity_lib__jaccard/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__observed_features/qiime2__diversity_lib__observed_features/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__pielou_evenness/qiime2__diversity_lib__pielou_evenness/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__shannon_entropy/qiime2__diversity_lib__shannon_entropy/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__unweighted_unifrac/qiime2__diversity_lib__unweighted_unifrac/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__diversity_lib__weighted_unifrac/qiime2__diversity_lib__weighted_unifrac/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__emperor__biplot/qiime2__emperor__biplot/.*: + 
inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__emperor__plot/qiime2__emperor__plot/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__emperor__procrustes_plot/qiime2__emperor__procrustes_plot/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__blast/qiime2__feature_classifier__blast/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__classify_consensus_blast/qiime2__feature_classifier__classify_consensus_blast/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__classify_consensus_vsearch/qiime2__feature_classifier__classify_consensus_vsearch/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__classify_hybrid_vsearch_sklearn/qiime2__feature_classifier__classify_hybrid_vsearch_sklearn/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__classify_sklearn/qiime2__feature_classifier__classify_sklearn/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__extract_reads/qiime2__feature_classifier__extract_reads/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__find_consensus_annotation/qiime2__feature_classifier__find_consensus_annotation/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__fit_classifier_naive_bayes/qiime2__feature_classifier__fit_classifier_naive_bayes/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__fit_classifier_sklearn/qiime2__feature_classifier__fit_classifier_sklearn/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_classifier__vsearch_global/qiime2__feature_classifier__vsearch_global/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__core_features/qiime2__feature_table__core_features/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__filter_features/qiime2__feature_table__filter_features/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__filter_features_conditionally/qiime2__feature_table__filter_features_conditionally/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__filter_samples/qiime2__feature_table__filter_samples/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__filter_seqs/qiime2__feature_table__filter_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__group/qiime2__feature_table__group/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__heatmap/qiime2__feature_table__heatmap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__merge/qiime2__feature_table__merge/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__merge_seqs/qiime2__feature_table__merge_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__merge_taxa/qiime2__feature_table__merge_taxa/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__presence_absence/qiime2__feature_table__presence_absence/.*: + inherits: basic_docker_tool + 
toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__rarefy/qiime2__feature_table__rarefy/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__relative_frequency/qiime2__feature_table__relative_frequency/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__rename_ids/qiime2__feature_table__rename_ids/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__subsample/qiime2__feature_table__subsample/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__summarize/qiime2__feature_table__summarize/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__tabulate_seqs/qiime2__feature_table__tabulate_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__feature_table__transpose/qiime2__feature_table__transpose/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__fragment_insertion__classify_otus_experimental/qiime2__fragment_insertion__classify_otus_experimental/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__fragment_insertion__filter_features/qiime2__fragment_insertion__filter_features/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__fragment_insertion__sepp/qiime2__fragment_insertion__sepp/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__assign_ids/qiime2__gneiss__assign_ids/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__correlation_clustering/qiime2__gneiss__correlation_clustering/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__dendrogram_heatmap/qiime2__gneiss__dendrogram_heatmap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__gradient_clustering/qiime2__gneiss__gradient_clustering/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__ilr_hierarchical/qiime2__gneiss__ilr_hierarchical/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__ilr_phylogenetic/qiime2__gneiss__ilr_phylogenetic/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__ilr_phylogenetic_differential/qiime2__gneiss__ilr_phylogenetic_differential/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__gneiss__ilr_phylogenetic_ordination/qiime2__gneiss__ilr_phylogenetic_ordination/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__anova/qiime2__longitudinal__anova/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__feature_volatility/qiime2__longitudinal__feature_volatility/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__first_differences/qiime2__longitudinal__first_differences/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__first_distances/qiime2__longitudinal__first_distances/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__linear_mixed_effects/qiime2__longitudinal__linear_mixed_effects/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__maturity_index/qiime2__longitudinal__maturity_index/.*: + inherits: basic_docker_tool + 
toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__nmit/qiime2__longitudinal__nmit/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__pairwise_differences/qiime2__longitudinal__pairwise_differences/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__pairwise_distances/qiime2__longitudinal__pairwise_distances/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__plot_feature_volatility/qiime2__longitudinal__plot_feature_volatility/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__longitudinal__volatility/qiime2__longitudinal__volatility/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__metadata__distance_matrix/qiime2__metadata__distance_matrix/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__metadata__shuffle_groups/qiime2__metadata__shuffle_groups/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__metadata__tabulate/qiime2__metadata__tabulate/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__align_to_tree_mafft_fasttree/qiime2__phylogeny__align_to_tree_mafft_fasttree/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__align_to_tree_mafft_iqtree/qiime2__phylogeny__align_to_tree_mafft_iqtree/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__align_to_tree_mafft_raxml/qiime2__phylogeny__align_to_tree_mafft_raxml/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__fasttree/qiime2__phylogeny__fasttree/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__filter_table/qiime2__phylogeny__filter_table/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__filter_tree/qiime2__phylogeny__filter_tree/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__iqtree/qiime2__phylogeny__iqtree/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__iqtree_ultrafast_bootstrap/qiime2__phylogeny__iqtree_ultrafast_bootstrap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__midpoint_root/qiime2__phylogeny__midpoint_root/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__raxml/qiime2__phylogeny__raxml/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__raxml_rapid_bootstrap/qiime2__phylogeny__raxml_rapid_bootstrap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__phylogeny__robinson_foulds/qiime2__phylogeny__robinson_foulds/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__bowtie2_build/qiime2__quality_control__bowtie2_build/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__evaluate_composition/qiime2__quality_control__evaluate_composition/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__evaluate_seqs/qiime2__quality_control__evaluate_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__evaluate_taxonomy/qiime2__quality_control__evaluate_taxonomy/.*: + inherits: basic_docker_tool + 
toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__exclude_seqs/qiime2__quality_control__exclude_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_control__filter_reads/qiime2__quality_control__filter_reads/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__quality_filter__q_score/qiime2__quality_filter__q_score/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__classify_samples/qiime2__sample_classifier__classify_samples/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__classify_samples_from_dist/qiime2__sample_classifier__classify_samples_from_dist/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__classify_samples_ncv/qiime2__sample_classifier__classify_samples_ncv/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__confusion_matrix/qiime2__sample_classifier__confusion_matrix/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__fit_classifier/qiime2__sample_classifier__fit_classifier/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__fit_regressor/qiime2__sample_classifier__fit_regressor/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__heatmap/qiime2__sample_classifier__heatmap/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__metatable/qiime2__sample_classifier__metatable/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__predict_classification/qiime2__sample_classifier__predict_classification/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__predict_regression/qiime2__sample_classifier__predict_regression/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__regress_samples/qiime2__sample_classifier__regress_samples/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__regress_samples_ncv/qiime2__sample_classifier__regress_samples_ncv/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__scatterplot/qiime2__sample_classifier__scatterplot/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__split_table/qiime2__sample_classifier__split_table/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__sample_classifier__summarize/qiime2__sample_classifier__summarize/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__taxa__barplot/qiime2__taxa__barplot/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__taxa__collapse/qiime2__taxa__collapse/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__taxa__filter_seqs/qiime2__taxa__filter_seqs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__taxa__filter_table/qiime2__taxa__filter_table/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__cluster_features_closed_reference/qiime2__vsearch__cluster_features_closed_reference/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__cluster_features_de_novo/qiime2__vsearch__cluster_features_de_novo/.*: + inherits: 
basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__cluster_features_open_reference/qiime2__vsearch__cluster_features_open_reference/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__dereplicate_sequences/qiime2__vsearch__dereplicate_sequences/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__fastq_stats/qiime2__vsearch__fastq_stats/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__merge_pairs/qiime2__vsearch__merge_pairs/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__uchime_denovo/qiime2__vsearch__uchime_denovo/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2__vsearch__uchime_ref/qiime2__vsearch__uchime_ref/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2_core__tools__export/qiime2_core__tools__export/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/q2d2/qiime2_core__tools__import/qiime2_core__tools__import/.*: + inherits: basic_docker_tool + toolshed.g2.bx.psu.edu/repos/iuc/cherri_train/cherri_train/.*: + inherits: basic_docker_tool + cores: 10 + mem: 90 + + toolshed.g2.bx.psu.edu/repos/iuc/cherri_eval/cherri_eval/.*: + inherits: basic_docker_tool + cores: 1 + mem: 20 + + toolshed.g2.bx.psu.edu/repos/iuc/kraken2/kraken2/.*: + cores: 2 + mem: 70 + + toolshed.g2.bx.psu.edu/repos/nml/metaspades/metaspades/.*: + cores: 2 + scheduling: + accept: + - pulsar + - condor-tpv + rules: + - if: 0.05 <= input_size < 1 + cores: 8 + mem: 100 + - if: 1 <= input_size < 60 + cores: 16 + mem: 350 + - if: input_size >= 60 + fail: Too much data, please don't use SPAdes for this. + + toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.*: + cores: 2 + rules: + - if: 0.005 <= input_size < 1 + cores: 8 + mem: 100 + - if: 1 <= input_size < 2 + cores: 12 + mem: 200 + - if: 2 <= input_size < 20 + cores: 20 + mem: 350 + scheduling: + prefer: + - condor-tpv + - if: input_size >= 20 + fail: Too much data, please don't use this tool for this. + + toolshed.g2.bx.psu.edu/repos/galaxy-australia/smudgeplot/smudgeplot/.*: + cores: 8 + rules: + - if: input_size < 1 + mem: 15 + - if: 1 <= input_size < 5 + mem: 75 + - if: 5 <= input_size < 10 + cores: 12 + mem: 150 + - if: 10 <= input_size < 15 + cores: 12 + mem: 225 + - if: 15 <= input_size < 20 + cores: 12 + mem: 300 + - if: 20 <= input_size < 25 + cores: 16 + mem: 375 + - if: input_size >= 25 + fail: Too much data, please check if the input is correct.
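The SPAdes and Smudgeplot blocks above condense the two TPV mechanisms this file uses everywhere: `inherits`, which pulls a shared base entry such as `basic_docker_tool` into a tool entry, and `rules`, whose `if` expressions are evaluated per job so that `input_size` (apparently expressed in GB here, judging by the thresholds) can scale `cores`/`mem` upward or `fail` the job with a message. A minimal sketch of the combined pattern, using a hypothetical tool id that is not part of this change:

    toolshed.g2.bx.psu.edu/repos/example/mytool/mytool/.*:  # hypothetical id
      inherits: basic_docker_tool  # reuse the shared Docker scheduling block
      cores: 2
      mem: 8
      rules:
        - if: 1 <= input_size < 10  # evaluated against each job's input size
          cores: 8
          mem: 32
        - if: input_size >= 10
          fail: Too much data for this tool on this server.

Every rule whose condition matches is merged onto the base entry in order, which is why the entries above can leave small-input defaults at the top level and only override them for larger inputs.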
+ + toolshed.g2.bx.psu.edu/repos/iuc/shovill/shovill/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + + toolshed.g2.bx.psu.edu/repos/iuc/spades_rnaviralspades/spades_rnaviralspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + + toolshed.g2.bx.psu.edu/repos/iuc/rnaspades/rnaspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + toolshed.g2.bx.psu.edu/repos/iuc/spades_plasmidspades/spades_plasmidspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + toolshed.g2.bx.psu.edu/repos/iuc/spades_metaviralspades/spades_metaviralspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + toolshed.g2.bx.psu.edu/repos/iuc/spades_metaplasmidspades/spades_metaplasmidspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + toolshed.g2.bx.psu.edu/repos/iuc/spades_coronaspades/spades_coronaspades/.*: + cores: 10 + mem: 8 + toolshed.g2.bx.psu.edu/repos/iuc/spades_biosyntheticspades/spades_biosyntheticspades/.*: + inherits: toolshed.g2.bx.psu.edu/repos/nml/spades/spades/.* + + # cactus suite + toolshed.g2.bx.psu.edu/repos/galaxy-australia/cactus_cactus/cactus_cactus/.*: + context: + test_cores: 4 + cores: 20 + mem: 256 + scheduling: + prefer: + - condor-tpv + params: + singularity_enabled: true + toolshed.g2.bx.psu.edu/repos/galaxy-australia/cactus_export/cactus_export/.*: + params: + singularity_enabled: true + + toolshed.g2.bx.psu.edu/repos/iuc/trinity/trinity/.*: + cores: 8 + scheduling: + prefer: + - condor-tpv + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx{int(mem)}G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + rules: + - if: 0.1 <= input_size < 1 + cores: 20 + mem: 100 + - if: 1 <= input_size < 2 + cores: 30 + mem: 200 + - if: 2 <= input_size < 30 + cores: 60 + mem: 950 + - if: input_size >= 30 + fail: + Too much data, we cannot support such large Trinity assemblies with our + backend. Please use another server for your job. 
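The Trinity entry above also shows TPV's value templating: expressions in curly braces inside `env` values are evaluated against the final entity, so `-Xmx{int(mem)}G` always tracks whatever `mem` the matching rule settled on. A short sketch of the idea, again with a hypothetical tool id:

    toolshed.g2.bx.psu.edu/repos/example/javatool/javatool/.*:  # hypothetical id
      mem: 16
      env:
        # {int(mem)} is rendered at scheduling time; with the base
        # allocation this becomes -Xmx16G
        _JAVA_OPTIONS: -Xmx{int(mem)}G -Xms1G
      rules:
        - if: input_size >= 5
          mem: 64  # the same template now renders as -Xmx64G

Entries with a fixed footprint can simply hardcode the heap, as the MSGF+ entries further down do; templating is only needed where rules rescale `mem`.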
+ + '.*mothur_.*': + cores: 1 + mem: 90 + params: + submit_requirements: 'GalaxyGroup == "compute_mothur"' + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 + docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dnb08/galaxy_db/:rw,/data/dnb-ds02/galaxy_db/:ro,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" + docker_default_container_id: centos:8.3.2011 + scheduling: + require: + - docker + # see https://github.com/galaxyproject/galaxy/issues/16121#issuecomment-1555153421 + ##- embedded-pulsar + + '.*mothur_classify_seqs.*': + cores: 2 + mem: 20 + params: + submit_requirements: 'GalaxyGroup == "compute_mothur"' + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 + docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dnb08/galaxy_db/:rw,/data/dnb-ds02/galaxy_db/:ro,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" + docker_default_container_id: centos:8.3.2011 + scheduling: + require: + - docker + # see https://github.com/galaxyproject/galaxy/issues/16121#issuecomment-1555153421 + ##- embedded-pulsar + + '.*bioext_bam2msa.*': + params: + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 + docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dnb08/galaxy_db/:rw,/data/dnb-ds02/galaxy_db/:ro,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" + docker_default_container_id: centos:8.3.2011 + scheduling: + require: + - docker + - embedded-pulsar + + 'last_.*': + params: + docker_run_extra_arguments: --pids-limit 10000 --ulimit fsize=1000000000 --env TERM=vt100 + docker_volumes: "$_CONDOR_SCRATCH_DIR:rw,$job_directory:rw,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw,/data/db/:ro,/data/dnb01/galaxy_db/:ro,/data/dnb02/galaxy_db/:ro,/data/dnb03/galaxy_db/:ro,/data/dnb05/galaxy_db/:ro,/data/dnb06/galaxy_db/:rw,/data/dnb07/galaxy_db/:rw,/data/dp01/galaxy_db/:rw,/data/0/galaxy_db/:ro,/data/1/galaxy_db/:ro,/data/2/galaxy_db/:ro,/data/3/galaxy_db/:ro,/data/4/galaxy_db/:ro,/data/5/galaxy_import/galaxy_user_data/:ro,/data/6/galaxy_db/:ro,/data/7/galaxy_db/:ro,/usr/local/tools/:ro" + docker_default_container_id: centos:8.3.2011 + scheduling: + require: + - docker + -
embedded-pulsar + + toolshed.g2.bx.psu.edu/repos/bgruening/blobtoolkit/blobtoolkit/.*: + cores: 8 + mem: 20 + inherits: basic_docker_tool + params: + docker_run_extra_arguments: --user 999 + + # 4 GB seems to be enough for most runs + toolshed.g2.bx.psu.edu/repos/iuc/purge_dups/purge_dups/.*: + cores: 1 + mem: 6 + + toolshed.g2.bx.psu.edu/repos/devteam/picard/picard_MarkDuplicates/.*: + cores: 8 + mem: 20 + inherits: basic_docker_tool + params: + docker_run_extra_arguments: --user 999 + env: + _JAVA_OPTIONS: -Xmx{int(mem)}G -Xms1G + + toolshed.g2.bx.psu.edu/repos/bgruening/diamond/diamond/.*: + cores: 6 + mem: 90 + rules: + - if: input_size >= 30 + cores: 12 + toolshed.g2.bx.psu.edu/repos/bgruening/xchem_transfs_scoring/xchem_transfs_scoring/.*: + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/bgruening/openduck_run_smd/openduck_run_smd/.*: + params: + docker_set_user: 1000 + docker_run_extra_arguments: '-e "OPENDUCK_GPU_PARAM=$OPENDUCK_GPU_PARAM" --gpus all' + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/bgruening-util/stress_ng/stress_ng/.*: + scheduling: + require: + - singularity + - conda + toolshed.g2.bx.psu.edu/repos/galaxyp/maxquant/maxquant/.*: + scheduling: + require: + - singularity + toolshed.g2.bx.psu.edu/repos/iuc/lumpy_prep/lumpy_prep/.*: + scheduling: + require: + - singularity + - conda + # Is there a way to avoid this? + ".*pcgr.*": + mem: 16 + cores: 8 + env: + GALAXY_PCGR_DIR: "/data/db/databases/pcgr" + scheduling: + require: + - docker + toolshed.g2.bx.psu.edu/repos/iuc/vardict_java/vardict_java/.*: + scheduling: + require: + - singularity + - conda +# Not for Pulsar, or is the file copied? + toolshed.g2.bx.psu.edu/repos/climate/cds_essential_variability/cds_essential_variability/.*: + env: + COPERNICUS_CDSAPIRC_KEY_FILE: /data/db/data_managers/COPERNICUS_CDSAPIRC_KEY_FILE + toolshed.g2.bx.psu.edu/repos/iuc/idr_download_by_ids/idr_download_by_ids/.*: + scheduling: + require: + - singularity + - conda + toolshed.g2.bx.psu.edu/repos/imgteam/overlay_moving_and_fixed_image/ip_viz_overlay_moving_and_fixed_image/.*: + cores: 8 + basic_numba_tool: + env: + NUMBA_CACHE_DIR: /data/2/galaxy_db/tmp + OMP_NUM_THREADS: 4 + OPENBLAS_NUM_THREADS: 4 + MKL_NUM_THREADS: 4 + VECLIB_MAXIMUM_THREADS: 4 + NUMEXPR_NUM_THREADS: 4 + NUMBA_NUM_THREADS: 4 + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_process_scans/dimspy_process_scans/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_replicate_filter/dimspy_replicate_filter/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/computational-metabolomics/dimspy_align_samples/dimspy_align_samples/.*: + inherits: basic_numba_tool + toolshed.g2.bx.psu.edu/repos/galaxyp/openms_msgfplusadapter/MSGFPlusAdapter/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + toolshed.g2.bx.psu.edu/repos/iracooke/msgfplus/msgfplus/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + toolshed.g2.bx.psu.edu/repos/bgruening/repeat_masker/repeatmasker_wrapper/.*: + rules: + - if: helpers.tool_version_lt(tool, '4.1.5') + env: + RM_LIB_PATH: "/data/db/databases/dfam/3.4/" + - if: helpers.tool_version_gte(tool, '4.1.5') + cores: 4 + toolshed.g2.bx.psu.edu/repos/galaxyp/reactome_pathwaymatcher/reactome_pathwaymatcher/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx17G -Xms1G
-Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/bbtools_callvariants/bbtools_callvariants/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/crs4/prokka/prokka/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/devteam/picard/picard_SortSam/.*: + env: + _JAVA_OPTIONS: -Xmx4G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/peptide_shaker/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=4G -Xmx120G -Xms4G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/migmap/migmap/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=6G -Xmx14G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_cluster_reduce_dimension/scanpy_cluster_reduce_dimension/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_filter/scanpy_filter/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_inspect/scanpy_inspect/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_normalize/scanpy_normalize/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_remove_confounders/scanpy_remove_confounders/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/scanpy_plot/scanpy_plot/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/devteam/sam_merge/sam_merge2/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx15G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/pjbriggs/trimmomatic/trimmomatic/.*: + env: + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/.*: + cores: 16 + mem: 90 + env: + TERM: vt100 + _JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp + + toolshed.g2.bx.psu.edu/repos/imgteam/unzip/unzip/.*: + scheduling: + require: + - singularity + + # Also on add_to_tpv_shared_db.yml but without NUMBA_CACHE_DIR + toolshed.g2.bx.psu.edu/repos/iuc/gemini_inheritance/gemini_inheritance/.*: + inherits: basic_numba_tool + + toolshed.g2.bx.psu.edu/repos/iuc/chira_map/chira_map/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_merge/chira_merge/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_quantify/chira_quantify/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/chira_extract/chira_extract/.*: + scheduling: + require: + - singularity + - conda + + toolshed.g2.bx.psu.edu/repos/iuc/semibin/semibin/.*: + mem: 8 + + toolshed.g2.bx.psu.edu/repos/iuc/circos/circos/.*: + scheduling: + require: + - singularity diff --git a/files/galaxy/tpv/users.yml b/files/galaxy/tpv/users.yml new file mode 100644 index 000000000..fc9590444 --- /dev/null +++ b/files/galaxy/tpv/users.yml @@ -0,0 +1,3 @@ +--- +users: + kuntzm@informatik.uni-freiburg.de: diff --git a/files/gxadmin-local b/files/gxadmin-local deleted file mode 100644 index 674285dd2..000000000 --- a/files/gxadmin-local +++ 
/dev/null @@ -1,8 +0,0 @@ -local_cu() { ## : Shows active users in last 10 minutes - handle_help "$@" <<-EOFhelp - cu unique sorts the IP adresses from gunicorns log, using "GET /history/current_history_json" - and prints it in influx line format - EOFhelp - - echo "active_users,timespan=last_10_min users=$(journalctl -u galaxy-gunicorn@*.service --since '10 minutes ago' | grep '/history/current_history_json' | awk '{print $11}' | sort -u | wc -l)" -} diff --git a/files/welcome-sites/flying-bird-1.svg b/files/welcome-sites/flying-bird-1.svg new file mode 100644 index 000000000..f3e02e0a5 --- /dev/null +++ b/files/welcome-sites/flying-bird-1.svg @@ -0,0 +1,215 @@ + [215 lines of SVG markup not reproduced here] \ No newline at end of file diff --git a/files/welcome-sites/flying-bird-2.svg b/files/welcome-sites/flying-bird-2.svg new file mode 100644 index 000000000..76dc6b96b --- /dev/null +++ b/files/welcome-sites/flying-bird-2.svg @@ -0,0 +1,209 @@ + [209 lines of SVG markup not reproduced here] \ No newline at end of file diff --git a/galaxy-test.yml b/galaxy-test.yml index 6bcebe290..0b35a91a6 100644 --- a/galaxy-test.yml +++ b/galaxy-test.yml @@ -18,6 +18,8 @@ - secret_group_vars/oidc.yml # AAI private key - secret_group_vars/db-test.yml # DB URL + some postgres stuff - secret_group_vars/all.yml # All of the other assorted secrets... + - mounts/mountpoints.yml + - mounts/dest/all.yml handlers: - name: Restart Galaxy shell: | diff --git a/grafana.yml b/grafana.yml index 4a7ad954e..3c32d0f29 100644 --- a/grafana.yml +++ b/grafana.yml @@ -50,4 +50,5 @@ - cloudalchemy.grafana - pgs - hxr.grafana-gitter-bridge + - usegalaxy_eu.grafana_matrix_forwarder - dj-wasabi.telegraf diff --git a/group_vars/all.yml b/group_vars/all.yml index 531b92643..47008d132 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -23,6 +23,7 @@ nginx_selinux_allow_local_connections: true # Allow root login on all machines - TBD ssh_allow_root_with_key: true +redis_connection_string: "redis://:{{ redis_requirepass }}@mq.galaxyproject.eu:6379/0" # Chrony chrony_server: 'time.ruf.uni-freiburg.de iburst' chrony_acquisitionport: 0 @@ -32,6 +33,11 @@ chrony_keyfile: '/etc/chrony.keys' # Telegraf var dc: ufr-rz +# OS Hardening +os_auditd_max_log_file_action: rotate +os_auditd_space_left: 500 +os_auditd_space_left_action: suspend + # Telegraf telegraf_agent_package_state: latest telegraf_agent_output: @@ -174,12 +180,14 @@ autofs_conf_files: - gxtst -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/ws01/galaxy-sync/test gxkey: - gxkey -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/ws01/galaxy-sync/main + - galaxy-sync -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/galaxy-sync jwd: - jwd -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/ws01/& - jwd01 -rw,hard,nosuid zfs1.galaxyproject.eu:/export/& - jwd02f -rw,hard,nosuid zfs2f.galaxyproject.eu:/export/& - jwd03f -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/ws02/& - jwd04 -rw,hard,nosuid zfs3f.galaxyproject.eu:/export/& + - jwd05e -rw,hard,nosuid zfs3f.galaxyproject.eu:/export/& discontinued: - 0 -rw,hard,nosuid sn01.bi.uni-freiburg.de:/export/data3/galaxy/net/data/& - 1 -rw,hard,nosuid sn03.bi.uni-freiburg.de:/export/galaxy1/data/& @@ -198,4 +206,5 @@ autofs_conf_files: usrlocal: - /usr/local/tools -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/dnb01/tools usrlocal_celerycluster: + - /tmp -rw,hard,nosuid,nconnect=2 denbi.svm.bwsfs.uni-freiburg.de:/ws01/jwd/tmp - /opt/galaxy -rw,hard,nosuid,nconnect=2
denbi.svm.bwsfs.uni-freiburg.de:/ws01/galaxy-sync/main diff --git a/group_vars/celerycluster.yml b/group_vars/celerycluster.yml index 053e6bd5b..479368e1d 100644 --- a/group_vars/celerycluster.yml +++ b/group_vars/celerycluster.yml @@ -1,10 +1,17 @@ --- -# create_user task user_name: galaxy -user_uid: 999 -user_comment: Galaxy service user_group_name: galaxy -user_gid: 999 + +# create_user task +handy_groups: + - group_name: "{{ user_group_name }}" + group_gid: 999 + +handy_users: + - user_name: "{{ user_name }}" + user_uid: 999 + user_comment: Galaxy service + user_group: "{{ user_group_name }}" galaxy_user: name: "{{ user_name }}" diff --git a/group_vars/custom-sites.yml b/group_vars/custom-sites.yml index 3f2ea1799..6b1bb95b0 100644 --- a/group_vars/custom-sites.yml +++ b/group_vars/custom-sites.yml @@ -1,12 +1,12 @@ --- -galaxy_custom_site_base_css: '#masthead { background-color: #003399;}' +galaxy_custom_site_base_css: '#masthead { background-color: #003399 !important;}' galaxy_custom_sites: - name: assembly brand: Genome Assembly custom_css: | #masthead { - background: linear-gradient(20deg, rgb(7, 40, 98) 0%, rgb(69, 122, 184) 48%, rgba(165,204,210,0.9676562309265136) 74%, rgb(228, 195, 131) 92%, rgb(203, 119, 79) 100%); + background: linear-gradient(20deg, rgb(7, 40, 98) 0%, rgb(69, 122, 184) 48%, rgba(165,204,210,0.9676562309265136) 74%, rgb(228, 195, 131) 92%, rgb(203, 119, 79) 100%) !important; } @keyframes fly-right-one { 0% { @@ -84,7 +84,7 @@ galaxy_custom_sites: brand: Climate custom_css: | #masthead { - background: linear-gradient(to right,#2171b5 0.00%,#2171b5 0.83%,#c6dbef 0.83%,#c6dbef 1.65%,#9ecae1 1.65%,#9ecae1 2.48%,#fee0d2 2.48%,#fee0d2 3.31%,#6baed6 3.31%,#6baed6 4.13%,#9ecae1 4.13%,#9ecae1 4.96%,#08519c 4.96%,#08519c 5.79%,#08306b 5.79%,#08306b 6.61%,#2171b5 6.61%,#2171b5 7.44%,#4292c6 7.44%,#4292c6 8.26%,#9ecae1 8.26%,#9ecae1 9.09%,#2171b5 9.09%,#2171b5 9.92%,#4292c6 9.92%,#4292c6 10.74%,#9ecae1 10.74%,#9ecae1 11.57%,#deebf7 11.57%,#deebf7 12.40%,#9ecae1 12.40%,#9ecae1 13.22%,#deebf7 13.22%,#deebf7 14.05%,#4292c6 14.05%,#4292c6 14.88%,#08519c 14.88%,#08519c 15.70%,#deebf7 15.70%,#deebf7 16.53%,#9ecae1 16.53%,#9ecae1 17.36%,#c6dbef 17.36%,#c6dbef 18.18%,#6baed6 18.18%,#6baed6 19.01%,#4292c6 19.01%,#4292c6 19.83%,#2171b5 19.83%,#2171b5 20.66%,#deebf7 20.66%,#deebf7 21.49%,#fcbba1 21.49%,#fcbba1 22.31%,#9ecae1 22.31%,#9ecae1 23.14%,#deebf7 23.14%,#deebf7 23.97%,#9ecae1 23.97%,#9ecae1 24.79%,#deebf7 24.79%,#deebf7 25.62%,#4292c6 25.62%,#4292c6 26.45%,#deebf7 26.45%,#deebf7 27.27%,#2171b5 27.27%,#2171b5 28.10%,#fee0d2 28.10%,#fee0d2 28.93%,#fcbba1 28.93%,#fcbba1 29.75%,#08519c 29.75%,#08519c 30.58%,#9ecae1 30.58%,#9ecae1 31.40%,#4292c6 31.40%,#4292c6 32.23%,#c6dbef 32.23%,#c6dbef 33.06%,#fee0d2 33.06%,#fee0d2 33.88%,#9ecae1 33.88%,#9ecae1 34.71%,#c6dbef 34.71%,#c6dbef 35.54%,#2171b5 35.54%,#2171b5 36.36%,#fcbba1 36.36%,#fcbba1 37.19%,#4292c6 37.19%,#4292c6 38.02%,#c6dbef 38.02%,#c6dbef 38.84%,#4292c6 38.84%,#4292c6 39.67%,#ef3b2c 39.67%,#ef3b2c 40.50%,#deebf7 40.50%,#deebf7 41.32%,#fee0d2 41.32%,#fee0d2 42.15%,#c6dbef 42.15%,#c6dbef 42.98%,#08306b 42.98%,#08306b 43.80%,#08519c 43.80%,#08519c 44.63%,#2171b5 44.63%,#2171b5 45.45%,#fcbba1 45.45%,#fcbba1 46.28%,#c6dbef 46.28%,#c6dbef 47.11%,#fcbba1 47.11%,#fcbba1 47.93%,#deebf7 47.93%,#deebf7 48.76%,#fcbba1 48.76%,#fcbba1 49.59%,#fc9272 49.59%,#fc9272 50.41%,#fee0d2 50.41%,#fee0d2 51.24%,#9ecae1 51.24%,#9ecae1 52.07%,#fcbba1 52.07%,#fcbba1 52.89%,#6baed6 52.89%,#6baed6 53.72%,#4292c6 53.72%,#4292c6 54.55%,#08306b 
54.55%,#08306b 55.37%,#fee0d2 55.37%,#fee0d2 56.20%,#c6dbef 56.20%,#c6dbef 57.02%,#fcbba1 57.02%,#fcbba1 57.85%,#deebf7 57.85%,#deebf7 58.68%,#fcbba1 58.68%,#fcbba1 59.50%,#08519c 59.50%,#08519c 60.33%,#9ecae1 60.33%,#9ecae1 61.16%,#4292c6 61.16%,#4292c6 61.98%,#deebf7 61.98%,#deebf7 62.81%,#fcbba1 62.81%,#fcbba1 63.64%,#9ecae1 63.64%,#9ecae1 64.46%,#6baed6 64.46%,#6baed6 65.29%,#deebf7 65.29%,#deebf7 66.12%,#6baed6 66.12%,#6baed6 66.94%,#c6dbef 66.94%,#c6dbef 67.77%,#fcbba1 67.77%,#fcbba1 68.60%,#deebf7 68.60%,#deebf7 69.42%,#fee0d2 69.42%,#fee0d2 70.25%,#6baed6 70.25%,#6baed6 71.07%,#4292c6 71.07%,#4292c6 71.90%,#c6dbef 71.90%,#c6dbef 72.73%,#fcbba1 72.73%,#fcbba1 73.55%,#9ecae1 73.55%,#9ecae1 74.38%,#2171b5 74.38%,#2171b5 75.21%,#9ecae1 75.21%,#9ecae1 76.03%,#2171b5 76.03%,#2171b5 76.86%,#fc9272 76.86%,#fc9272 77.69%,#ef3b2c 77.69%,#ef3b2c 78.51%,#c6dbef 78.51%,#c6dbef 79.34%,#fb6a4a 79.34%,#fb6a4a 80.17%,#deebf7 80.17%,#deebf7 80.99%,#cb181d 80.99%,#cb181d 81.82%,#fcbba1 81.82%,#fcbba1 82.64%,#08519c 82.64%,#08519c 83.47%,#fcbba1 83.47%,#fcbba1 84.30%,#fc9272 84.30%,#fc9272 85.12%,#ef3b2c 85.12%,#ef3b2c 85.95%,#cb181d 85.95%,#cb181d 86.78%,#fcbba1 86.78%,#fcbba1 87.60%,#ef3b2c 87.60%,#ef3b2c 88.43%,#fb6a4a 88.43%,#fb6a4a 89.26%,#fcbba1 89.26%,#fcbba1 90.08%,#ef3b2c 90.08%,#ef3b2c 90.91%,#cb181d 90.91%,#cb181d 91.74%,#ef3b2c 91.74%,#ef3b2c 92.56%,#fc9272 92.56%,#fc9272 93.39%,#6baed6 93.39%,#6baed6 94.21%,#ef3b2c 94.21%,#ef3b2c 95.04%,#fc9272 95.04%,#fc9272 95.87%,#fee0d2 95.87%,#fee0d2 96.69%,#67000d 96.69%,#67000d 97.52%,#cb181d 97.52%,#cb181d 98.35%,#ef3b2c 98.35%,#ef3b2c 99.17%,#67000d 99.17%,#67000d 100.00%); + background: linear-gradient(to right,#2171b5 0.00%,#2171b5 0.83%,#c6dbef 0.83%,#c6dbef 1.65%,#9ecae1 1.65%,#9ecae1 2.48%,#fee0d2 2.48%,#fee0d2 3.31%,#6baed6 3.31%,#6baed6 4.13%,#9ecae1 4.13%,#9ecae1 4.96%,#08519c 4.96%,#08519c 5.79%,#08306b 5.79%,#08306b 6.61%,#2171b5 6.61%,#2171b5 7.44%,#4292c6 7.44%,#4292c6 8.26%,#9ecae1 8.26%,#9ecae1 9.09%,#2171b5 9.09%,#2171b5 9.92%,#4292c6 9.92%,#4292c6 10.74%,#9ecae1 10.74%,#9ecae1 11.57%,#deebf7 11.57%,#deebf7 12.40%,#9ecae1 12.40%,#9ecae1 13.22%,#deebf7 13.22%,#deebf7 14.05%,#4292c6 14.05%,#4292c6 14.88%,#08519c 14.88%,#08519c 15.70%,#deebf7 15.70%,#deebf7 16.53%,#9ecae1 16.53%,#9ecae1 17.36%,#c6dbef 17.36%,#c6dbef 18.18%,#6baed6 18.18%,#6baed6 19.01%,#4292c6 19.01%,#4292c6 19.83%,#2171b5 19.83%,#2171b5 20.66%,#deebf7 20.66%,#deebf7 21.49%,#fcbba1 21.49%,#fcbba1 22.31%,#9ecae1 22.31%,#9ecae1 23.14%,#deebf7 23.14%,#deebf7 23.97%,#9ecae1 23.97%,#9ecae1 24.79%,#deebf7 24.79%,#deebf7 25.62%,#4292c6 25.62%,#4292c6 26.45%,#deebf7 26.45%,#deebf7 27.27%,#2171b5 27.27%,#2171b5 28.10%,#fee0d2 28.10%,#fee0d2 28.93%,#fcbba1 28.93%,#fcbba1 29.75%,#08519c 29.75%,#08519c 30.58%,#9ecae1 30.58%,#9ecae1 31.40%,#4292c6 31.40%,#4292c6 32.23%,#c6dbef 32.23%,#c6dbef 33.06%,#fee0d2 33.06%,#fee0d2 33.88%,#9ecae1 33.88%,#9ecae1 34.71%,#c6dbef 34.71%,#c6dbef 35.54%,#2171b5 35.54%,#2171b5 36.36%,#fcbba1 36.36%,#fcbba1 37.19%,#4292c6 37.19%,#4292c6 38.02%,#c6dbef 38.02%,#c6dbef 38.84%,#4292c6 38.84%,#4292c6 39.67%,#ef3b2c 39.67%,#ef3b2c 40.50%,#deebf7 40.50%,#deebf7 41.32%,#fee0d2 41.32%,#fee0d2 42.15%,#c6dbef 42.15%,#c6dbef 42.98%,#08306b 42.98%,#08306b 43.80%,#08519c 43.80%,#08519c 44.63%,#2171b5 44.63%,#2171b5 45.45%,#fcbba1 45.45%,#fcbba1 46.28%,#c6dbef 46.28%,#c6dbef 47.11%,#fcbba1 47.11%,#fcbba1 47.93%,#deebf7 47.93%,#deebf7 48.76%,#fcbba1 48.76%,#fcbba1 49.59%,#fc9272 49.59%,#fc9272 50.41%,#fee0d2 50.41%,#fee0d2 51.24%,#9ecae1 51.24%,#9ecae1 
52.07%,#fcbba1 52.07%,#fcbba1 52.89%,#6baed6 52.89%,#6baed6 53.72%,#4292c6 53.72%,#4292c6 54.55%,#08306b 54.55%,#08306b 55.37%,#fee0d2 55.37%,#fee0d2 56.20%,#c6dbef 56.20%,#c6dbef 57.02%,#fcbba1 57.02%,#fcbba1 57.85%,#deebf7 57.85%,#deebf7 58.68%,#fcbba1 58.68%,#fcbba1 59.50%,#08519c 59.50%,#08519c 60.33%,#9ecae1 60.33%,#9ecae1 61.16%,#4292c6 61.16%,#4292c6 61.98%,#deebf7 61.98%,#deebf7 62.81%,#fcbba1 62.81%,#fcbba1 63.64%,#9ecae1 63.64%,#9ecae1 64.46%,#6baed6 64.46%,#6baed6 65.29%,#deebf7 65.29%,#deebf7 66.12%,#6baed6 66.12%,#6baed6 66.94%,#c6dbef 66.94%,#c6dbef 67.77%,#fcbba1 67.77%,#fcbba1 68.60%,#deebf7 68.60%,#deebf7 69.42%,#fee0d2 69.42%,#fee0d2 70.25%,#6baed6 70.25%,#6baed6 71.07%,#4292c6 71.07%,#4292c6 71.90%,#c6dbef 71.90%,#c6dbef 72.73%,#fcbba1 72.73%,#fcbba1 73.55%,#9ecae1 73.55%,#9ecae1 74.38%,#2171b5 74.38%,#2171b5 75.21%,#9ecae1 75.21%,#9ecae1 76.03%,#2171b5 76.03%,#2171b5 76.86%,#fc9272 76.86%,#fc9272 77.69%,#ef3b2c 77.69%,#ef3b2c 78.51%,#c6dbef 78.51%,#c6dbef 79.34%,#fb6a4a 79.34%,#fb6a4a 80.17%,#deebf7 80.17%,#deebf7 80.99%,#cb181d 80.99%,#cb181d 81.82%,#fcbba1 81.82%,#fcbba1 82.64%,#08519c 82.64%,#08519c 83.47%,#fcbba1 83.47%,#fcbba1 84.30%,#fc9272 84.30%,#fc9272 85.12%,#ef3b2c 85.12%,#ef3b2c 85.95%,#cb181d 85.95%,#cb181d 86.78%,#fcbba1 86.78%,#fcbba1 87.60%,#ef3b2c 87.60%,#ef3b2c 88.43%,#fb6a4a 88.43%,#fb6a4a 89.26%,#fcbba1 89.26%,#fcbba1 90.08%,#ef3b2c 90.08%,#ef3b2c 90.91%,#cb181d 90.91%,#cb181d 91.74%,#ef3b2c 91.74%,#ef3b2c 92.56%,#fc9272 92.56%,#fc9272 93.39%,#6baed6 93.39%,#6baed6 94.21%,#ef3b2c 94.21%,#ef3b2c 95.04%,#fc9272 95.04%,#fc9272 95.87%,#fee0d2 95.87%,#fee0d2 96.69%,#67000d 96.69%,#67000d 97.52%,#cb181d 97.52%,#cb181d 98.35%,#ef3b2c 98.35%,#ef3b2c 99.17%,#67000d 99.17%,#67000d 100.00%) !important; } #masthead .navbar-brand { background: #3337; @@ -106,8 +106,8 @@ galaxy_custom_sites: wallpaper: true custom_css: | #masthead { - background-image: url("/static/dist/hicexplorer.png"); - background-size: 340px; + background-image: url("/static/dist/hicexplorer.png") !important; + background-size: 340px !important; } #masthead .navbar-brand { background: #3337; @@ -129,8 +129,8 @@ galaxy_custom_sites: wallpaper: true custom_css: | #masthead { - background-image: url("/static/dist/annotation.png"); - background-size: 23%; + background-image: url("/static/dist/annotation.png") !important; + background-size: 23% !important; } #masthead .navbar-brand { background: #3337; @@ -163,7 +163,7 @@ galaxy_custom_sites: brand: Ecology custom_css: | #masthead { - background: linear-gradient(to right,#2171b5 0.00%,#2171b5 0.83%,#c6dbef 0.83%,#c6dbef 1.65%,#9ecae1 1.65%,#9ecae1 2.48%,#fee0d2 2.48%,#fee0d2 3.31%,#6baed6 3.31%,#6baed6 4.13%,#9ecae1 4.13%,#9ecae1 4.96%,#08519c 4.96%,#08519c 5.79%,#08306b 5.79%,#08306b 6.61%,#2171b5 6.61%,#2171b5 7.44%,#4292c6 7.44%,#4292c6 8.26%,#9ecae1 8.26%,#9ecae1 9.09%,#2171b5 9.09%,#2171b5 9.92%,#4292c6 9.92%,#4292c6 10.74%,#9ecae1 10.74%,#9ecae1 11.57%,#deebf7 11.57%,#deebf7 12.40%,#9ecae1 12.40%,#9ecae1 13.22%,#deebf7 13.22%,#deebf7 14.05%,#4292c6 14.05%,#4292c6 14.88%,#08519c 14.88%,#08519c 15.70%,#deebf7 15.70%,#deebf7 16.53%,#9ecae1 16.53%,#9ecae1 17.36%,#c6dbef 17.36%,#c6dbef 18.18%,#6baed6 18.18%,#6baed6 19.01%,#4292c6 19.01%,#4292c6 19.83%,#2171b5 19.83%,#2171b5 20.66%,#deebf7 20.66%,#deebf7 21.49%,#fcbba1 21.49%,#fcbba1 22.31%,#9ecae1 22.31%,#9ecae1 23.14%,#deebf7 23.14%,#deebf7 23.97%,#9ecae1 23.97%,#9ecae1 24.79%,#deebf7 24.79%,#deebf7 25.62%,#4292c6 25.62%,#4292c6 26.45%,#deebf7 26.45%,#deebf7 27.27%,#2171b5 27.27%,#2171b5 
28.10%,#fee0d2 28.10%,#fee0d2 28.93%,#fcbba1 28.93%,#fcbba1 29.75%,#08519c 29.75%,#08519c 30.58%,#9ecae1 30.58%,#9ecae1 31.40%,#4292c6 31.40%,#4292c6 32.23%,#c6dbef 32.23%,#c6dbef 33.06%,#fee0d2 33.06%,#fee0d2 33.88%,#9ecae1 33.88%,#9ecae1 34.71%,#c6dbef 34.71%,#c6dbef 35.54%,#2171b5 35.54%,#2171b5 36.36%,#fcbba1 36.36%,#fcbba1 37.19%,#4292c6 37.19%,#4292c6 38.02%,#c6dbef 38.02%,#c6dbef 38.84%,#4292c6 38.84%,#4292c6 39.67%,#ef3b2c 39.67%,#ef3b2c 40.50%,#deebf7 40.50%,#deebf7 41.32%,#fee0d2 41.32%,#fee0d2 42.15%,#c6dbef 42.15%,#c6dbef 42.98%,#08306b 42.98%,#08306b 43.80%,#08519c 43.80%,#08519c 44.63%,#2171b5 44.63%,#2171b5 45.45%,#fcbba1 45.45%,#fcbba1 46.28%,#c6dbef 46.28%,#c6dbef 47.11%,#fcbba1 47.11%,#fcbba1 47.93%,#deebf7 47.93%,#deebf7 48.76%,#fcbba1 48.76%,#fcbba1 49.59%,#fc9272 49.59%,#fc9272 50.41%,#fee0d2 50.41%,#fee0d2 51.24%,#9ecae1 51.24%,#9ecae1 52.07%,#fcbba1 52.07%,#fcbba1 52.89%,#6baed6 52.89%,#6baed6 53.72%,#4292c6 53.72%,#4292c6 54.55%,#08306b 54.55%,#08306b 55.37%,#fee0d2 55.37%,#fee0d2 56.20%,#c6dbef 56.20%,#c6dbef 57.02%,#fcbba1 57.02%,#fcbba1 57.85%,#deebf7 57.85%,#deebf7 58.68%,#fcbba1 58.68%,#fcbba1 59.50%,#08519c 59.50%,#08519c 60.33%,#9ecae1 60.33%,#9ecae1 61.16%,#4292c6 61.16%,#4292c6 61.98%,#deebf7 61.98%,#deebf7 62.81%,#fcbba1 62.81%,#fcbba1 63.64%,#9ecae1 63.64%,#9ecae1 64.46%,#6baed6 64.46%,#6baed6 65.29%,#deebf7 65.29%,#deebf7 66.12%,#6baed6 66.12%,#6baed6 66.94%,#c6dbef 66.94%,#c6dbef 67.77%,#fcbba1 67.77%,#fcbba1 68.60%,#deebf7 68.60%,#deebf7 69.42%,#fee0d2 69.42%,#fee0d2 70.25%,#6baed6 70.25%,#6baed6 71.07%,#4292c6 71.07%,#4292c6 71.90%,#c6dbef 71.90%,#c6dbef 72.73%,#fcbba1 72.73%,#fcbba1 73.55%,#9ecae1 73.55%,#9ecae1 74.38%,#2171b5 74.38%,#2171b5 75.21%,#9ecae1 75.21%,#9ecae1 76.03%,#2171b5 76.03%,#2171b5 76.86%,#fc9272 76.86%,#fc9272 77.69%,#ef3b2c 77.69%,#ef3b2c 78.51%,#c6dbef 78.51%,#c6dbef 79.34%,#fb6a4a 79.34%,#fb6a4a 80.17%,#deebf7 80.17%,#deebf7 80.99%,#cb181d 80.99%,#cb181d 81.82%,#fcbba1 81.82%,#fcbba1 82.64%,#08519c 82.64%,#08519c 83.47%,#fcbba1 83.47%,#fcbba1 84.30%,#fc9272 84.30%,#fc9272 85.12%,#ef3b2c 85.12%,#ef3b2c 85.95%,#cb181d 85.95%,#cb181d 86.78%,#fcbba1 86.78%,#fcbba1 87.60%,#ef3b2c 87.60%,#ef3b2c 88.43%,#fb6a4a 88.43%,#fb6a4a 89.26%,#fcbba1 89.26%,#fcbba1 90.08%,#ef3b2c 90.08%,#ef3b2c 90.91%,#cb181d 90.91%,#cb181d 91.74%,#ef3b2c 91.74%,#ef3b2c 92.56%,#fc9272 92.56%,#fc9272 93.39%,#6baed6 93.39%,#6baed6 94.21%,#ef3b2c 94.21%,#ef3b2c 95.04%,#fc9272 95.04%,#fc9272 95.87%,#fee0d2 95.87%,#fee0d2 96.69%,#67000d 96.69%,#67000d 97.52%,#cb181d 97.52%,#cb181d 98.35%,#ef3b2c 98.35%,#ef3b2c 99.17%,#67000d 99.17%,#67000d 100.00%); + background: linear-gradient(to right,#2171b5 0.00%,#2171b5 0.83%,#c6dbef 0.83%,#c6dbef 1.65%,#9ecae1 1.65%,#9ecae1 2.48%,#fee0d2 2.48%,#fee0d2 3.31%,#6baed6 3.31%,#6baed6 4.13%,#9ecae1 4.13%,#9ecae1 4.96%,#08519c 4.96%,#08519c 5.79%,#08306b 5.79%,#08306b 6.61%,#2171b5 6.61%,#2171b5 7.44%,#4292c6 7.44%,#4292c6 8.26%,#9ecae1 8.26%,#9ecae1 9.09%,#2171b5 9.09%,#2171b5 9.92%,#4292c6 9.92%,#4292c6 10.74%,#9ecae1 10.74%,#9ecae1 11.57%,#deebf7 11.57%,#deebf7 12.40%,#9ecae1 12.40%,#9ecae1 13.22%,#deebf7 13.22%,#deebf7 14.05%,#4292c6 14.05%,#4292c6 14.88%,#08519c 14.88%,#08519c 15.70%,#deebf7 15.70%,#deebf7 16.53%,#9ecae1 16.53%,#9ecae1 17.36%,#c6dbef 17.36%,#c6dbef 18.18%,#6baed6 18.18%,#6baed6 19.01%,#4292c6 19.01%,#4292c6 19.83%,#2171b5 19.83%,#2171b5 20.66%,#deebf7 20.66%,#deebf7 21.49%,#fcbba1 21.49%,#fcbba1 22.31%,#9ecae1 22.31%,#9ecae1 23.14%,#deebf7 23.14%,#deebf7 23.97%,#9ecae1 23.97%,#9ecae1 24.79%,#deebf7 
24.79%,#deebf7 25.62%,#4292c6 25.62%,#4292c6 26.45%,#deebf7 26.45%,#deebf7 27.27%,#2171b5 27.27%,#2171b5 28.10%,#fee0d2 28.10%,#fee0d2 28.93%,#fcbba1 28.93%,#fcbba1 29.75%,#08519c 29.75%,#08519c 30.58%,#9ecae1 30.58%,#9ecae1 31.40%,#4292c6 31.40%,#4292c6 32.23%,#c6dbef 32.23%,#c6dbef 33.06%,#fee0d2 33.06%,#fee0d2 33.88%,#9ecae1 33.88%,#9ecae1 34.71%,#c6dbef 34.71%,#c6dbef 35.54%,#2171b5 35.54%,#2171b5 36.36%,#fcbba1 36.36%,#fcbba1 37.19%,#4292c6 37.19%,#4292c6 38.02%,#c6dbef 38.02%,#c6dbef 38.84%,#4292c6 38.84%,#4292c6 39.67%,#ef3b2c 39.67%,#ef3b2c 40.50%,#deebf7 40.50%,#deebf7 41.32%,#fee0d2 41.32%,#fee0d2 42.15%,#c6dbef 42.15%,#c6dbef 42.98%,#08306b 42.98%,#08306b 43.80%,#08519c 43.80%,#08519c 44.63%,#2171b5 44.63%,#2171b5 45.45%,#fcbba1 45.45%,#fcbba1 46.28%,#c6dbef 46.28%,#c6dbef 47.11%,#fcbba1 47.11%,#fcbba1 47.93%,#deebf7 47.93%,#deebf7 48.76%,#fcbba1 48.76%,#fcbba1 49.59%,#fc9272 49.59%,#fc9272 50.41%,#fee0d2 50.41%,#fee0d2 51.24%,#9ecae1 51.24%,#9ecae1 52.07%,#fcbba1 52.07%,#fcbba1 52.89%,#6baed6 52.89%,#6baed6 53.72%,#4292c6 53.72%,#4292c6 54.55%,#08306b 54.55%,#08306b 55.37%,#fee0d2 55.37%,#fee0d2 56.20%,#c6dbef 56.20%,#c6dbef 57.02%,#fcbba1 57.02%,#fcbba1 57.85%,#deebf7 57.85%,#deebf7 58.68%,#fcbba1 58.68%,#fcbba1 59.50%,#08519c 59.50%,#08519c 60.33%,#9ecae1 60.33%,#9ecae1 61.16%,#4292c6 61.16%,#4292c6 61.98%,#deebf7 61.98%,#deebf7 62.81%,#fcbba1 62.81%,#fcbba1 63.64%,#9ecae1 63.64%,#9ecae1 64.46%,#6baed6 64.46%,#6baed6 65.29%,#deebf7 65.29%,#deebf7 66.12%,#6baed6 66.12%,#6baed6 66.94%,#c6dbef 66.94%,#c6dbef 67.77%,#fcbba1 67.77%,#fcbba1 68.60%,#deebf7 68.60%,#deebf7 69.42%,#fee0d2 69.42%,#fee0d2 70.25%,#6baed6 70.25%,#6baed6 71.07%,#4292c6 71.07%,#4292c6 71.90%,#c6dbef 71.90%,#c6dbef 72.73%,#fcbba1 72.73%,#fcbba1 73.55%,#9ecae1 73.55%,#9ecae1 74.38%,#2171b5 74.38%,#2171b5 75.21%,#9ecae1 75.21%,#9ecae1 76.03%,#2171b5 76.03%,#2171b5 76.86%,#fc9272 76.86%,#fc9272 77.69%,#ef3b2c 77.69%,#ef3b2c 78.51%,#c6dbef 78.51%,#c6dbef 79.34%,#fb6a4a 79.34%,#fb6a4a 80.17%,#deebf7 80.17%,#deebf7 80.99%,#cb181d 80.99%,#cb181d 81.82%,#fcbba1 81.82%,#fcbba1 82.64%,#08519c 82.64%,#08519c 83.47%,#fcbba1 83.47%,#fcbba1 84.30%,#fc9272 84.30%,#fc9272 85.12%,#ef3b2c 85.12%,#ef3b2c 85.95%,#cb181d 85.95%,#cb181d 86.78%,#fcbba1 86.78%,#fcbba1 87.60%,#ef3b2c 87.60%,#ef3b2c 88.43%,#fb6a4a 88.43%,#fb6a4a 89.26%,#fcbba1 89.26%,#fcbba1 90.08%,#ef3b2c 90.08%,#ef3b2c 90.91%,#cb181d 90.91%,#cb181d 91.74%,#ef3b2c 91.74%,#ef3b2c 92.56%,#fc9272 92.56%,#fc9272 93.39%,#6baed6 93.39%,#6baed6 94.21%,#ef3b2c 94.21%,#ef3b2c 95.04%,#fc9272 95.04%,#fc9272 95.87%,#fee0d2 95.87%,#fee0d2 96.69%,#67000d 96.69%,#67000d 97.52%,#cb181d 97.52%,#cb181d 98.35%,#ef3b2c 98.35%,#ef3b2c 99.17%,#67000d 99.17%,#67000d 100.00%) !important; } #masthead .navbar-brand { background: #3337; @@ -190,14 +190,14 @@ galaxy_custom_sites: brand: Imaging custom_css: | #masthead { - background: linear-gradient(90deg, rgb(0, 0, 0) 0%, rgb(0, 69, 227) 17%, rgb(9, 121, 33) 30%, rgb(180, 170, 15) 70%, rgb(193, 35, 0) 79%, rgb(0, 0, 0) 100%); + background: linear-gradient(90deg, rgb(0, 0, 0) 0%, rgb(0, 69, 227) 17%, rgb(9, 121, 33) 30%, rgb(180, 170, 15) 70%, rgb(193, 35, 0) 79%, rgb(0, 0, 0) 100%) !important; } - name: test brand: Testing domain: test.internal.usegalaxy.eu custom_css: | #masthead { - background: linear-gradient(to bottom,#e2453c 0,#e2453c 16%,#e07e39 16%,#e07e39 32%,#e5d667 32%,#e5d667 48%,#51b95b 48%,#51b95b 66%,#1e72b7 66%,#1e72b7 86%,#6f5ba7 86%) no-repeat; + background: linear-gradient(to bottom,#e2453c 0,#e2453c 16%,#e07e39 16%,#e07e39 
32%,#e5d667 32%,#e5d667 48%,#51b95b 48%,#51b95b 66%,#1e72b7 66%,#1e72b7 86%,#6f5ba7 86%) no-repeat !important; } #masthead .navbar-brand { background: #3337; @@ -235,7 +235,7 @@ galaxy_custom_sites: left: 0px !important; } #masthead { - background: linear-gradient(33deg, rgba(131,58,180,1) 0%, rgba(253,29,29,1) 25%, rgba(252,176,69,1) 50%, rgba(253,29,29,1) 75%, rgba(131,58,180,1) 100%); + background: linear-gradient(33deg, rgba(131,58,180,1) 0%, rgba(253,29,29,1) 25%, rgba(252,176,69,1) 50%, rgba(253,29,29,1) 75%, rgba(131,58,180,1) 100%) !important; -webkit-animation: SuperLively 20s ease infinite; -moz-animation: SuperLively 20s ease infinite; animation: SuperLively 20s ease infinite; @@ -260,13 +260,13 @@ galaxy_custom_sites: brand: Plants custom_css: | #masthead { - background: linear-gradient(90deg, #1a5d1a 0%, #369c36 50%, rgb(29 95 28) 100%); + background: linear-gradient(90deg, #1a5d1a 0%, #369c36 50%, rgb(29 95 28) 100%) !important; } - name: virology brand: Virology custom_css: | #masthead { - background: linear-gradient(50deg, hsl(240deg 100% 20%) 0%, hsl(234deg 95% 23%) 13%, hsl(227deg 90% 26%) 26%, hsl(220deg 85% 29%) 38%, hsl(213deg 79% 32%) 52%, hsl(207deg 74% 35%) 66%, hsl(200deg 69% 38%) 82%, hsl(194deg 64% 42%) 100%); + background: linear-gradient(50deg, hsl(240deg 100% 20%) 0%, hsl(234deg 95% 23%) 13%, hsl(227deg 90% 26%) 26%, hsl(220deg 85% 29%) 38%, hsl(213deg 79% 32%) 52%, hsl(207deg 74% 35%) 66%, hsl(200deg 69% 38%) 82%, hsl(194deg 64% 42%) 100%) !important; } - name: lite brand: Lite @@ -282,3 +282,5 @@ galaxy_custom_sites: brand: Materials - name: phage brand: Phage + - name: cancer + brand: Cancer diff --git a/group_vars/galaxy-test.yml b/group_vars/galaxy-test.yml index 819a6cd44..fc7bb5561 100644 --- a/group_vars/galaxy-test.yml +++ b/group_vars/galaxy-test.yml @@ -72,7 +72,7 @@ telegraf_plugins_extra: galaxy_lastlog: plugin: "exec" config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin uwsgi lastlog"] + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin gunicorn lastlog"] - timeout = "15s" - data_format = "influx" - interval = "15s" @@ -289,19 +289,6 @@ galaxy_virtualenv_command: "/usr/bin/python3.8 -m venv" galaxy_nonrepro_tools: /data/dnb01/galaxy_db/test/custom-tools galaxy_nonrepro_commit: master -galaxy_dynamic_job_rules_src_dir: files/galaxy-test/dynamic_rules -galaxy_dynamic_job_rules_dir: "{{ galaxy_root }}/dynamic_rules" -galaxy_dynamic_job_rules: - - usegalaxy/joint_destinations.yaml - - usegalaxy/sorting_hat.py - - usegalaxy/sorting_hat.yaml - - usegalaxy/destination_specifications.yaml - - usegalaxy/blast_destinations.py - - usegalaxy/tool_destinations.yaml - - usegalaxy/dexseq.py - - usegalaxy/wig2bigwig.py - - readme.txt - # Custom override # Our galaxy_tool_dependency_dir is on NFS, and permissions are set in such a # way that they cannot be changed by the ansible connected user. 
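The galaxy_dynamic_job_rules removed above (sorting_hat.py and friends) are superseded by Total Perspective Vortex rules elsewhere in this PR. For orientation, a minimal sketch of a TPV tools.yml entry follows; the tool pattern and resource numbers are invented, see the TPV documentation for the full schema:

tools:
  default:
    cores: 1
    mem: cores * 3.8
  toolshed.g2.bx.psu.edu/repos/iuc/hisat2/.*:
    cores: 8
    scheduling:
      prefer:
        - condor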
@@ -424,6 +411,17 @@ galaxy_jobconf: amqp_consumer_timeout: 2.0 amqp_publish_retry: "true" amqp_publish_retry_max_retries: 60 + - id: pulsar_eu_fr01 + load: galaxy.jobs.runners.pulsar:PulsarMQJobRunner + params: + amqp_url: "pyamqp://galaxy_fr01:{{ rabbitmq_password_galaxy_fr01 }}@mq.galaxyproject.eu:5671//pulsar/galaxy_fr01?ssl=1" + galaxy_url: "https://test.usegalaxy.eu" + manager: test + amqp_acknowledge: "true" + amqp_ack_republish_time: 300 + amqp_consumer_timeout: 2.0 + amqp_publish_retry: "true" + amqp_publish_retry_max_retries: 60 - id: pulsar_eu_be01 load: galaxy.jobs.runners.pulsar:PulsarMQJobRunner params: diff --git a/group_vars/grafana.yml b/group_vars/grafana.yml index b646c605d..0bdd29e68 100644 --- a/group_vars/grafana.yml +++ b/group_vars/grafana.yml @@ -218,3 +218,5 @@ galaxy_nagios_urls: - name: phinch url: "https://usegalaxy.eu/phinch/index.html" code: 200 + +matrix_forwarder_resolve_mode: reaction diff --git a/group_vars/gxconfig.yml b/group_vars/gxconfig.yml index cb8ed2872..bc9d47961 100644 --- a/group_vars/gxconfig.yml +++ b/group_vars/gxconfig.yml @@ -19,6 +19,15 @@ base_app_main: &BASE_APP_MAIN # if running Galaxy from source or `/data` otherwise. data_dir: "{{ galaxy_mutable_data_dir }}" + # The directory containing custom templates for Galaxy, such as + # HTML/text email templates. Defaults to 'templates'. Default + # templates can be found in the Galaxy root under config/templates. + # These can be copied to <templates_dir> if you wish to customize + # them. + # The value of this option will be resolved with respect to + # <config_dir>. + #templates_dir: templates + # Top level cache directory. Any other cache directories # (tool_cache_data_dir, template_cache_path, etc.) should be # subdirectories. @@ -27,10 +36,18 @@ base_app_main: &BASE_APP_MAIN # By default, Galaxy uses a SQLite database at - # '/universe.sqlite'. You may use a SQLAlchemy connection + # '/universe.sqlite'. You may use a SQLAlchemy connection # string to specify an external database instead. # Sample default # 'sqlite:///<data_dir>/universe.sqlite?isolation_level=IMMEDIATE' + # You may specify additional options that will be passed to the + # SQLAlchemy database engine by using the prefix + # "database_engine_option_". For some of these options, default values + # are provided (e.g. see database_engine_option_pool_size, etc.). + # The same applies to `install_database_connection`, for which you + # should use the "install_database_engine_option_" prefix. + # For more options, please check SQLAlchemy's documentation at + # https://docs.sqlalchemy.org/en/14/core/engines.html?highlight=create_engine#sqlalchemy.create_engine database_connection: "{{ galaxy_db_connection }}" # If the server logs errors about not having enough database pool @@ -52,7 +69,7 @@ base_app_main: &BASE_APP_MAIN # issues in the Galaxy process, leave the result on the server # instead. This option is only available for PostgreSQL and is highly # recommended. - # database_engine_option_server_side_cursors: "False" + #database_engine_option_server_side_cursors: false # Log all database transactions, can be useful for debugging and # performance profiling. Logging is done via Python's 'logging' @@ -217,14 +234,14 @@ base_app_main: &BASE_APP_MAIN # conda channels to enable by default # (https://conda.io/docs/user-guide/tasks/manage-channels.html) - conda_ensure_channels: "iuc,conda-forge,bioconda,defaults,bgruening" + conda_ensure_channels: "conda-forge,bioconda"
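As a brief aside on the channel change just above: the trimmed list keeps conda-forge ahead of bioconda and drops the defaults/vendor channels, in line with common Bioconda guidance. A standalone .condarc expressing the same resolution order would look roughly like this (illustrative only, not a file managed by this repository):

channels:
  - conda-forge
  - bioconda
channel_priority: strict

# Use locally-built conda packages.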
#conda_use_local: false # Set to true to instruct Galaxy to look for and install missing tool # dependencies before each job runs. - conda_auto_install: "False" + conda_auto_install: false # Set to true to instruct Galaxy to install Conda from the web # automatically if it cannot find a local copy and conda_exec is not @@ -237,7 +254,7 @@ base_app_main: &BASE_APP_MAIN # creating hardlinks or symlinks. This will prevent problems with some # specific packages (perl, R), at the cost of extra disk space usage # and extra time spent copying packages. - conda_copy_dependencies: "True" + conda_copy_dependencies: true # Path to a file that provides a mapping from abstract packages to # concrete conda packages. See `config/local_conda_mapping.yml.sample` @@ -263,7 +280,7 @@ base_app_main: &BASE_APP_MAIN # This only affects tools where some requirements can be resolved but # not others, most modern best practice tools can use prebuilt # environments in the Conda directory. - use_cached_dependency_manager: "True" + use_cached_dependency_manager: true # By default the tool_dependency_cache_dir is the _cache directory of # the tool dependency directory. @@ -283,19 +300,6 @@ base_app_main: &BASE_APP_MAIN # <config_dir>. tool_sheds_config_file: "{{ galaxy_config_dir }}/tool_sheds_conf.xml" - # This option controls whether legacy datatypes are loaded from - # installed tool shed repositories. We are in the process of - # disabling Tool Shed datatypes. This option with a default of true - # will be added in 22.01, we will disable the datatypes on the big - # public servers during that release. This option will be switched to - # False by default in 22.05 and this broken functionality will be - # removed all together during some future release. - load_tool_shed_datatypes: false - - # Enables user preferences and api endpoint for the beacon - # integration. - enable_beacon_integration: true - # Monitor the tools and tool directories listed in any tool config # file specified in tool_config_file option. If changes are found, # tools are automatically reloaded. Watchdog ( @@ -310,13 +314,13 @@ base_app_main: &BASE_APP_MAIN # Monitor dynamic job rules. If changes are found, rules are # automatically reloaded. Takes the same values as the 'watch_tools' # option. - watch_job_rules: "true" + watch_job_rules: false # Monitor a subset of options in the core configuration file (See # RELOADABLE_CONFIG_OPTIONS in lib/galaxy/config/__init__.py). If # changes are found, modified options are automatically reloaded. # Takes the same values as the 'watch_tools' option. - watch_core_config: "true" + watch_core_config: false # Monitor the interactive tours directory specified in the # 'tour_config_dir' option. If changes are found, modified tours are @@ -334,7 +338,7 @@ base_app_main: &BASE_APP_MAIN # external tooling. # The value of this option will be resolved with respect to # <data_dir>. - #short_term_storage_dir: short_term_web_storage + short_term_storage_dir: '/data/jwd04/short_term_web_storage' # Default duration before short term web storage files will be cleaned # up by Galaxy tasks (in seconds). The default duration is 1 day. @@ -363,7 +367,7 @@ base_app_main: &BASE_APP_MAIN # https://github.com/mulled. Container availability will vary by tool, # this option will only be used for job destinations with Docker or # Singularity enabled. - enable_mulled_containers: "True" + enable_mulled_containers: true # Container resolvers configuration. Set up a file describing # container resolvers to use when discovering containers for Galaxy.
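The "database_engine_option_" prefix introduced in an earlier gxconfig.yml hunk is easiest to see with a concrete sketch; the values below are invented, and the option names are regular SQLAlchemy create_engine() arguments that Galaxy passes through:

galaxy:
  database_connection: "postgresql://galaxy:secret@db.example.org:5432/galaxy"
  # Forwarded to sqlalchemy.create_engine(..., pool_size=10, max_overflow=20)
  database_engine_option_pool_size: 10
  database_engine_option_max_overflow: 20
  # The install database accepts the same knobs under its own prefix
  install_database_engine_option_pool_size: 5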
@@ -511,20 +515,14 @@ base_app_main: &BASE_APP_MAIN # an absolute path begin the path with '/'. This is a comma-separated # list. Add test/functional/webhooks to this list to include the demo # webhooks used to test the webhook framework. - webhooks_dir: "{{ galaxy_webhook_dir }}" + webhooks_dir: "config/plugins/webhooks,{{ galaxy_webhook_dir }}" # Each job is given a unique empty directory as its current working # directory. This option defines in what parent directory those # directories will be created. # The value of this option will be resolved with respect to # <data_dir>. - job_working_directory: /data/jwd01/main - - # If using a cluster, Galaxy will write job scripts and stdout/stderr - # to this directory. - # The value of this option will be resolved with respect to - # <data_dir>. - #cluster_files_directory: pbs + job_working_directory: /data/jwd04/main # Mako templates are compiled as needed and cached for reuse, this # directory is used for the cache @@ -538,7 +536,7 @@ base_app_main: &BASE_APP_MAIN # Number of checks to execute if check_job_script_integrity is # enabled. - check_job_script_integrity_count: 35 + #check_job_script_integrity_count: 35 # Time to sleep between checks if check_job_script_integrity is # enabled (in seconds). @@ -829,7 +827,7 @@ base_app_main: &BASE_APP_MAIN # applications, where a malicious party could provide a link that # appears to reference the Galaxy server, but contains a redirect to a # third-party server, tricking a Galaxy user to access said site. - enable_old_display_applications: "false" + enable_old_display_applications: false # This flag enables an AWS cost estimate for every job based on their # runtime matrices. CPU, RAM and runtime usage is mapped against AWS @@ -891,7 +889,8 @@ base_app_main: &BASE_APP_MAIN # Append "{brand}" text to the masthead. brand: Europe - # Display the "Galaxy" text in the masthead. + # This option has been deprecated, use the `logo_src` instead to + # change the default logo including the galaxy brand title. #display_galaxy_brand: true # Format string used when showing date and time information. The @@ -940,8 +939,7 @@ base_app_main: &BASE_APP_MAIN # file staging and Interactive Tool containers for communicating back # with Galaxy via the API. # If you plan to run Interactive Tools make sure the docker container - # can reach this URL. For more details see - # `job_conf.xml.interactivetools`. + # can reach this URL. galaxy_infrastructure_url: https://usegalaxy.eu # If the above URL cannot be determined ahead of time in dynamic @@ -961,7 +959,7 @@ base_app_main: &BASE_APP_MAIN #logo_url: / # The brand image source. - #logo_src: /static/favicon.png + #logo_src: /static/favicon.svg # The custom brand image source. #logo_src_secondary: null @@ -1123,7 +1121,7 @@ base_app_main: &BASE_APP_MAIN # robust to configure this externally, managing it in the same way # Galaxy itself is managed. If true, Galaxy will only launch the # proxy if it is actually going to be used (e.g. for Jupyter). - dynamic_proxy_manage: "False" + dynamic_proxy_manage: false # As of 16.04 Galaxy supports multiple proxy types. The original # NodeJS implementation, alongside a new Golang @@ -1185,14 +1183,35 @@ base_app_main: &BASE_APP_MAIN # "loggers" section does not appear in this configuration file. #auto_configure_logging: true - # Verbosity of console log messages. Acceptable values can be found + # Log destination, defaults to special value "stdout" that logs to + # standard output.
If set to anything else, then it will be + # interpreted as a path that will be used as the log file, and logging + # to stdout will be disabled. + #log_destination: stdout + + # Size of log file at which size it will be rotated as per the + # documentation in + # https://docs.python.org/library/logging.handlers.html#logging.handlers.RotatingFileHandler + # If log_rotate_count is not also set, no log rotation will be + # performed. A value of 0 (the default) means no rotation. Size can be + # a number of bytes or a human-friendly representation like "100 MB" + # or "1G". + #log_rotate_size: '0' + + # Number of log file backups to keep, per the documentation in + # https://docs.python.org/library/logging.handlers.html#logging.handlers.RotatingFileHandler + # Any additional rotated log files will automatically be pruned. If + # log_rotate_size is not also set, no log rotation will be performed. + # A value of 0 (the default) means no rotation. + #log_rotate_count: 0 + + # Verbosity of console log messages. Acceptable values can be found # here: https://docs.python.org/library/logging.html#logging-levels A # custom debug level of "TRACE" is available for even more verbosity. #log_level: DEBUG - # Controls where and how the server logs messages. If unset, the - # default is to log all messages to standard output at the level - # defined by the `log_level` configuration option. Configuration is + # Controls where and how the server logs messages. If set, overrides + # all settings in the log_* configuration options. Configuration is # described in the documentation at: # https://docs.galaxyproject.org/en/master/admin/config_logging.html #logging: null @@ -1239,7 +1258,7 @@ base_app_main: &BASE_APP_MAIN # edited or manipulated through the Admin control panel -- see "Manage # Allowlist" # The value of this option will be resolved with respect to - # . + # . #sanitize_allowlist_file: sanitize_allowlist.txt # By default Galaxy will serve non-HTML tool output that may @@ -1315,6 +1334,11 @@ base_app_main: &BASE_APP_MAIN # Sentry. Possible values are DEBUG, INFO, WARNING, ERROR or CRITICAL. #sentry_event_level: ERROR + # Set to a number between 0 and 1. With this option set, every + # transaction created will have that percentage chance of being sent + # to Sentry. A value higher than 0 is required to analyze performance. + #sentry_traces_sample_rate: 0.0 + # Log to statsd Statsd is an external statistics aggregator # (https://github.com/etsy/statsd) Enabling the following options will # cause galaxy to log request timing and other statistics to the @@ -1399,65 +1423,62 @@ base_app_main: &BASE_APP_MAIN # Available formats are currently 'zip', 'gz', and 'bz2'. #disable_library_comptypes: null - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_name_boost: 20 + # In tool search, a query match against a tool's name text will + # receive this score multiplier. + #tool_name_boost: 20.0 # If a search query matches a tool name exactly, the score will be # multiplied by this factor. - tool_name_exact_multiplier: 10.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. 
- #tool_id_boost: 9.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_section_boost: 3.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_description_boost: 8.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_label_boost: 1.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_stub_boost: 2.0 - - # Boosts are used to customize this instance's toolbox search. The - # higher the boost, the more importance the scoring algorithm gives to - # the given field. Section refers to the tool group in the tool - # panel. Rest of the fields are tool's attributes. - tool_help_boost: 1.0 - - # Limits the number of results in toolbox search. Can be used to - # tweak how many results will appear. + #tool_name_exact_multiplier: 10.0 + + # In tool search, a query match against a tool's ID text will receive + # this score multiplier. The query must be an exact match against ID + # in order to be counted as a match. + #tool_id_boost: 20.0 + + # In tool search, a query match against a tool's section text will + # receive this score multiplier. + #tool_section_boost: 3.0 + + # In tool search, a query match against a tool's description text will + # receive this score multiplier. + #tool_description_boost: 8.0 + + # In tool search, a query match against a tool's label text will + # receive this score multiplier. + #tool_label_boost: 1.0 + + # A stub is parsed from the GUID as "owner/repo/tool_id". In tool + # search, a query match against a tool's stub text will receive this + # score multiplier. + #tool_stub_boost: 2.0 + + # In tool search, a query match against a tool's help text will + # receive this score multiplier. + #tool_help_boost: 1.0 + + # The lower this parameter, the greater the diminishing reward for + # term frequency in the help text. A higher K1 increases the level of + # reward for additional occurrences of a term. The default value will + # provide a slight increase in score for the first, second and third + # occurrence and little reward thereafter. + #tool_help_bm25f_k1: 0.5 + + # Limits the number of results in toolbox search. Use to set the + # maximum number of tool search results to display. tool_search_limit: 160
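To make the multipliers above concrete: with the defaults shown, a query that exactly equals a tool's name is scored roughly as base_score * tool_name_boost * tool_name_exact_multiplier = base_score * 20.0 * 10.0, while the same query matching only in the help text gets about base_score * 1.0. An instance that wants name matches to dominate even further could override just those two knobs (hypothetical values, all other boosts keep their defaults):

tool_name_boost: 30.0
tool_name_exact_multiplier: 12.0

- # Enable/ disable Ngram-search for tools. It makes tool search results - # tolerant for spelling mistakes in the query by dividing the query - # into multiple ngrams and search for each ngram + # Disabling this will prevent partial matches on tool names. + # Enable/disable Ngram-search for tools. It makes tool search results + # tolerant for spelling mistakes in the query, and will also match + # query substrings e.g. "genome" will match "genomics" or + # "metagenome".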
#tool_enable_ngram_search: true - # Set minimum size of ngrams + # Set minimum character length of ngrams #tool_ngram_minsize: 3 - # Set maximum size of ngrams + # Set maximum character length of ngrams #tool_ngram_maxsize: 4 # Ngram matched scores will be multiplied by this factor. Should @@ -1516,6 +1537,10 @@ base_app_main: &BASE_APP_MAIN # log your users out. #remote_user_logout_href: null + # This is the default url to which users are redirected after they log + # out. + #post_user_logout_href: /root/login?is_logout_redirect=true + # If your proxy and/or authentication source does not normalize e-mail # addresses or user names being passed to Galaxy - set this option to # true to force these to lower case. @@ -1533,7 +1558,7 @@ base_app_main: &BASE_APP_MAIN # Admin section of the server, and will have access to create users, # groups, roles, libraries, and more. For more information, see: # https://galaxyproject.org/admin/ - admin_users: "hxr@informatik.uni-freiburg.de,bjoern.gruening@gmail.com,kuntzm@informatik.uni-freiburg.de,sbray@informatik.uni-freiburg.de,maierw@informatik.uni-freiburg.de,berenice.batut@gmail.com,gallardo@informatik.uni-freiburg.de" + admin_users: "hxr@informatik.uni-freiburg.de,bjoern.gruening@gmail.com,kuntzm@informatik.uni-freiburg.de,sbray@informatik.uni-freiburg.de,maierw@informatik.uni-freiburg.de,berenice.batut@gmail.com,gallardo@informatik.uni-freiburg.de,srikakus@informatik.uni-freiburg.de,dominguj@informatik.uni-freiburg.de" # Force everyone to log in (disable anonymous access). #require_login: false @@ -1733,7 +1758,45 @@ base_app_main: &BASE_APP_MAIN # if you need to bootstrap Galaxy, in particular to create a real # admin user account via API. You should probably not set this on a # production server. - #master_api_key: null + #bootstrap_admin_api_key: null + + # Service ID for GA4GH services (exposed via the service-info endpoint + # for the Galaxy DRS API). If unset, one will be generated using the + # URL the target API requests are made against. + # For more information on GA4GH service definitions - check out + # https://github.com/ga4gh-discovery/ga4gh-service-registry and + # https://editor.swagger.io/?url=https://raw.githubusercontent.com/ga4gh-discovery/ga4gh-service-registry/develop/service-registry.yaml + # This value should likely reflect your service's URL. For instance + # for usegalaxy.org this value should be org.usegalaxy. Particular + # Galaxy implementations will treat this value as a prefix and append + # the service type to this ID. For instance for the DRS service "id" + # (available via the DRS API) for the above configuration value would + # be org.usegalaxy.drs. + #ga4gh_service_id: null + + # Service name for host organization (exposed via the service-info + # endpoint for the Galaxy DRS API). If unset, one will be generated + # using ga4gh_service_id. + # For more information on GA4GH service definitions - check out + # https://github.com/ga4gh-discovery/ga4gh-service-registry and + # https://editor.swagger.io/?url=https://raw.githubusercontent.com/ga4gh-discovery/ga4gh-service-registry/develop/service-registry.yaml + #ga4gh_service_organization_name: null + + # Organization URL for host organization (exposed via the service-info + # endpoint for the Galaxy DRS API). If unset, one will be generated + # using the URL the target API requests are made against. 
+ # For more information on GA4GH service definitions - check out + # https://github.com/ga4gh-discovery/ga4gh-service-registry and + # https://editor.swagger.io/?url=https://raw.githubusercontent.com/ga4gh-discovery/ga4gh-service-registry/develop/service-registry.yaml + #ga4gh_service_organization_url: null + + # Service environment (exposed via the service-info endpoint for the + # Galaxy DRS API) for implemented GA4GH services. + # Suggested values are prod, test, dev, staging. + # For more information on GA4GH service definitions - check out + # https://github.com/ga4gh-discovery/ga4gh-service-registry and + # https://editor.swagger.io/?url=https://raw.githubusercontent.com/ga4gh-discovery/ga4gh-service-registry/develop/service-registry.yaml + #ga4gh_service_environment: null # Enable tool tags (associating tools with tags). This has its own # option since its implementation has a few performance implications @@ -1864,7 +1927,7 @@ base_app_main: &BASE_APP_MAIN # process and notifies itself of new jobs via in-memory queues. Jobs # are run locally on the system on which Galaxy is started. Advanced # job running capabilities can be configured through the job - # configuration file. + # configuration file or the option. # The value of this option will be resolved with respect to # . job_config_file: "{{ galaxy_config_dir }}/job_conf.yml" @@ -1892,11 +1955,11 @@ base_app_main: &BASE_APP_MAIN # When jobs fail due to job runner problems, Galaxy can be configured # to retry these or reroute the jobs to new destinations. Very fine - # control of this is available with resubmit declarations in - # job_conf.xml. For simple deployments of Galaxy though, the following + # control of this is available with resubmit declarations in the job + # config. For simple deployments of Galaxy though, the following # attribute can define resubmission conditions for all job # destinations. If any job destination defines even one resubmission - # condition explicitly in job_conf.xml - the condition described by + # condition explicitly in the job config - the condition described by # this option will not apply to that destination. For instance, the # condition: 'attempt < 3 and unknown_error and (time_running < 300 or # time_since_queued < 300)' would retry up to two times jobs that @@ -2017,7 +2080,7 @@ base_app_main: &BASE_APP_MAIN # completion. These bits include the job working directory, external # metadata temporary files, and DRM stdout and stderr files (if using # a DRM). Possible values are: always, onsuccess, never - cleanup_job: always + cleanup_job: onsuccess # When running DRMAA jobs as the Galaxy user # (https://docs.galaxyproject.org/en/master/admin/cluster.html#submitting-jobs-as-the-real-user) @@ -2160,6 +2223,13 @@ base_app_main: &BASE_APP_MAIN # if running many handlers. cache_user_job_count: true + # If true, the toolbox will be sorted by tool id when the toolbox is + # loaded. This is useful for ensuring that tools are always displayed + # in the same order in the UI. If false, the order of tools in the + # toolbox will be preserved as they are loaded from the tool config + # files. + #toolbox_auto_sort: true + # Define toolbox filters # (https://galaxyproject.org/user-defined-toolbox-filters/) that # admins may use to restrict the tools to display. @@ -2193,7 +2263,7 @@ base_app_main: &BASE_APP_MAIN # The base module(s) that are searched for modules for toolbox # filtering (https://galaxyproject.org/user-defined-toolbox-filters/) # functions. 
- #toolbox_filter_base_modules: galaxy.tools.filters,galaxy.tools.toolbox.filters + #toolbox_filter_base_modules: galaxy.tools.filters,galaxy.tools.toolbox.filters,galaxy.tool_util.toolbox.filters # Galaxy uses AMQP internally for communicating between processes. # For example, when reloading the toolbox or locking job execution, @@ -2208,17 +2278,32 @@ base_app_main: &BASE_APP_MAIN amqp_internal_connection: "pyamqp://galaxy:{{ rabbitmq_password_galaxy }}@mq.galaxyproject.eu:5671/galaxy?ssl=1" #amqp_internal_connection: "amqp://galaxy:{{ rabbitmq_password_galaxy }}@localhost:5672/galaxy" + # Configuration options passed to Celery. + # To refer to a task by name, use the template `galaxy.foo` where + # `foo` is the function name of the task defined in the + # galaxy.celery.tasks module. + # The `broker_url` option, if unset, defaults to the value of + # `amqp_internal_connection`. The `result_backend` option must be set + # if the `enable_celery_tasks` option is set. + # For details, see Celery documentation at + # https://docs.celeryq.dev/en/stable/userguide/configuration.html. + #celery_conf: + # task_routes: + # galaxy.fetch_data: galaxy.external + # galaxy.set_job_metadata: galaxy.external + + celery_conf: + result_backend: "{{ redis_connection_string }}" + task_routes: + galaxy.fetch_data: disabled + # galaxy.fetch_data: galaxy.external + galaxy.set_job_metadata: galaxy.external + # Offload long-running tasks to a Celery task queue. Activate this # only if you have setup a Celery worker for Galaxy. For details, see # https://docs.galaxyproject.org/en/master/admin/production.html enable_celery_tasks: true - # Celery broker (if unset falls back to amqp_internal_connection). - #celery_broker: null - - # If set, it will be the results backend for Celery. - #celery_backend: null - # Allow disabling pbkdf2 hashing of passwords for legacy situations. # This should normally be left enabled unless there is a specific # reason to disable it. @@ -2250,7 +2335,7 @@ base_app_main: &BASE_APP_MAIN # Set the number of predictions/recommendations to be made by the # model - topk_recommendations: 20 + #topk_recommendations: 20 # Set path to the additional tool preferences from Galaxy admins. It # has two blocks. One for listing deprecated tools which will be @@ -2289,6 +2374,20 @@ base_app_main: &BASE_APP_MAIN # <config_dir>. #vault_config_file: vault_conf.yml + # Display built-in converters in the tool panel. + #display_builtin_converters: true + + # Optional file containing one or more themes for galaxy. If several + # themes are defined, users can choose their preferred theme in the + # client. + # The value of this option will be resolved with respect to + # <config_dir>. + themes_config_file: /opt/galaxy/config/themes_conf.yml
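Since themes_conf.yml is referenced just above but not included in this diff, here is a minimal sketch of what such a file can contain; the theme id and keys are illustrative only, the authoritative schema ships with Galaxy as themes_conf.yml.sample:

blue:
  masthead:
    color: "#2c3143"

+ + # Enables user preferences and api endpoint for the beacon + # integration.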
+ enable_beacon_integration: true + # TODO(hxr): UNDOCUMENTED ucsc_build_sites: "{{ galaxy_config_dir }}/ucsc_build_sites.txt" diff --git a/group_vars/influxdb.yml b/group_vars/influxdb.yml index cfaf55f30..708bd9c4f 100644 --- a/group_vars/influxdb.yml +++ b/group_vars/influxdb.yml @@ -34,12 +34,15 @@ influxdb_container_standalone_setup_details: # see: # https://docs.influxdata.com/influxdb/v1.8/administration/config/ +# create user task +handy_groups: + - group_name: influxdb + group_gid: 999 -# create_user task -user_name: influxdb -user_uid: 999 -user_group_name: influxdb -user_gid: 999 +handy_users: + - user_name: influxdb + user_uid: 999 + user_group: influxdb # Certbot certbot_admin_email: security@usegalaxy.eu diff --git a/group_vars/maintenance.yml b/group_vars/maintenance.yml new file mode 100644 index 000000000..223038ced --- /dev/null +++ b/group_vars/maintenance.yml @@ -0,0 +1,326 @@ +--- +# MISC/Generic variables +hostname: "{{ inventory_hostname }}" +galaxy_root: /opt/galaxy +galaxy_venv_dir: "{{ galaxy_root }}/venv" +galaxy_server_dir: "{{ galaxy_root }}/server" +galaxy_config_dir: "{{ galaxy_root }}/config" +galaxy_config_file: "{{ galaxy_config_dir }}/galaxy.yml" +galaxy_mutable_config_dir: "{{ galaxy_root }}/mutable-config" +galaxy_log_dir: "/var/log/galaxy" + +galaxy_config: + galaxy: + job_working_directory: /data/jwd04/main + nginx_upload_store: "/data/jwd04/nginx_upload/main/uploads" + nginx_upload_job_files_store: "/data/jwd04/nginx_upload/main/jobfiles" + +galaxy_group: + name: galaxy + gid: 999 +galaxy_user: + name: galaxy + create_home: true + home: /opt/galaxy + uid: 999 + shell: /bin/bash + +# Role: hxr.postgres-connection +# Role: usegalaxy-eu.galaxy-slurp +postgres_user: galaxy +postgres_host: sn05.galaxyproject.eu +postgres_port: 5432 + +# Role: usegalaxy_eu.handy.os_setup +enable_hostname: true +enable_powertools: true +enable_remap_user: true +enable_exclude_packages: true +enable_pam_limits: true +enable_install_software: true +enable_create_user: true +software_groups_to_install: + - admin + - debug + - editors + - pdf_export_deps + - services + - terminals + - utils +handy_users: + - user_name: "{{ galaxy_user.name }}" + user_uid: "{{ galaxy_user.uid }}" + user_group: "{{ galaxy_group.name }}" + user_comment: "Galaxy useraccount" + user_create_home: "{{ galaxy_user.create_home }}" + user_home: "{{ galaxy_user.home }}" + user_shell: "{{ galaxy_user.shell }}" +handy_groups: + - group_name: "{{ galaxy_group.name }}" + group_gid: "{{ galaxy_group.gid }}" + +# Role: usegalaxy-eu.autofs +# Appending additional mounts to autofs_conf_files variable (var file: mounts/dest/all.yml) +# This mounts the galaxy sync directory from the NFS server to /opt/galaxy +galaxy_mount: + - "{{ sync.gxkey.path }} -{{ sync.gxkey.nfs_options | join(',') }} {{ sync.gxkey.export }}" + +autofs_service: + install: true + enable: true +nfs_kernel_tuning: true +autofs_mount_points: + - data + - gxtest + - gxkey + - jwd + - usrlocal + +# Role: usegalaxy-eu.bashrc +galaxy_pulsar_app_conf: "{{ galaxy_config_dir }}/pulsar_app.yml" +bashrc_users: + - uname: "{{ galaxy_user.name }}" + uhome: "{{ galaxy_user.home }}" + gname: "{{ galaxy_group.name }}" + +# Role: hxr.postgres-connection +pgc_users: + - uname: "{{ galaxy_user.name }}" + uhome: "{{ galaxy_user.home }}" + gname: "{{ galaxy_group.name }}" + pguser: "{{ postgres_user }}" + pgpass: "{{ postgres_pass }}" + pgdatabase: galaxy + +# Role: usegalaxy_eu.fs_maintenance +fsm_maintenance_dir: "/data/dnb01/maintenance" +fsm_intervals: + short: 
"3d" + medium: "7d" + long: "31d" +fsm_scripts: + temporary_dirs: + enable: true + src: "temporary_dirs.sh.j2" + dst: "{{ fsm_maintenance_dir }}/temporary_dirs.sh" + user: "{{ fsm_galaxy_user.username }}" + group: "{{ fsm_galaxy_user.groupname }}" + paths: + - /data/1/galaxy_db/tmp + - /data/2/galaxy_db/tmp + - /data/dnb01/galaxy_db/tmp + - /data/dnb02/galaxy_db/tmp + - /data/dnb03/galaxy_db/tmp + - /data/dnb05/galaxy_db/tmp + - /data/dnb06/galaxy_db/tmp + - /data/jwd/tmp + - /data/jwd02f/tmp + - /data/jwd04/tmp + time: "{{ fsm_intervals.long }}" + upload_dirs: + enable: true + src: "uploads.sh.j2" + dst: "{{ fsm_maintenance_dir }}/uploads.sh" + user: "{{ fsm_galaxy_user.username }}" + group: "{{ fsm_galaxy_user.groupname }}" + paths: + - "{{ galaxy_config['galaxy']['nginx_upload_store'] }}" + - "{{ galaxy_config['galaxy']['nginx_upload_job_files_store'] }}" + time: "{{ fsm_intervals.medium }}" + job_working_dirs: + enable: true + src: "job_working_dir.sh.j2" + dst: "{{ fsm_maintenance_dir }}/job_working_dir.sh" + user: "{{ fsm_galaxy_user.username }}" + group: "{{ fsm_galaxy_user.groupname }}" + paths: + - "{{ galaxy_config['galaxy']['job_working_directory'] }}" + - /data/dnb03/galaxy_db/job_working_directory + - /data/jwd/main + - /data/jwd01/main + - /data/jwd02f/main + - /data/jwd03f/main + - /data/jwd04/main + - /data/jwd05e/main + time: "{{ fsm_intervals.long }}" +fsm_htcondor_enable: true + +# Role: dj-wasabi.telegraf +telegraf_agent_hostname: "{{ hostname }}" +telegraf_agent_version: 1.17.2 +custom_telegraf_env: "/usr/bin/env GDPR_MODE=1 PGUSER={{ galaxy_user.name }} PGHOST={{ postgres_host }} GALAXY_ROOT={{ galaxy_server_dir }} GALAXY_CONFIG_FILE={{ galaxy_config_file }} GXADMIN_PYTHON={{ galaxy_venv_dir }}/bin/python" +telegraf_plugins_extra: + postgres: + plugin: "postgresql" + config: + - address = "{{ galaxy_db_connection }}" + - databases = ["galaxy", "galaxy-test", "apollo", "chado"] + monitor_nfsstat: + plugin: "exec" + config: + - commands = ["/usr/bin/nfsstat-influx"] + - timeout = "10s" + - data_format = "influx" + - interval = "15s" + galaxy_uploaded: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery upload-gb-in-past-hour"] + - timeout = "600s" + - data_format = "influx" + - interval = "1h" + galaxy_jobs_queued: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery jobs-queued"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + galaxy_jobs_queued_internal: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery jobs-queued-internal-by-handler"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + galaxy_jobs_queue_overview: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery queue-overview --short-tool-id"] + - timeout = "30s" + - data_format = "influx" + - interval = "1m" + galaxy_jobs_destination_overview: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery queue --by destination"] + - timeout = "30s" + - data_format = "influx" + - interval = "1m" + galaxy_oidc: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery users-with-oidc"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + galaxy_workflow: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery workflow-invocation-status"] + - timeout = "15s" + - data_format = "influx" + - 
interval = "1m" + galaxy_workflow_totals: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery workflow-invocation-totals"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + galaxy_job_queue_states_stats: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/galaxy_job_queue_states_stats"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + galaxy_jobs_per_handler_stats: + plugin: "exec" + config: + - commands = ["{{ custom_telegraf_env }} /usr/bin/galaxy_jobs_per_handler_stats"] + - timeout = "15s" + - data_format = "influx" + - interval = "1m" + monitor_condor_queue: + plugin: "exec" + config: + - commands = ["sudo /usr/bin/monitor-condor-queue"] + - timeout = "10s" + - data_format = "influx" + - interval = "1m" + monitor_condor_util: + plugin: "exec" + config: + - commands = ["sudo /usr/bin/monitor-condor-utilisation"] + - timeout = "10s" + - data_format = "influx" + - interval = "1m" + monitor_condor_util_split: + plugin: "exec" + config: + - commands = ["sudo /usr/bin/monitor-condor-utilisation-split"] + - timeout = "10s" + - data_format = "influx" + - interval = "1m" + postgres_extra: + plugin: "exec" + config: + - commands = [ + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-cache-hit", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-index-size", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-index-usage", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-table-bloat", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-table-size", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-unused-indexes", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-vacuum-stats", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-stat-bgwriter", + "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-stat-user-tables", + ] + - timeout = "60s" + - data_format = "influx" + - interval = "2m" + + +# Role: hxr.monitor-cluster +monitor_condor: true + +# Role: usegalaxy-eu.dynmotd +dynmotd_custom: + - name: Condor + command: "condor_q -totals | tail -n 2" + +# Role: usegalaxy_eu.htcondor +condor_host: "condor-cm.galaxyproject.eu" +condor_fs_domain: bi.uni-freiburg.de +condor_uid_domain: bi.uni-freiburg.de +condor_allow_write: "10.5.68.0/24, 132.230.223.0/24" +condor_daemons: + - MASTER + - SCHEDD +condor_allow_negotiator: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }},$(CONDOR_HOST)" +condor_allow_administrator: "$(ALLOW_NEGOTIATOR)" +condor_system_periodic_hold: "{{ 30 * 24 * 60 * 60 }}" +condor_system_periodic_remove: "{{ 2 * 24 * 60 * 60 }}" +condor_network_interface: "{{ ansible_default_ipv4.interface }}" +condor_extra: | + MASTER_UPDATE_INTERVAL = 150 + SCHEDD_INTERVAL = 60 + JOB_START_COUNT = 250 + JOB_START_DELAY = 0 + CLAIM_WORKLIFE = 120 + +# Role: usegalaxy-eu.galaxy-slurp +galaxy_slurper: galaxy +galaxy_slurp_influx_pass: "{{ influxdb.node.password }}" +galaxy_slurp_influx_user: "{{ influxdb.node.username }}" +galaxy_slurp_influx_url: "{{ influxdb.url }}" + +# Role: galaxyproject.gxadmin +gxadmin_commit: main +gxadmin_dir: /opt/gxadmin +gxadmin_bin_dir: /usr/bin +gxadmin_force: true + +# Role: usegalaxy-eu.logrotate +lp_logrotate_confd: + - path: rsyslog + conf: | + /var/log/remote/*/*.log { + weekly + rotate 5 + missingok + dateext + notifempty + compress + } diff --git a/group_vars/mq.yml b/group_vars/mq.yml index 3a3925a35..fbc217d79 100644 --- a/group_vars/mq.yml +++ b/group_vars/mq.yml @@ -2,10 +2,14 @@ 
hostname: mq.galaxyproject.eu # create_user task -user_name: rabbitmq -user_uid: 999 -user_group_name: rabbitmq -user_gid: 999 +handy_groups: + - group_name: rabbitmq + group_gid: 999 + +handy_users: + - user_name: rabbitmq + user_uid: 999 + user_group: rabbitmq # Certbot certbot_admin_email: security@usegalaxy.eu @@ -98,6 +102,9 @@ rabbitmq_users: - user: galaxy_mira_pulsar password: "{{ rabbitmq_password_galaxy_mira_pulsar }}" vhost: /pulsar/galaxy_mira_pulsar + - user: galaxy_sk01 + password: "{{ rabbitmq_password_galaxy_sk01 }}" + vhost: /pulsar/galaxy_sk01 rabbitmq_plugins: - rabbitmq_management @@ -116,12 +123,23 @@ rabbitmq_config: fail_if_no_peer_cert: "false" management_agent: disable_metrics_collector: "false" + management: + disable_stats: "false" rabbitmq_container: name: rabbit_hole image: rabbitmq:3.9.11 hostname: "{{ inventory_hostname }}" +# Redis +redis_port: 6379 +redis_bind_interface: 0.0.0.0 +redis_rdbcompression: "yes" +redis_dbfilename: dump.rdb +redis_dbdir: /var/lib/redis +redis_loglevel: "notice" +redis_logfile: /var/log/redis/redis-server.log +redis_conf_path: /etc/redis # Telegraf telegraf_plugins_extra: prometheus: diff --git a/group_vars/plausible.yml b/group_vars/plausible.yml index 85fa753cc..a1bbf465a 100644 --- a/group_vars/plausible.yml +++ b/group_vars/plausible.yml @@ -5,6 +5,9 @@ docker_service_state: started docker_service_enabled: true +# Plausible +plausible_lock_register: true + # Certbot certbot_auth_method: --webroot certbot_well_known_root: /srv/nginx/_well-known_root diff --git a/group_vars/sn05.yml b/group_vars/sn05.yml index e4476981a..2f0bb6002 100644 --- a/group_vars/sn05.yml +++ b/group_vars/sn05.yml @@ -18,23 +18,23 @@ software_groups_to_install: - utils # HTCondor -condor_host: "condor-cm.galaxyproject.eu" -condor_allow_write: "10.5.68.0/24, 132.230.223.0/24, 132.230.153.0/28" -condor_daemons: - - COLLECTOR - - MASTER - - NEGOTIATOR -condor_allow_negotiator: $(ALLOW_WRITE) -condor_allow_administrator: "$(ALLOW_NEGOTIATOR)" -condor_network_interface: ens802f0.2368 -condor_extra: | - MASTER_UPDATE_INTERVAL = 150 - CLASSAD_LIFETIME = 300 - NEGOTIATOR_INTERVAL = 15 - NEGOTIATOR_UPDATE_INTERVAL = 100 - JOB_START_COUNT = 250 - JOB_START_DELAY = 0 - NEGOTIATOR_POST_JOB_RANK = isUndefined(RemoteOwner) * (10000 - TotalLoadAvg) +# condor_host: "condor-cm.galaxyproject.eu" +# condor_allow_write: "10.5.68.0/24, 132.230.223.0/24, 132.230.153.0/28" +# condor_daemons: +# - COLLECTOR +# - MASTER +# - NEGOTIATOR +# condor_allow_negotiator: $(ALLOW_WRITE) +# condor_allow_administrator: "$(ALLOW_NEGOTIATOR)" +# condor_network_interface: ens802f0.2368 +# condor_extra: | +# MASTER_UPDATE_INTERVAL = 150 +# CLASSAD_LIFETIME = 300 +# NEGOTIATOR_INTERVAL = 15 +# NEGOTIATOR_UPDATE_INTERVAL = 100 +# JOB_START_COUNT = 250 +# JOB_START_DELAY = 0 +# NEGOTIATOR_POST_JOB_RANK = isUndefined(RemoteOwner) * (10000 - TotalLoadAvg) # PostgreSQL postgresql_conf: @@ -52,19 +52,20 @@ postgresql_conf: - effective_cache_size: "16GB" # - log_line_prefix: "'%t:%r:%u@%d:[%p]<%m>: '" - log_checkpoints: "on" - - log_min_duration_statement: 100 + - log_min_duration_statement: 500 postgresql_pg_hba_conf: - "host postgres galaxy 132.230.223.239/32 md5" - "host postgres galaxy 10.5.68.237/32 md5" - - "host galaxy galaxy 10.5.68.118/32 md5" - "host postgres galaxy-test 10.5.68.154/32 md5" - "host galaxy galaxy 132.230.223.239/32 md5" - "host galaxy galaxy 10.5.68.237/32 md5" + - "host galaxy galaxy 10.5.68.126/32 md5" + - "host galaxy galaxy 10.5.68.158/32 md5" - "host galaxy galaxy 
10.5.68.168/32 md5" - "host galaxy galaxy 100.118.169.22/32 md5" - "host galaxy galaxy-readonly 132.230.223.239/32 md5" - "host galaxy galaxy-readonly 10.5.68.237/32 md5" - - "host galaxy galaxy-readonly 10.5.68.118/32 md5" + - "host galaxy galaxy-readonly 10.5.68.158/32 md5" - "host tiaas tiaas 132.230.223.239/32 md5" - "host tiaas tiaas 10.5.68.237/32 md5" - "host galaxy-test galaxy-test 132.230.223.239/32 md5" @@ -75,4 +76,4 @@ postgresql_pg_hba_conf: - "host apollo apollo 10.5.68.0/24 md5" - "host chado apollo 10.5.68.0/24 md5" - "host grt grt 10.5.68.0/24 md5" -postgresql_pgdump_dir: "/var/lib/pgsql/pgdump" +postgresql_pgdump_dir: "/var/lib/pgsql/pgdump2" diff --git a/group_vars/sn06.yml b/group_vars/sn06.yml index bc914dc57..36bfe85a7 100644 --- a/group_vars/sn06.yml +++ b/group_vars/sn06.yml @@ -96,12 +96,6 @@ telegraf_agent_hostname: "{{ hostname }}" telegraf_agent_version: 1.17.2 custom_telegraf_env: "/usr/bin/env GDPR_MODE=1 PGUSER={{ galaxy_user.name }} PGHOST={{ postgres_host }} GALAXY_ROOT={{ galaxy_server_dir }} GALAXY_CONFIG_FILE={{ galaxy_config_file }} GALAXY_LOG_DIR={{ galaxy_log_dir }} GXADMIN_PYTHON={{ galaxy_venv_dir }}/bin/python" telegraf_plugins_extra: - postgres: - plugin: "postgresql" - config: - - address = "{{ galaxy_db_connection }}" - - databases = ["galaxy", "galaxy-test", "apollo", "chado"] - listen_galaxy_routes: plugin: "statsd" config: @@ -111,122 +105,22 @@ telegraf_plugins_extra: - allowed_pending_messages = 10000 - percentile_limit = 100 - #monitor_condor_queue_split: - #plugin: "exec" - #config: - ## TODO: sudoers rule? - #- commands = ["sudo /usr/bin/monitor-condor-queue-split"] - #- timeout = "10s" - #- data_format = "influx" - #- interval = "15s" - - monitor_condor_queue: - plugin: "exec" - config: - - commands = ["sudo /usr/bin/monitor-condor-queue"] - - timeout = "10s" - - data_format = "influx" - - interval = "1m" - - monitor_condor_util: - plugin: "exec" - config: - - commands = ["sudo /usr/bin/monitor-condor-utilisation"] - - timeout = "10s" - - data_format = "influx" - - interval = "1m" - - monitor_nfsstat: - plugin: "exec" - config: - - commands = ["/usr/bin/nfsstat-influx"] - - timeout = "10s" - - data_format = "influx" - - interval = "15s" - - # Some custom galaxy monitoring stuff - galaxy_uploaded: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery upload-gb-in-past-hour"] - - timeout = "120s" - - data_format = "influx" - - interval = "1h" + # Some custom galaxy monitoring stuff that can only run on the Galaxy server galaxy_lastlog: plugin: "exec" config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin uwsgi lastlog"] + - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin gunicorn lastlog"] - timeout = "15s" - data_format = "influx" - interval = "15s" - galaxy_jobs_queued: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery jobs-queued"] - - timeout = "15s" - - data_format = "influx" - - interval = "1m" - galaxy_jobs_queued_internal: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery jobs-queued-internal-by-handler"] - - timeout = "15s" - - data_format = "influx" - - interval = "1m" - galaxy_jobs_queue_overview: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery queue-overview --short-tool-id"] - - timeout = "30s" - - data_format = "influx" - - interval = "1m" - galaxy_oidc: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} 
/usr/bin/gxadmin iquery users-with-oidc"] - - timeout = "15s" - - data_format = "influx" - - interval = "1m" - galaxy_workflow: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery workflow-invocation-status"] - - timeout = "15s" - - data_format = "influx" - - interval = "1m" - galaxy_workflow_totals: - plugin: "exec" - config: - - commands = ["{{ custom_telegraf_env }} /usr/bin/gxadmin iquery workflow-invocation-totals"] - - timeout = "15s" - - data_format = "influx" - - interval = "1m" galaxy_active_users: plugin: "exec" config: - - commands = ["/usr/bin/gxadmin local cu"] + - commands = ["/usr/bin/gxadmin gunicorn active-users"] - timeout = "15s" - data_format = "influx" - interval = "1m" - postgres_extra: - plugin: "exec" - config: - - commands = [ - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-cache-hit", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-index-size", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-index-usage", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-table-bloat", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-table-size", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-unused-indexes", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-vacuum-stats", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-stat-bgwriter", - "{{ custom_telegraf_env }} /usr/bin/gxadmin iquery pg-stat-user-tables", - ] - - timeout = "60s" - - data_format = "influx" - - interval = "2m" - # Custom pip installer pip_venv_path: "{{ galaxy_venv_dir }}" pip_install_dependencies: @@ -241,7 +135,12 @@ pip_install_dependencies: - data_hacks # used by reports - WeasyPrint - - total_perspective_vortex + - nbconvert + - gitlab-arc-fs + # Needed for usegalaxy-eu.vgcn-monitoring Telegraf role + - pyyaml + - GitPython + - python-openstackclient yum_exclude_repos: - condor* @@ -256,10 +155,6 @@ all_yum_repositories: retries: 1 timeout: 10 -# Galaxy monitoring -monitor_condor: true -monitor_condor_split_util: true - # Certbot certbot_virtualenv_package_name: python3-virtualenv certbot_dns_provider: route53 @@ -346,7 +241,7 @@ galaxy_cvmfs_server_urls: # SystemD galaxy_systemd_mode: "gunicorn" -galaxy_systemd_gunicorns: 2 +galaxy_systemd_gunicorns: 3 galaxy_systemd_gunicorn_workers: 4 galaxy_systemd_gunicorn_timeout: 600 galaxy_systemd_handlers: 6 @@ -364,22 +259,28 @@ galaxy_systemd_memory_limit_workflow: 15 condor_host: "condor-cm.galaxyproject.eu" condor_fs_domain: bi.uni-freiburg.de condor_uid_domain: bi.uni-freiburg.de -condor_allow_write: "10.5.68.0/24, 132.230.223.0/24" +condor_allow_write: "10.5.68.0/24, 132.230.223.0/24,132.230.153.0/28" condor_daemons: + - COLLECTOR + - NEGOTIATOR - MASTER - SCHEDD -condor_allow_negotiator: "132.230.223.239,$(CONDOR_HOST)" +condor_allow_negotiator: "132.230.223.239,$(CONDOR_HOST),$(ALLOW_WRITE)" condor_allow_administrator: "$(ALLOW_NEGOTIATOR)" -condor_system_periodic_hold: "{{ 30 * 24 * 60 * 60}}" -condor_system_periodic_remove: "{{ 2 * 24 * 60 * 60}}" +condor_system_periodic_hold: "{{ 30 * 24 * 60 * 60 }}" +condor_system_periodic_remove: "{{ 2 * 24 * 60 * 60 }}" condor_network_interface: ens802f0.223 condor_extra: | MASTER_UPDATE_INTERVAL = 150 + CLASSAD_LIFETIME = 300 + NEGOTIATOR_INTERVAL = 15 + NEGOTIATOR_UPDATE_INTERVAL = 100 SCHEDD_INTERVAL = 60 JOB_START_COUNT = 250 JOB_START_DELAY = 0 CLAIM_WORKLIFE = 120 + NEGOTIATOR_POST_JOB_RANK = isUndefined(RemoteOwner) * (10000 - TotalLoadAvg) # gie_proxy gie_proxy_dir: "{{ galaxy_root }}/gie-proxy/proxy" @@ 
-461,17 +362,6 @@ galaxy_virtualenv_command: "pyvenv" galaxy_nonrepro_tools: "{{ galaxy_root }}/custom-tools" galaxy_nonrepro_commit: master -galaxy_dynamic_job_rules_src_dir: files/galaxy/dynamic_rules -galaxy_dynamic_job_rules_dir: "{{ galaxy_root }}/dynamic_rules" -galaxy_dynamic_job_rules: - - usegalaxy/joint_destinations.yaml - - usegalaxy/sorting_hat.py - - usegalaxy/destination_specifications.yaml - - usegalaxy/blast_destinations.py - - usegalaxy/tool_destinations.yaml - - usegalaxy/dexseq.py - - usegalaxy/wig2bigwig.py - - readme.txt # Custom override # Our galaxy_tool_dependency_dir is on NFS, and permissions are set in such a @@ -496,7 +386,7 @@ galaxy_instance_hostname: usegalaxy.eu galaxy_config_style: yaml galaxy_repo: "https://github.com/usegalaxy-eu/galaxy.git" -galaxy_commit_id: "release_22.05_europe" +galaxy_commit_id: "release_23.0_europe" galaxy_force_checkout: true # discard any modified files #galaxy_admin_email_from: 'noreply@usegalaxy.eu' @@ -523,6 +413,8 @@ galaxy_config_files: dest: "{{ galaxy_config['galaxy']['job_resource_params_file'] }}" - src: "{{ galaxy_config_file_src_dir }}/config/trs_servers_conf.yml" dest: "{{ galaxy_config['galaxy']['trs_servers_config_file'] }}" + - src: "{{ galaxy_config_file_src_dir }}/config/themes_conf.yml" + dest: "{{ galaxy_config['galaxy']['themes_config_file'] }}" # test? Unknown if this works. - src: "{{ galaxy_config_file_src_dir }}/config/error_report.yml" dest: "{{ galaxy_config_dir }}/error_report.yml" @@ -538,15 +430,15 @@ galaxy_config_files: dest: "{{ galaxy_config['galaxy']['ucsc_build_sites'] }}" - src: "{{ galaxy_config_file_src_dir }}/config/echo_main_env.xml" dest: "{{ galaxy_config_dir }}/echo_main_env.xml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/destinations.yml" - dest: "{{ tpv_mutable_dir }}/destinations.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/tools.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/tool_defaults.yml" + dest: "{{ tpv_mutable_dir }}/tool_defaults.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/tools.yml" dest: "{{ tpv_mutable_dir }}/tools.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/roles.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/roles.yml" dest: "{{ tpv_mutable_dir }}/roles.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/interactive_tools.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/interactive_tools.yml" dest: "{{ tpv_mutable_dir }}/interactive_tools.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/users.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/users.yml" dest: "{{ tpv_mutable_dir }}/users.yml" #- src: files/galaxy/config/job_resource_params_conf.xml @@ -590,3 +482,5 @@ galaxy_config_templates: dest: "{{ galaxy_config_dir }}/file_sources_conf.yml" - src: "{{ galaxy_config_template_src_dir }}/config/object_store_conf.xml.j2" dest: "{{ galaxy_config['galaxy']['object_store_config_file'] }}" + - src: "{{ galaxy_config_file_src_dir }}/tpv/destinations.yml.j2" + dest: "{{ tpv_mutable_dir }}/destinations.yml" diff --git a/group_vars/sn07.yml b/group_vars/sn07.yml index 6e7ebaef5..5a3e8bf8e 100644 --- a/group_vars/sn07.yml +++ b/group_vars/sn07.yml @@ -137,7 +137,8 @@ pip_install_dependencies: - data_hacks # used by reports - WeasyPrint - - total_perspective_vortex + - nbconvert + - gitlab-arc-fs yum_exclude_repos: 
- condor* @@ -353,18 +354,6 @@ galaxy_virtualenv_command: "{{ conda_prefix }}/envs/_galaxy_/bin/virtualenv" galaxy_nonrepro_tools: "{{ galaxy_root }}/custom-tools" galaxy_nonrepro_commit: master -galaxy_dynamic_job_rules_src_dir: files/galaxy/dynamic_rules -galaxy_dynamic_job_rules_dir: "{{ galaxy_root }}/dynamic_rules" -galaxy_dynamic_job_rules: - - usegalaxy/joint_destinations.yaml - - usegalaxy/sorting_hat.py - - usegalaxy/destination_specifications.yaml - - usegalaxy/blast_destinations.py - - usegalaxy/tool_destinations.yaml - - usegalaxy/dexseq.py - - usegalaxy/wig2bigwig.py - - readme.txt - # Custom override # Our galaxy_tool_dependency_dir is on NFS, and permissions are set in such a # way that they cannot be changed by the ansible connected user. @@ -430,15 +419,17 @@ galaxy_config_files: dest: "{{ galaxy_config['galaxy']['ucsc_build_sites'] }}" - src: "{{ galaxy_config_file_src_dir }}/config/echo_main_env.xml" dest: "{{ galaxy_config_dir }}/echo_main_env.xml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/destinations.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/tool_defaults.yml" + dest: "{{ tpv_mutable_dir }}/tool_defaults.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/destinations.yml" dest: "{{ tpv_mutable_dir }}/destinations.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/tools.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/tools.yml" dest: "{{ tpv_mutable_dir }}/tools.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/roles.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/roles.yml" dest: "{{ tpv_mutable_dir }}/roles.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/interactive_tools.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/interactive_tools.yml" dest: "{{ tpv_mutable_dir }}/interactive_tools.yml" - - src: "{{ galaxy_config_file_src_dir }}/dynamic_rules/usegalaxy/total_perspective_vortex/users.yml" + - src: "{{ galaxy_config_file_src_dir }}/tpv/users.yml" dest: "{{ tpv_mutable_dir }}/users.yml" #- src: files/galaxy/config/job_resource_params_conf.xml diff --git a/group_vars/upload.yml b/group_vars/upload.yml index de5f44eb7..3242d804a 100644 --- a/group_vars/upload.yml +++ b/group_vars/upload.yml @@ -1,27 +1,35 @@ --- -# create_user task user_name: galaxy -user_uid: 999 -user_comment: Tusd Service-Acct user_group_name: galaxy -user_gid: 999 + +# create_user task +handy_groups: + - group_name: "{{ user_group_name }}" + group_gid: 999 + +handy_users: + - user_name: "{{ user_name }}" + user_uid: 999 + user_comment: rustus Service-Acct + user_group: "{{ user_group_name }}" # Autofs -autofs_service.install: true -autofs_service.enable: true +autofs_service: + install: true + enable: true nfs_kernel_tuning: true autofs_mount_points: - - data - - gxtest - - gxkey - - jwd + - data + - gxtest + - gxkey + - jwd # usegalaxy_eu.handy.os_setup ## packages software_groups_to_install: - - editors - - utils + - editors + - utils ## kernel_5 kernel_5_package: kernel-ml diff --git a/hosts b/hosts index dda02f8c8..00594fcb5 100644 --- a/hosts +++ b/hosts @@ -70,6 +70,9 @@ sn07.galaxyproject.eu [sn05] sn05.galaxyproject.eu ansible_ssh_user=root +[maintenance] +maintenance.galaxyproject.eu + [all:vars] ansible_ssh_user=centos diff --git a/incoming.yml b/incoming.yml index 15d2c14fb..187d41ca3 100644 --- a/incoming.yml +++ b/incoming.yml @@ -7,6 +7,8 @@ proftpd_modules_config_file: 
diff --git a/incoming.yml b/incoming.yml
index 15d2c14fb..187d41ca3 100644
--- a/incoming.yml
+++ b/incoming.yml
@@ -7,6 +7,8 @@
   proftpd_modules_config_file: /etc/proftpd/modules.conf
   vars_files:
     - secret_group_vars/all.yml
+    - mounts/mountpoints.yml
+    - mounts/dest/all.yml
   collections:
     - devsec.hardening
   pre_tasks:
diff --git a/maintenance.yml b/maintenance.yml
new file mode 100644
index 000000000..f1960ddce
--- /dev/null
+++ b/maintenance.yml
@@ -0,0 +1,97 @@
+---
+- name: UseGalaxy EU maintenance server
+  hosts: maintenance
+  become: true
+  become_user: root
+  vars_files:
+    - secret_group_vars/db-main.yml
+    - secret_group_vars/all.yml
+    - mounts/dest/all.yml
+    - mounts/mountpoints.yml
+  collections:
+    - devsec.hardening
+  handlers:
+    - name: restart rsyslog
+      service:
+        name: rsyslog
+        state: restarted
+  pre_tasks:
+    - name: Set additional local mount point
+      set_fact:
+        autofs_conf_files: "{{ autofs_conf_files | combine({ 'usrlocal': autofs_conf_files['usrlocal'] + galaxy_mount }) }}"
+    - name: Install Dependencies
+      package:
+        name:
+          [
+            'git',
+            'postgresql',
+            'python3-psycopg2',
+            'python3-virtualenv',
+            'bc',
+            'python3',
+            'python3-devel',
+          ]
+      become: true
+  post_tasks:
+    - name: Append some users to the systemd-journal group
+      user:
+        name: '{{ item }}'
+        groups: systemd-journal
+        append: true
+      loop:
+        - '{{ galaxy_user.name }}'
+        - 'telegraf'
+    - name: Set authorized SSH key (galaxy user)
+      ansible.posix.authorized_key:
+        user: '{{ galaxy_user.name }}'
+        state: present
+        key: '{{ item }}'
+      loop:
+        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOBINXdjILF6x3WuppXyq6J2a2oSLR6waZ6txgjYJogHdIKPbI0TdReCv4EVxxYRY/NqGpHbjkqfRTsf2VgoU3U= mk@galaxy-mira'
+        - 'ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACB5Q5blymkTIRSzVzXITOGvBuI7W0L9Ykwfz8LJGPraaGVPiezzFGvjhqwX+EyCqQPt7JprR5mimJRw/JN3nBXWAHjekvmB5FuILkk6m5fOiQJ5QhRMyQ5GfxODAvGbHpTuWHbYJLWD5fhcboKPxlXOWy4xY9kDZVuQvEKisNKYBsFLA== sanjay'
+        - 'ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de'
+    - name: rsyslog configuration
+      copy:
+        content: |
+          # Accept logs on TCP port 514
+          module(load="imtcp")
+          input(type="imtcp" port="514")
+
+          # Create a template for log file naming
+          $template RemoteLogs,"/var/log/remote/%HOSTNAME%/%PROGRAMNAME%.log"
+
+          # Store logs from clients in their own directories
+          if $fromhost-ip != "127.0.0.1" then ?RemoteLogs
+        dest: /etc/rsyslog.d/remote.conf
+        owner: root
+        group: root
+        mode: '0644'
+      notify:
+        - restart rsyslog
+  roles:
+    - usegalaxy_eu.handy.os_setup
+    - geerlingguy.repo-epel
+    - usegalaxy-eu.autoupdates
+    - influxdata.chrony
+    - usegalaxy-eu.autofs
+    - hxr.monitor-cluster
+    - hxr.monitor-galaxy
+    - usegalaxy-eu.monitoring
+    - usegalaxy-eu.bashrc
+    - usegalaxy_eu.htcondor
+    - usegalaxy-eu.dynmotd
+    - ssh-host-sign
+    - hxr.postgres-connection
+    - galaxyproject.gxadmin
+    # Uncomment the roles below when this host goes into production:
+    # - usegalaxy-eu.galaxy-slurp
+    # - usegalaxy_eu.fs_maintenance
+    # - usegalaxy-eu.htcondor_release
+    # - usegalaxy-eu.fix-unscheduled-workflows
+    # - usegalaxy-eu.fix-ancient-ftp-data
+    # - usegalaxy-eu.fix-user-quotas
+    - ssh_hardening
+    - dj-wasabi.telegraf
+    # - usegalaxy-eu.fix-stop-ITs
+    - usegalaxy-eu.vgcn-monitoring
+    - usegalaxy-eu.logrotate
diff --git a/mounts b/mounts
new file mode 160000
index 000000000..613300bc3
--- /dev/null
+++ b/mounts
@@ -0,0 +1 @@
+Subproject commit 613300bc3ecbc025474dc8bb7ecab476d919008b
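The rsyslog drop-in in maintenance.yml above turns this host into a central TCP log receiver. A minimal smoke test from any client, assuming the util-linux `logger` and an open path to port 514/tcp (hostname taken from the inventory; the log path mirrors the `$template` above):

```bash
# Emit one test message over TCP to the maintenance host.
logger -n maintenance.galaxyproject.eu -P 514 -T "remote-logging smoke test from $(hostname)"

# Then, on maintenance.galaxyproject.eu, the message should land under:
#   /var/log/remote/<client-hostname>/<program>.log
```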
diff --git a/mq.yml b/mq.yml
index 53462e576..f5e47b2e4 100644
--- a/mq.yml
+++ b/mq.yml
@@ -27,11 +27,51 @@
       persistent: true
     loop:
       - httpd_can_network_connect
+  - name: Change the redis_t domain to permissive
+    community.general.selinux_permissive:
+      name: redis_t
+      permissive: true
-  - name: Disable firewalld service
+  - name: Enable and start the firewalld service
     ansible.builtin.service:
       name: firewalld
-      enabled: false
-      state: stopped
+      enabled: true
+      state: started
+  - name: Open ports for redis
+    ansible.posix.firewalld:
+      port: 6379/tcp
+      permanent: true
+      state: enabled
+  - name: Open ports for rabbitmq
+    ansible.posix.firewalld:
+      service: "{{ item }}"
+      permanent: true
+      state: enabled
+    loop:
+      - amqp
+      - amqps
+      - https
+      - http
+  - name: Create redis db dir
+    ansible.builtin.file:
+      path: /var/lib/redis
+      owner: redis
+      group: redis
+      state: directory
+      mode: "0755"
+  - name: Create redis log dir
+    ansible.builtin.file:
+      path: /var/log/redis
+      owner: redis
+      group: redis
+      state: directory
+      mode: "0755"
+  - name: Touch the redis log file
+    ansible.builtin.file:
+      path: /var/log/redis/redis-server.log
+      owner: redis
+      group: redis
+      state: touch
+      mode: "0644"
   roles:
     ## Starting configuration of the operating system
     - role: usegalaxy_eu.handy.os_setup
@@ -47,6 +87,7 @@
     - geerlingguy.docker
     - galaxyproject.nginx
     - usegalaxy_eu.rabbitmqserver
+    - geerlingguy.redis
     - dj-wasabi.telegraf
     # hardening
     - os_hardening
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..71f74a15a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.isort]
+profile = "black"
+line_length = 79
+extend_skip = [
+    "collections",
+    "roles/htcondor",
+    "roles/hxr.monitor-galaxy",
+    "roles/hxr.monitor-squid",
+    "roles/hxr.simple-nagios",
+    "roles/jasonroyle.rabbitmq",
+    "templates/encoder/yaml_converter.py",
+]
+
+[tool.black]
+line-length = 79
+target-version = ['py39']
+extend-exclude = """
+    collections|\
+    roles/htcondor|\
+    roles/hxr.monitor-galaxy|\
+    roles/hxr.monitor-squid|\
+    roles/hxr.simple-nagios|\
+    roles/jasonroyle.rabbitmq|\
+    templates/encoder/yaml_converter.py\
+"""
diff --git a/requirements.txt b/requirements.txt
index f65aa4011..d5e900aa9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
-ansible-core==2.11.3
+ansible-core==2.14.5
 boto==2.49.0
diff --git a/requirements.yaml b/requirements.yaml
index 186231d18..7bcdc0a1a 100644
--- a/requirements.yaml
+++ b/requirements.yaml
@@ -17,11 +17,14 @@ collections:
     source: https://galaxy.ansible.com
     type: galaxy
   - name: devsec.hardening
-    version: 8.3.0
+    version: 8.7.0
     source: https://galaxy.ansible.com
     type: galaxy
   - name: usegalaxy_eu.handy
-    version: 2.10.0
+    version: 3.0.0
+    source: https://galaxy.ansible.com
+  - name: ansible.windows
+    version: 1.14.0
     source: https://galaxy.ansible.com
 
 roles:
@@ -31,8 +34,8 @@ roles:
     version: 9.7.0
   - name: devops.tomcat7
     version: 1.0.0
-  # - name: dj-wasabi.telegraf
-  #   version: 0.10.0
+  - name: dj-wasabi.telegraf
+    version: 0.14.0
   - name: galaxyproject.galaxy
     src: https://github.com/galaxyproject/ansible-galaxy
     version: 0.9.18
@@ -95,10 +98,10 @@ roles:
     version: 1.0.0
   - name: usegalaxy_eu.fs_maintenance
     version: 0.0.5
-  - name: galaxyproject.tusd
-    version: 0.0.1
+  - name: usegalaxy_eu.rustus
+    version: 0.2.0
   - name: usegalaxy_eu.rabbitmqserver
-    version: 1.4.1
+    version: 1.4.4
   - name: usegalaxy_eu.influxdbserver
     version: 1.1.1
   - name: usegalaxy_eu.flower
@@ -111,3 +114,7 @@
     version: 0.3.1
   - name: usegalaxy_eu.tpv_auto_lint
     version: 0.2.1
+  - name: usegalaxy_eu.grafana_matrix_forwarder
+    version: 1.0.0
+  - name:
geerlingguy.redis + version: 1.8.0 diff --git a/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/bug_report.md b/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 03201270f..000000000 --- a/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -name: Bug report -about: Help to improve this Ansible role. - ---- - -**Describe the bug** - - -**Installation method/version** - - -* Github / latest -* Ansible Galaxy / 1.1.0 - -**Ansible Version** - -``` - -``` - -**Targetted hosts** -Concerns the following OS(es): - - -* Ubuntu -* Debian -* CentOS -* Mint - -**Expected behavior** - - -**Additional context** - diff --git a/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/feature_request.md b/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 08c83431d..000000000 --- a/roles/dj-wasabi.telegraf/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this Ansible role - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/roles/dj-wasabi.telegraf/.gitignore b/roles/dj-wasabi.telegraf/.gitignore deleted file mode 100644 index d52963b91..000000000 --- a/roles/dj-wasabi.telegraf/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -.idea -.molecule -tests/.cache -.cache -__pycache__ -*.retry -pmip diff --git a/roles/dj-wasabi.telegraf/.travis.yml b/roles/dj-wasabi.telegraf/.travis.yml deleted file mode 100644 index 86a0c6572..000000000 --- a/roles/dj-wasabi.telegraf/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -sudo: required -language: python -services: - - docker - -install: - - pip install -r requirements.txt - -script: - - molecule --version - - ansible --version - - molecule test -notifications: - webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/roles/dj-wasabi.telegraf/CHANGELOG.md b/roles/dj-wasabi.telegraf/CHANGELOG.md deleted file mode 100644 index d9d022c85..000000000 --- a/roles/dj-wasabi.telegraf/CHANGELOG.md +++ /dev/null @@ -1,108 +0,0 @@ -dj-wasabi.telegraf ------------------- - -Below an overview of all changes in the releases. 
- -Version (Release date) - -0.10.0 (2018-08-12) - - * Updating to telegraf 1.7.3 - * Fix Deprecation warnings #54 - * Changed 'include' to 'include_tasks' to remove deprecation warning #53 (By pull request: tjend (Thanks!)) - * Add option to remove extra plugin config files #52 (By pull request: tjend (Thanks!)) - * Plugins extra hash allow multiple inputs same type #50 (By pull request: tjend (Thanks!)) - * Using specific version for tests - * Update minimum Ansible version to 2.4 - -0.9.0 (2018-05-06) - - * plugins: be able to specify the filename of extra plugings #40 (By pull request: gaelL (Thanks!)) - * Fix markdown #41 (By pull request: Angristan (Thanks!)) - * Allow to override RedHat release version #43 (By pull request: tszym (Thanks!)) - * Improved comments, split up role, moved tags and added defaults #45 (By pull request: boxrick (Thanks!)) - * Fix Travis Tests #42 - * Convert the telegraf_plugins_extra varaible to a hash so that we can … #46 (By pull request: tjend (Thanks!)) - -0.8.0 (2017-10-30) - - * Updating to Molecule V2 - * Test if LSB codename exists before using it #35 (By pull request: tszym (Thanks!)) - * Remove useless packages on RedHat. fix #28 #36 (By pull request: tszym (Thanks!)) - * Fix extra plugins by file / Change apt source filename / Change tags by global_tags #37 (By pull request: aarnaud (Thanks!)) - * Use telegra_global_tags for oldest telegraf versions #38 (By pull request: tszym (Thanks!)) - -0.7.0 (2017-02-23) - - * Replace action by modules #26 (By pull request: tszym (Thanks!)) - * Use yum repository to install telegraf on RedHat #25 (By pull request: tszym (Thanks!)) - * Remove for-loop in extra-plugin template #24 (By pull request: emersondispatch (Thanks!)) - * Update Debian.yml #23 (By pull request: zend0 (Thanks!)) - * extra plugins tags #21 (By pull request: oboukili (Thanks!)) - * Input tags support #20 (By pull request: szibis (Thanks!)) - * Fix telegraf confguration permissions #19 (By pull request: szibis (Thanks!)) - -0.6.0 (2017-01-02) - - * Fix the Influxdb repo for "hybrid" debian distros (like "jessie/sid") #9 (By pull request: Ismael (Thanks!)) - * Do "become" for the steps that require root access on Debian #10 (By pull request: Ismael (Thanks!)) - * Fix the Influxdb repo for "hybrid" debian distros (like "jessie/sid") #11 (By pull request: Ismael (Thanks!)) - * Removed imports #12 - * Fixing molecule #15 - * set telegraf hostname in defaults. 
#13 (By pull request: romainbureau (Thanks!)) - * use version_compare filter … #14 (By pull request: lhoss (Thanks!)) - * support missing agent settings upto telegraf v1.1 #16 (By pull request: lhoss (Thanks!)) - * update the README with the latest v0.13 - v1.1 agent settings #17 (By pull request: lhoss (Thanks!)) - -0.5.1 (2016-08-24) - - * fixed issue with ansible not getting the package #6 (By pull request: thecodeassassin (Thanks!)) - -0.5.0 (2016-07-17) - - * Removed Test Kitchen tests - * Added Molecule tests and travis make use of them - * Updated default version to 1.0.0 beta2 - * Feature/add extra plugins to telegrafd folder #5 (By pull request: stvnwrgs (Thanks!)) - -0.4.0 (2016-02-05) - - * Fixed test for test-kitchen - * Added travis-ci test for testing default installation when PR is made - * Fixed Download url for Debian - * Removed default entry for telegraf_plugins_extra - -0.3.0 (2016-01-13) - - * Made it work with telegraf 0.10.0 - * Default installation: 0.10.0 - -0.2.0 (2015-11-14) - - * Fixed kitchen test setup - * Adding "net" to the telegraf_plugins_default property - * Update etc-opt-telegraf-telegraf.conf.j2 #2 (By pull request: aferrari-technisys (Thanks!)) - * Improvement and upgrade for v0.2.0 of telegraf #1 (By pull request: aferrari-technisys (Thanks!)) - -0.1.0 (2015-09-23) - - * Updated `telegraf_agent_version` to 0.1.9 - * Added restart when package is changed (When updated for example) - * Added several plugin options: - * pass - * drop - * tagpass - * tagdrop - * interval - * Updated documentation - - -0.0.2 (2015-09-20) - - * Updated README dus to missing colon - * Forgot to update the meta file - * Added Changelog file - -0.0.1 (2015-09-20) - - * Initial release diff --git a/roles/dj-wasabi.telegraf/CODE_OF_CONDUCT.md b/roles/dj-wasabi.telegraf/CODE_OF_CONDUCT.md deleted file mode 100644 index e28ac17f7..000000000 --- a/roles/dj-wasabi.telegraf/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Code of Conduct - -The Code of Conduct from Ansible found [here](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html) applies to this Ansible role as well. diff --git a/roles/dj-wasabi.telegraf/CONTRIBUTING.md b/roles/dj-wasabi.telegraf/CONTRIBUTING.md deleted file mode 100644 index 4b8af3768..000000000 --- a/roles/dj-wasabi.telegraf/CONTRIBUTING.md +++ /dev/null @@ -1,88 +0,0 @@ -# Contributing to this role - -**Table of content** - -- [Contributing to this role](#contributing-to-this-role) - * [Contributing](#contributing) - * [(local) Development](#-local--development) - + [Requirements](#requirements) - + [Execution](#execution) -- [Other](#other) - * [Virtualenv](#virtualenv) - * [Links](#links) - -Thank you very much for making time to improve this Ansible role. - -## Contributing - -Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. [Contributor Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). - -1. Fork the repo - -2. Create a branch and apply your changes to this branch. - - a. Make sure you have updated the documentation when adding new variables; - - b. Don't forget to add/update tests so we can test the functionality during each Pull Request; - - c. Make sure the tests will succeed. - -3. Push the branch to your fork and submit a pull request. - -**Note** - -Pull Requests that fails during the tests will not be merged. 
- -## Coding Guidelines - -Style guides are important because they ensure consistency in the content, look, and feel of a book or a website. - -* [Ansible Style Guide](http://docs.ansible.com/ansible/latest/dev_guide/style_guide/) -* It's "Ansible" when referring to the product and ``ansible`` when referring to the command line tool, package, etc -* Playbooks should be written in multi-line YAML with ``key: value``. The form ``key=value`` is only for ``ansible`` ad-hoc, not for ``ansible-playbook``. -* Tasks should always have a ``name:`` - -## (local) Development - -This role make use of Molecule to test the execution of the role and verificate it. In the root of the repository, a file named `requirements.txt` exists and contains the versions used by the tests. - -### Requirements - -You can install them with the following command: - -``` -pip install -r requirements.txt -``` - -Once the dependencies are installed, please install Docker as Molecule is configured in this repository to create Docker containers. See [this](https://docs.docker.com/install/) link to install Docker on your system. - -### Execution - -Once everything is installed, you can validate your changes by executing: -``` -molecule test -``` - -It should run without any issues. - -# Other - -## Virtualenv - -Suggestion is to create a virtualenv so you won't have issues with other projects. - -Some web pages describing for virtual env: - -* http://thepythonguru.com/python-virtualenv-guide/ -* https://realpython.com/python-virtual-environments-a-primer/ -* https://www.dabapps.com/blog/introduction-to-pip-and-virtualenv-python/ - -## Links - -[Molecule](https://molecule.readthedocs.io/) - -[Ansible](https://www.ansible.com/) - -[Molecule V2 with your own role](https://werner-dijkerman.nl/2017/09/05/using-molecule-v2-to-test-ansible-roles/) - -**End note**: Have fun making changes. If a feature helps you, then others find it helpful too and I will happily have it merged. diff --git a/roles/dj-wasabi.telegraf/LICENSE b/roles/dj-wasabi.telegraf/LICENSE deleted file mode 100644 index 6922fb326..000000000 --- a/roles/dj-wasabi.telegraf/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Werner Dijkerman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/roles/dj-wasabi.telegraf/PULL_REQUEST_TEMPLATE.md b/roles/dj-wasabi.telegraf/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 70c5edefc..000000000 --- a/roles/dj-wasabi.telegraf/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,12 +0,0 @@ -**Description of PR** - - -**Type of change** - - -Feature Pull Request -Bugfix Pull Request -Docs Pull Request - -**Fixes an issue** - diff --git a/roles/dj-wasabi.telegraf/README.md b/roles/dj-wasabi.telegraf/README.md deleted file mode 100644 index 5b65c8919..000000000 --- a/roles/dj-wasabi.telegraf/README.md +++ /dev/null @@ -1,169 +0,0 @@ -# dj-wasabi.telegraf - -## Build status: - -[![Build Status](https://travis-ci.org/dj-wasabi/ansible-telegraf.svg?branch=master)](https://travis-ci.org/dj-wasabi/ansible-telegraf) - -This role will install and configure telegraf. - -Telegraf is an agent written in Go for collecting metrics from the system it's running on, or from other services, and writing them into InfluxDB. - -Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics from well known services (like Hadoop, Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). - -(https://github.com/influxdb/telegraf) - -## Requirements - - -No requirements. (Yes, an Influxdb server somewhere on the network will help though ;-) ) - -## Upgrade -### 0.7.0 - -There was an issue: - - If I configure a telegraf_plugins_extra, run ansible, delete the plugin and run ansible again, the plugin stays on the machine. - - - -## Role Variables - -The following parameters can be set for the Telegraf agent: - -* `telegraf_agent_version`: The version of Telegraf to install. Default: `1.0.0` -* `telegraf_agent_interval`: The interval configured for sending data to the server. Default: `10` -* `telegraf_agent_debug`: Run Telegraf in debug mode. Default: `False` -* `telegraf_agent_round_interval`: Rounds collection interval to 'interval' Default: True -* `telegraf_agent_flush_interval`: Default data flushing interval for all outputs. Default: 10 -* `telegraf_agent_flush_jitter`: Jitter the flush interval by a random amount. Default: 0 -* `telegraf_agent_aws_tags`: Configure AWS ec2 tags into Telegraf tags section Default: `False` -* `telegraf_agent_aws_tags_prefix`: Define a prefix for AWS ec2 tags. Default: `""` -* `telegraf_agent_collection_jitter`: Jitter the collection by a random amount. Default: 0 (since v0.13) -* `telegraf_agent_metric_batch_size`: The agent metric batch size. Default: 1000 (since v0.13) -* `telegraf_agent_metric_buffer_limit`: The agent metric buffer limit. Default: 10000 (since v0.13) -* `telegraf_agent_quiet`: Run Telegraf in quiet mode (error messages only). Default: `False` (since v0.13) -* `telegraf_agent_logfile`: The agent logfile name. Default: '' (means to log to stdout) (since v1.1) -* `telegraf_agent_omit_hostname`: Do no set the "host" tag in the agent. Default: `False` (since v1.1) - -Full agent settings reference: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent-configuration - -You can set tags for the host running telegraf: - - telegraf_global_tags: - - tag_name: some_name - tag_value: some_value - -Specifying an output. 
The default is set to localhost, you'll have to specify the correct influxdb server: - - telegraf_agent_output: - - type: influxdb - config: - - urls = ["http://localhost:8086"] - - database = "telegraf" - tagpass: - - diskmetrics = ["true"] - -The config will be printed line by line into the configuration, so you could also use: - - config: - - # Print an documentation line - -and it will be printed in the configuration file. - -There are two properties which are similar, but are used differently. Those are: - -* `telegraf_plugins_default` -* `telegraf_plugins_extra` - -With the property `telegraf_plugins_default` it is set to use the default set of Telegraf plugins. You could override it with more plugins, which should be enabled at default. - - telegraf_plugins_default: - - plugin: cpu - config: - - percpu = true - - plugin: disk - - plugin: io - - plugin: mem - - plugin: system - - plugin: swap - - plugin: netstat - -Every telegraf agent has these as a default configuration. - -The 2nd parameter `telegraf_plugins_extra` can be used to add plugins specific to the servers goal. It is a hash instead of a list, so that you can merge values from multiple var files together. Following is an example for using this parameter for MySQL database servers: - - cat group_vars/mysql_database - telegraf_plugins_extra: - mysql: - config: - - servers = ["root:{{ mysql_root_password }}@tcp(localhost:3306)/"] - - -Telegraf plugin options: - -* `tags` An k/v tags to apply to a specific input's measurements. Can be used on any stage for better filtering for example in outputs. -* `pass`: An array of strings that is used to filter metrics generated by the current plugin. Each string in the array is tested as a prefix against metric names and if it matches, the metric is emitted. -* `drop`: The inverse of pass, if a metric name matches, it is not emitted. -* `tagpass`: (added in Telegraf 0.1.5) tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as an exact match against the tag name, and if it matches the metric is emitted. -* `tagdrop`: (added in Telegraf 0.1.5) The inverse of tagpass. If a tag matches, the metric is not emitted. This is tested on metrics that have passed the tagpass test. -* `interval`: How often to gather this metric. Normal plugins use a single global interval, but if one particular plugin should be run less or more often, you can configure that here. - -An example might look like this: - - telegraf_plugins_default: - - plugin: disk - interval: 12 - tags: - - diskmetrics = "true" - tagpass: - - fstype = [ "ext4", "xfs" ] - - path = [ "/opt", "/home" ] - - - -## Dependencies - -No dependencies - -## Example Playbook - - - hosts: servers - roles: - - { role: dj-wasabi.telegraf } - -## Contributors - -The following have contributed to this Ansible role: - - * Thomas Szymanski - * Alejandro - * Slawomir Skowron - * Ismael - * Laurent Hoss - * Anthony ARNAUD - * Rick Box - * Emerson Knapp - * gaelL - * Steven Wirges - * zend0 - * Angristan - * Olivier Boukili - * Romain BUREAU - * TheCodeAssassin - * tjend - -Thank you all! - -## Molecule - -This roles is configured to be tested with Molecule. You can find on this page some more information regarding Molecule: https://werner-dijkerman.nl/2016/07/10/testing-ansible-roles-with-molecule-testinfra-and-docker/ - -## License - -BSD - -## Author Information - -Please let me know if you have issues. Pull requests are also accepted! :-) - -mail: ikben [ at ] werner-dijkerman . 
nl diff --git a/roles/dj-wasabi.telegraf/defaults/main.yml b/roles/dj-wasabi.telegraf/defaults/main.yml deleted file mode 100644 index bd799c9c4..000000000 --- a/roles/dj-wasabi.telegraf/defaults/main.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -# defaults file for ansible-telegraf - -telegraf_agent_version: 1.7.3 -telegraf_agent_hostname: "{{ ansible_fqdn }}" -telegraf_agent_interval: 10 -telegraf_agent_debug: False -telegraf_agent_round_interval: True -telegraf_agent_flush_interval: 10 -telegraf_agent_flush_jitter: 0 -telegraf_agent_aws_tags: False -telegraf_agent_aws_tags_prefix: "" - -# v0.13 settings (not sure if supported in older version): -telegraf_agent_collection_jitter: 0 -telegraf_agent_metric_batch_size: 1000 -telegraf_agent_metric_buffer_limit: 10000 -telegraf_agent_quiet: False - -# v1.1 settings: -telegraf_agent_logfile: "" -telegraf_agent_omit_hostname: False - -telegraf_global_tags: [] - -telegraf_agent_output: - - type: influxdb - config: - - urls = ["http://localhost:8086"] - - database = "telegraf" - - precision = "s" - -# defaults - /etc/telegraf/telegraf.conf -telegraf_plugins_default: - - plugin: cpu - config: - - percpu = true - - plugin: disk - - plugin: io - - plugin: mem - - plugin: net - - plugin: system - - plugin: swap - - plugin: netstat - - plugin: processes - - plugin: kernel - -# extra configuration - /etc/telegraf/telegraf.d/* -telegraf_plugins_extra: {} - -# RedHat specific settings for convenience -telegraf_redhat_releasever: "$releasever" diff --git a/roles/dj-wasabi.telegraf/meta/.galaxy_install_info b/roles/dj-wasabi.telegraf/meta/.galaxy_install_info deleted file mode 100644 index f229fa4ff..000000000 --- a/roles/dj-wasabi.telegraf/meta/.galaxy_install_info +++ /dev/null @@ -1 +0,0 @@ -{install_date: 'Mon Sep 17 11:27:03 2018', version: 0.10.0} diff --git a/roles/dj-wasabi.telegraf/meta/main.yml b/roles/dj-wasabi.telegraf/meta/main.yml deleted file mode 100644 index 016e57c57..000000000 --- a/roles/dj-wasabi.telegraf/meta/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -galaxy_info: - author: Werner Dijkerman - description: Installing and configuring Telegraf - company: - license: license BSD - min_ansible_version: 2.4 - platforms: - - name: EL - versions: - - all - - name: Ubuntu - versions: - - all - - name: Debian - versions: - - all - categories: - - monitoring -dependencies: [] diff --git a/roles/dj-wasabi.telegraf/molecule/default/Dockerfile.j2 b/roles/dj-wasabi.telegraf/molecule/default/Dockerfile.j2 deleted file mode 100644 index 7ba209c3e..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/Dockerfile.j2 +++ /dev/null @@ -1,7 +0,0 @@ -FROM {{ item.image }} - -RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get upgrade -y && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ - elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python python-devel python2-dnf bash && dnf clean all; \ - elif [ $(command -v yum) ]; then yum makecache fast && yum update -y && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ - elif [ $(command -v zypper) ]; then zypper refresh && zypper update -y && zypper install -y python sudo bash python-xml && zypper clean -a; \ - elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; fi diff --git a/roles/dj-wasabi.telegraf/molecule/default/INSTALL.rst b/roles/dj-wasabi.telegraf/molecule/default/INSTALL.rst deleted file mode 100644 index 09e87359c..000000000 
--- a/roles/dj-wasabi.telegraf/molecule/default/INSTALL.rst +++ /dev/null @@ -1,46 +0,0 @@ -******* -Install -******* - -This set of playbooks have specific dependencies on Ansible due to the modules -being used. - -Requirements -============ - -* Ansible 2.2 -* Docker Engine -* docker-py - -Install OS dependencies on CentOS 7 - -.. code-block:: bash - - $ sudo yum install -y epel-release - $ sudo yum install -y gcc python-pip python-devel openssl-devel - # If installing Molecule from source. - $ sudo yum install libffi-devel git - -Install OS dependencies on Ubuntu 16.x - -.. code-block:: bash - - $ sudo apt-get update - $ sudo apt-get install -y python-pip libssl-dev docker-engine - # If installing Molecule from source. - $ sudo apt-get install -y libffi-dev git - -Install OS dependencies on Mac OS - -.. code-block:: bash - - $ brew install python - $ brew install git - -Install using pip: - -.. code-block:: bash - - $ sudo pip install ansible - $ sudo pip install docker-py - $ sudo pip install molecule --pre diff --git a/roles/dj-wasabi.telegraf/molecule/default/create.yml b/roles/dj-wasabi.telegraf/molecule/default/create.yml deleted file mode 100644 index eca41c5ae..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/create.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Create - hosts: localhost - connection: local - gather_facts: False - no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" - vars: - molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" - molecule_ephemeral_directory: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}" - molecule_scenario_directory: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}" - molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" - tasks: - - name: Create Dockerfiles from image names - template: - src: "{{ molecule_scenario_directory }}/Dockerfile.j2" - dest: "{{ molecule_ephemeral_directory }}/Dockerfile_{{ item.image | regex_replace('[^a-zA-Z0-9_]', '_') }}" - with_items: "{{ molecule_yml.platforms }}" - register: platforms - - - name: Discover local Docker images - docker_image_facts: - name: "molecule_local/{{ item.item.name }}" - with_items: "{{ platforms.results }}" - register: docker_images - - - name: Build an Ansible compatible image - docker_image: - path: "{{ molecule_ephemeral_directory }}" - name: "molecule_local/{{ item.item.image }}" - dockerfile: "{{ item.item.dockerfile | default(item.invocation.module_args.dest) }}" - force: "{{ item.item.force | default(True) }}" - with_items: "{{ platforms.results }}" - when: platforms.changed or docker_images.results | map(attribute='images') | select('equalto', []) | list | count >= 0 - - - name: Create molecule instance(s) - docker_container: - name: "{{ item.name }}" - hostname: "{{ item.name }}" - image: "molecule_local/{{ item.image }}" - state: started - recreate: False - log_driver: none - command: "{{ item.command | default('sleep infinity') }}" - privileged: "{{ item.privileged | default(omit) }}" - volumes: "{{ item.volumes | default(omit) }}" - capabilities: "{{ item.capabilities | default(omit) }}" - with_items: "{{ molecule_yml.platforms }}" diff --git a/roles/dj-wasabi.telegraf/molecule/default/destroy.yml b/roles/dj-wasabi.telegraf/molecule/default/destroy.yml deleted file mode 100644 index 63b5edf9d..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/destroy.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Destroy - hosts: localhost - connection: local - gather_facts: False - no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" - 
vars: - molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" - molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" - tasks: - - name: Destroy molecule instance(s) - docker_container: - name: "{{ item.name }}" - state: absent - force_kill: "{{ item.force_kill | default(True) }}" - with_items: "{{ molecule_yml.platforms }}" diff --git a/roles/dj-wasabi.telegraf/molecule/default/molecule.yml b/roles/dj-wasabi.telegraf/molecule/default/molecule.yml deleted file mode 100644 index 1ae3e205f..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/molecule.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -dependency: - name: galaxy -driver: - name: docker -lint: - name: yamllint - options: - config-file: molecule/default/yaml-lint.yml - -platforms: - - name: telegraf-centos - image: milcom/centos7-systemd - privileged: True - - name: telegraf-debian - image: minimum2scp/systemd-stretch - privileged: True - command: /sbin/init - - name: telegraf-ubuntu - image: solita/ubuntu-systemd:bionic - privileged: True - command: /sbin/init - -provisioner: - name: ansible - lint: - name: ansible-lint -scenario: - name: default -verifier: - name: testinfra - lint: - name: flake8 diff --git a/roles/dj-wasabi.telegraf/molecule/default/playbook.yml b/roles/dj-wasabi.telegraf/molecule/default/playbook.yml deleted file mode 100644 index dd38a7769..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/playbook.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: all - pre_tasks: - - name: "Installing which on CentOS" - yum: - name: which - state: present - when: - - ansible_os_family == 'RedHat' - - - name: "Installing wget on Debian" - apt: - name: "{{ item }}" - state: present - when: - - ansible_os_family == 'Debian' - with_items: - - wget - - gpg - - roles: - - role: ansible-telegraf diff --git a/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.py b/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.py deleted file mode 100644 index fe5e76966..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -import testinfra.utils.ansible_runner - -testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( - os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') - - -def test_telegraf_running_and_enabled(Service, SystemInfo): - telegraf = Service("telegraf") - assert telegraf.is_enabled - if SystemInfo.distribution == 'centos': - assert telegraf.is_running - - -def test_telegraf_dot_conf(File): - telegraf = File("/etc/telegraf/telegraf.conf") - assert telegraf.user == "telegraf" - assert telegraf.group == "telegraf" - assert telegraf.mode == 0o640 - assert telegraf.contains('[[inputs.cpu]]') - - -def test_telegraf_package(Package, SystemInfo): - telegraf = Package('telegraf') - assert telegraf.is_installed diff --git a/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.pyc b/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.pyc deleted file mode 100644 index 4864f1799..000000000 Binary files a/roles/dj-wasabi.telegraf/molecule/default/tests/test_default.pyc and /dev/null differ diff --git a/roles/dj-wasabi.telegraf/molecule/default/yaml-lint.yml b/roles/dj-wasabi.telegraf/molecule/default/yaml-lint.yml deleted file mode 100644 index cd377bf65..000000000 --- a/roles/dj-wasabi.telegraf/molecule/default/yaml-lint.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -extends: default - -rules: - line-length: - max: 140 - level: warning - truthy: disable diff --git a/roles/dj-wasabi.telegraf/requirements.txt 
b/roles/dj-wasabi.telegraf/requirements.txt deleted file mode 100644 index f3b20214c..000000000 --- a/roles/dj-wasabi.telegraf/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -ansible==2.4.4.0 -docker==3.3.0 -molecule==2.13.1 -testinfra==1.12.0 diff --git a/roles/dj-wasabi.telegraf/tasks/Debian.yml b/roles/dj-wasabi.telegraf/tasks/Debian.yml deleted file mode 100644 index d07077d41..000000000 --- a/roles/dj-wasabi.telegraf/tasks/Debian.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- - -- name: Ensure the system can use the HTTPS transport for APT. - stat: - path: /usr/lib/apt/methods/https - register: apt_https_transport - -- name: Install APT HTTPS transport. - apt: - name: "apt-transport-https" - state: present - when: not apt_https_transport.stat.exists - become: yes - -- name: Download Telegraf apt key. - apt_key: - url: "https://repos.influxdata.com/influxdb.key" - state: present - become: yes - -- name: Add Telegraf repository (using LSB). - apt_repository: - repo: "deb https://repos.influxdata.com/{{ ansible_distribution|lower }} {{ ansible_lsb.codename }} stable" - filename: "telegraf" - state: present - become: yes - when: ansible_lsb is defined and ansible_lsb.codename is defined - -- name: Add Telegraf repository. - apt_repository: - repo: "deb https://repos.influxdata.com/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} stable" - filename: "telegraf" - state: present - become: yes - when: ansible_lsb is not defined or ansible_lsb.codename is not defined - -- name: "Install telegraf package | Debian" - apt: - name: "telegraf" - state: present - notify: "Restart Telegraf" - become: yes diff --git a/roles/dj-wasabi.telegraf/tasks/RedHat.yml b/roles/dj-wasabi.telegraf/tasks/RedHat.yml deleted file mode 100644 index f43df7386..000000000 --- a/roles/dj-wasabi.telegraf/tasks/RedHat.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# description: RedHat specific installation - -- name: "Add yum repository | RedHat" - yum_repository: - name: influxdb - description: InfluxDB Repository - RHEL $releasever - baseurl: "https://repos.influxdata.com/rhel/{{ telegraf_redhat_releasever }}/$basearch/stable" - gpgcheck: yes - gpgkey: https://repos.influxdata.com/influxdb.key - -- name: "Install telegraf package | RedHat" - yum: - name: "telegraf-{{ telegraf_agent_version }}" - state: installed - notify: "Restart Telegraf" diff --git a/roles/dj-wasabi.telegraf/tasks/configure.yml b/roles/dj-wasabi.telegraf/tasks/configure.yml deleted file mode 100644 index 1fdb6f2b0..000000000 --- a/roles/dj-wasabi.telegraf/tasks/configure.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -# description: Configure telegraf and get all relevent ec2 information - -- name: Retrieve ec2 facts - ec2_metadata_facts: - when: telegraf_agent_aws_tags - -- name: Retrieve all ec2 tags on the instance - ec2_tag: - region: '{{ ansible_ec2_placement_region }}' - resource: '{{ ansible_ec2_instance_id }}' - state: list - when: telegraf_agent_aws_tags - register: ec2_tags - -- name: "Copy the template for versions < 0.10.0" - template: - src: etc-opt-telegraf-telegraf.conf.j2 - dest: /etc/opt/telegraf/telegraf.conf - owner: telegraf - group: telegraf - mode: 0640 - become: yes - when: telegraf_agent_version is version_compare('0.10.0', '<') - notify: "Restart Telegraf" - -- name: "Copy the template for versions >= 0.10.0" - template: - src: telegraf.conf.j2 - dest: /etc/telegraf/telegraf.conf - owner: telegraf - group: telegraf - mode: 0640 - become: yes - when: telegraf_agent_version is version_compare('0.10.0', '>=') - notify: "Restart 
Telegraf" - -- name: "Copy telegraf extra plugins" - template: - src: "telegraf-extra-plugin.conf.j2" - dest: "/etc/telegraf/telegraf.d/{{ item.key }}.conf" - owner: telegraf - group: telegraf - mode: 0640 - with_dict: "{{ telegraf_plugins_extra }}" - when: "telegraf_plugins_extra is defined and telegraf_plugins_extra is iterable and item.value.state|default('present') != 'absent'" - become: yes - notify: "Restart Telegraf" - -- name: "Remove telegraf extra plugins" - file: - path: "/etc/telegraf/telegraf.d/{{ item.key }}.conf" - state: absent - with_dict: "{{ telegraf_plugins_extra }}" - when: "telegraf_plugins_extra is defined and telegraf_plugins_extra is iterable and item.value.state|default('present') == 'absent'" - become: yes - notify: "Restart Telegraf" - -- name: "Force restart service after reread config" - meta: flush_handlers diff --git a/roles/dj-wasabi.telegraf/tasks/main.yml b/roles/dj-wasabi.telegraf/tasks/main.yml deleted file mode 100644 index 0f850ef90..000000000 --- a/roles/dj-wasabi.telegraf/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# tasks file for ansible-telegraf - -- name: "Install the correct repository" - include_tasks: "RedHat.yml" - when: ansible_os_family == "RedHat" - -- name: "Install the correct repository" - include_tasks: "Debian.yml" - when: ansible_os_family == "Debian" - tags: - - telegraf - - packages - -- name: "Configure telegraf" - include_tasks: "configure.yml" diff --git a/roles/dj-wasabi.telegraf/templates/etc-opt-telegraf-telegraf.conf.j2 b/roles/dj-wasabi.telegraf/templates/etc-opt-telegraf-telegraf.conf.j2 deleted file mode 100644 index b6774b473..000000000 --- a/roles/dj-wasabi.telegraf/templates/etc-opt-telegraf-telegraf.conf.j2 +++ /dev/null @@ -1,78 +0,0 @@ -### MANAGED BY {{ role_path|basename }} ANSIBLE ROLE ### - -[tags] -{% if telegraf_global_tags is defined and telegraf_global_tags is iterable %} -{% for item in telegraf_global_tags %} - {{ item.tag_name }} = "{{ item.tag_value }}" -{% endfor %} -{% endif %} - -{% if telegraf_agent_aws_tags == true and ec2_tags is defined and ec2_tags != None %} -{% for key, value in ec2_tags.tags.items()%} - {{telegraf_agent_aws_tags_prefix}}{{ key }} = "{{ value }}" -{% endfor %} -{% endif %} - -# Configuration for telegraf itself -[agent] - interval = "{{ telegraf_agent_interval }}s" - debug = {{ telegraf_agent_debug | lower }} - hostname = "{{ ansible_fqdn }}" - -# Configuration for influxdb server to send metrics to -[outputs] -{% if telegraf_agent_output is defined and telegraf_agent_output is iterable %} -{% for item in telegraf_agent_output %} -[outputs.{{ item.type }}] -{% for items in item.config %} - {{ items }} -{% endfor %} -{% endfor %} -{% endif %} - -# PLUGINS -{% if telegraf_plugins_default is defined and telegraf_plugins_default is iterable %} -{% for item in telegraf_plugins_default %} -[{{ item.plugin }}] -{% if item.interval is defined %} - interval = "{{ item.interval }}s" -{% endif %} -{% if item.config is defined and item.config is iterable %} -{% for items in item.config %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.tagpass is defined and item.tagpass is iterable %} -[{{ item.plugin }}.tagpass] -{% for items in item.tagpass %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.tagdrop is defined and item.tagdrop is iterable %} -[{{ item.plugin }}.tagdrop] -{% for items in item.tagdrop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.pass is defined and item.pass is iterable %} -[{{ item.plugin }}.pass] -{% for items in item.pass %} - {{ items }} 
-{% endfor %} -{% endif %} -{% if item.drop is defined and item.drop is iterable %} -[{{ item.plugin }}.drop] -{% for items in item.drop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.specifications is defined and item.specifications is iterable %} -[[{{item.plugin}}.specifications]] -{% for items in item.specifications %} - {{ items }} -{% endfor %} -{% endif %} - - -{% endfor %} -{% endif %} diff --git a/roles/dj-wasabi.telegraf/templates/telegraf-extra-plugin.conf.j2 b/roles/dj-wasabi.telegraf/templates/telegraf-extra-plugin.conf.j2 deleted file mode 100644 index 452b722c8..000000000 --- a/roles/dj-wasabi.telegraf/templates/telegraf-extra-plugin.conf.j2 +++ /dev/null @@ -1,47 +0,0 @@ -### MANAGED BY {{ role_path|basename }} ANSIBLE ROLE ### - -[[inputs.{{ item.value.plugin | default(item.key) }}]] -{% if item.value.interval is defined %} - interval = "{{ item.value.interval }}s" -{% endif %} -{% if item.value.config is defined and item.value.config is iterable %} -{% for items in item.value.config %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.tags is defined and item.value.tags is iterable %} -[inputs.{{ item.value.plugin | default(item.key) }}.tags] -{% for items in item.value.tags %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.tagpass is defined and item.value.tagpass is iterable %} -[{{ item.value.plugin | default(item.key) }}.tagpass] -{% for items in item.value.tagpass %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.tagdrop is defined and item.value.tagdrop is iterable %} -[{{ item.value.plugin | default(item.key) }}.tagdrop] -{% for items in item.value.tagdrop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.pass is defined and item.value.pass is iterable %} -[{{ item.value.plugin | default(item.key) }}.pass] -{% for items in item.value.pass %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.drop is defined and item.value.drop is iterable %} -[{{ item.value.plugin | default(item.key) }}.drop] -{% for items in item.value.drop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.value.specifications is defined and item.value.specifications is iterable %} -[[{{item.value.plugin | default(item.key)}}.specifications]] -{% for items in item.value.specifications %} - {{ items }} -{% endfor %} -{% endif %} diff --git a/roles/dj-wasabi.telegraf/templates/telegraf.conf.j2 b/roles/dj-wasabi.telegraf/templates/telegraf.conf.j2 deleted file mode 100644 index ba426a9e1..000000000 --- a/roles/dj-wasabi.telegraf/templates/telegraf.conf.j2 +++ /dev/null @@ -1,100 +0,0 @@ -### MANAGED BY {{ role_path|basename }} ANSIBLE ROLE ### - -[global_tags] -{% if telegraf_global_tags is defined and telegraf_global_tags is iterable %} -{% for item in telegraf_global_tags %} - {{ item.tag_name }} = "{{ item.tag_value }}" -{% endfor %} -{% endif %} - -{% if telegraf_agent_aws_tags == true and ec2_tags is defined and ec2_tags != None %} -{% for key, value in ec2_tags.tags.items()%} - {{telegraf_agent_aws_tags_prefix}}{{ key }} = "{{ value }}" -{% endfor %} -{% endif %} - -# Configuration for telegraf agent -[agent] - interval = "{{ telegraf_agent_interval }}s" - debug = {{ telegraf_agent_debug | lower }} - hostname = "{{ telegraf_agent_hostname }}" - round_interval = {{ telegraf_agent_round_interval | lower }} - flush_interval = "{{ telegraf_agent_flush_interval }}s" - flush_jitter = "{{ telegraf_agent_flush_jitter }}s" -{% if telegraf_agent_version is version_compare('0.13', '>=') %} - collection_jitter = "{{ 
telegraf_agent_collection_jitter }}s" - metric_batch_size = {{ telegraf_agent_metric_batch_size }} - metric_buffer_limit = {{ telegraf_agent_metric_buffer_limit }} - quiet = {{ telegraf_agent_quiet | lower }} -{% endif %} -{% if telegraf_agent_version is version_compare('1.1', '>=') %} - logfile = "{{ telegraf_agent_logfile }}" - omit_hostname = {{ telegraf_agent_omit_hostname | lower }} -{% endif %} - -############################################################################### -# OUTPUTS # -############################################################################### - -{% if telegraf_agent_output is defined and telegraf_agent_output is iterable %} -{% for item in telegraf_agent_output %} -[[outputs.{{ item.type }}]] -{% for items in item.config %} - {{ items }} -{% endfor %} -{% endfor %} -{% endif %} - -############################################################################### -# INPUTS # -############################################################################### - -{% if telegraf_plugins_default is defined and telegraf_plugins_default is iterable %} -{% for item in telegraf_plugins_default %} -[[inputs.{{ item.plugin }}]] -{% if item.interval is defined %} - interval = "{{ item.interval }}s" -{% endif %} -{% if item.config is defined and item.config is iterable %} -{% for items in item.config %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.tags is defined and item.tags is iterable %} -[inputs.{{ item.plugin }}.tags] -{% for items in item.tags %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.tagpass is defined and item.tagpass is iterable %} -[{{ item.plugin }}.tagpass] -{% for items in item.tagpass %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.tagdrop is defined and item.tagdrop is iterable %} -[{{ item.plugin }}.tagdrop] -{% for items in item.tagdrop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.pass is defined and item.pass is iterable %} -[{{ item.plugin }}.pass] -{% for items in item.pass %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.drop is defined and item.drop is iterable %} -[{{ item.plugin }}.drop] -{% for items in item.drop %} - {{ items }} -{% endfor %} -{% endif %} -{% if item.specifications is defined and item.specifications is iterable %} -[[{{item.plugin}}.specifications]] -{% for items in item.specifications %} - {{ items }} -{% endfor %} -{% endif %} -{% endfor %} -{% endif %} diff --git a/roles/hxr.monitor-cluster/files/cluster_queue-condor.sh b/roles/hxr.monitor-cluster/files/cluster_queue-condor.sh index b7b8fb988..db59330f8 100755 --- a/roles/hxr.monitor-cluster/files/cluster_queue-condor.sh +++ b/roles/hxr.monitor-cluster/files/cluster_queue-condor.sh @@ -1,5 +1,5 @@ #!/bin/bash -condor_q -total | grep "all" | sed 's/.* jobs;\s*//g;s/, /\n/g' | while read line ; do +condor_q -global -total | grep "all" | sed 's/.* jobs;\s*//g;s/, /\n/g' | while read line ; do type=$(echo $line | sed 's/^[0-9]* //g'); count=$(echo $line | sed 's/ .*//g'); echo "cluster.queue,engine=condor,state=$type count=$count" diff --git a/roles/hxr.monitor-cluster/files/cluster_util-condor-split.sh b/roles/hxr.monitor-cluster/files/cluster_util-condor-split.sh index 358775433..49cbb590e 100755 --- a/roles/hxr.monitor-cluster/files/cluster_util-condor-split.sh +++ b/roles/hxr.monitor-cluster/files/cluster_util-condor-split.sh @@ -1,10 +1,33 @@ #!/bin/bash +# Details: For each GalaxyGroup we calculate the following to monitor the cluster usage + for cluster in $(condor_status -autoformat GalaxyGroup | sort | grep -v undefined | uniq); 
do
-    mem_total=$(condor_status -autoformat TotalMemory -constraint 'GalaxyGroup == "'$cluster'" && Activity == "Idle"' | paste -s -d'+' | bc)
-    mem_remain=$(condor_status -autoformat Memory -constraint 'GalaxyGroup == "'$cluster'" && Activity == "Idle"' | paste -s -d'+' | bc)
-    cpu_total=$(condor_status -autoformat DetectedCpus -constraint 'GalaxyGroup == "'$cluster'" && Activity == "Idle"' | paste -s -d'+' | bc)
-    cpu_remain=$(condor_status -autoformat Cpus -constraint 'GalaxyGroup == "'$cluster'" && Activity == "Idle"' | paste -s -d'+' | bc)
-    mem_perc=$(echo "($mem_total - $mem_remain) / $mem_total" | bc -l)
-    cpu_perc=$(echo "($cpu_total - $cpu_remain) / $cpu_total" | bc -l)
-    echo "cluster.alloc,cluster=condor-sep,group=$cluster cores=0$cpu_perc,memory=0$mem_perc"
+    total_slots=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && (SlotType == "Partitionable" || SlotType == "Dynamic")' | wc -l)
+    claimed_slots=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && State == "Claimed"' | wc -l)
+    unclaimed_slots=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && State == "Unclaimed"' | wc -l)
+    total_cpus=$(condor_status -af DetectedCpus -constraint 'GalaxyGroup == "'$cluster'" && SlotType == "Partitionable"' | paste -s -d'+' | bc)
+    claimed_cpus=$(condor_status -af Cpus -constraint 'GalaxyGroup == "'$cluster'" && State == "Claimed"' | paste -s -d'+' | bc)
+    unclaimed_cpus=$(condor_status -af Cpus -constraint 'GalaxyGroup == "'$cluster'" && State == "Unclaimed"' | paste -s -d'+' | bc)
+    total_memory=$(condor_status -af TotalMemory -constraint 'GalaxyGroup == "'$cluster'" && SlotType == "Partitionable"' | paste -s -d'+' | bc)
+    claimed_memory=$(condor_status -af Memory -constraint 'GalaxyGroup == "'$cluster'" && State == "Claimed"' | paste -s -d'+' | bc)
+    unclaimed_memory=$(condor_status -af Memory -constraint 'GalaxyGroup == "'$cluster'" && State == "Unclaimed"' | paste -s -d'+' | bc)
+    total_gpu_slots=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && CUDADeviceName =!= undefined' | wc -l)
+    claimed_gpus=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && State == "Claimed" && CUDADeviceName =!= undefined' | wc -l)
+    unclaimed_gpus=$(condor_status -af Name -constraint 'GalaxyGroup == "'$cluster'" && State == "Unclaimed" && CUDADeviceName =!= undefined' | wc -l)
+
+    # If any of the values is empty, default it to 0
+    if [ -z "$total_slots" ]; then total_slots=0; fi
+    if [ -z "$claimed_slots" ]; then claimed_slots=0; fi
+    if [ -z "$unclaimed_slots" ]; then unclaimed_slots=0; fi
+    if [ -z "$total_cpus" ]; then total_cpus=0; fi
+    if [ -z "$claimed_cpus" ]; then claimed_cpus=0; fi
+    if [ -z "$unclaimed_cpus" ]; then unclaimed_cpus=0; fi
+    if [ -z "$total_memory" ]; then total_memory=0; fi
+    if [ -z "$claimed_memory" ]; then claimed_memory=0; fi
+    if [ -z "$unclaimed_memory" ]; then unclaimed_memory=0; fi
+    if [ -z "$total_gpu_slots" ]; then total_gpu_slots=0; fi
+    if [ -z "$claimed_gpus" ]; then claimed_gpus=0; fi
+    if [ -z "$unclaimed_gpus" ]; then unclaimed_gpus=0; fi
+
+    echo "htcondor_cluster_usage,classad=cluster,group=$cluster total_slots=$total_slots,claimed_slots=$claimed_slots,unclaimed_slots=$unclaimed_slots,total_cpus=$total_cpus,claimed_cpus=$claimed_cpus,unclaimed_cpus=$unclaimed_cpus,total_memory=$total_memory,claimed_memory=$claimed_memory,unclaimed_memory=$unclaimed_memory,total_gpu_slots=$total_gpu_slots,claimed_gpus=$claimed_gpus,unclaimed_gpus=$unclaimed_gpus"
 done
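Telegraf ingests these lines verbatim, so a malformed field set fails silently. A quick local check of the emitted line protocol, sketched here assuming the script is installed as /usr/bin/monitor-condor-utilisation-split and invoked through the sudo rule from condor.yml below:

```bash
# Run the collector the way telegraf's exec input would, and verify that
# every emitted line is "measurement,tags fields" (exactly one space).
sudo -u telegraf sudo /usr/bin/monitor-condor-utilisation-split |
  awk 'NF != 2 { print "malformed line protocol: " $0; bad = 1 } END { exit bad }'
```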
diff --git a/roles/hxr.monitor-cluster/files/cluster_util-condor.sh b/roles/hxr.monitor-cluster/files/cluster_util-condor.sh
index 39ef11275..b1a80cbf9 100755
--- a/roles/hxr.monitor-cluster/files/cluster_util-condor.sh
+++ b/roles/hxr.monitor-cluster/files/cluster_util-condor.sh
@@ -1,8 +1,51 @@
 #!/bin/bash
-mem_total=$(condor_status -autoformat TotalMemory | paste -s -d'+' | bc)
-mem_alloc=$(condor_status -autoformat Memory | paste -s -d'+' | bc)
-mem_perc=$(echo "$mem_alloc / $mem_total" | bc -l)
-cpu_total=$(condor_status -autoformat DetectedCpus | paste -s -d'+' | bc)
-cpu_alloc=$(condor_status -autoformat Cpus | paste -s -d'+' | bc)
-cpu_perc=$(echo "$cpu_alloc / $cpu_total" | bc -l)
-echo "cluster.alloc,cluster=condor cores=0$cpu_perc,memory=0$mem_perc"
+# Details: monitor usage of the entire HTCondor cluster, independent of GalaxyGroups
+
+# Total number of detected CPUs at the machine level
+total_detected_cpus=$(condor_status -af DetectedCpus -constraint 'SlotType == "Partitionable"' | paste -s -d'+' | bc)
+
+# Claimed CPUs
+claimed_cpus=$(condor_status -af Cpus -constraint 'State == "Claimed"' | paste -s -d'+' | bc)
+
+# Unclaimed CPUs
+unclaimed_cpus=$(condor_status -af Cpus -constraint 'State == "Unclaimed"' | paste -s -d'+' | bc)
+
+# Total memory at the machine level
+total_memory=$(condor_status -af TotalMemory -constraint 'SlotType == "Partitionable"' | paste -s -d'+' | bc)
+
+# Claimed memory
+claimed_memory=$(condor_status -af Memory -constraint 'State == "Claimed"' | paste -s -d'+' | bc)
+
+# Unclaimed memory
+unclaimed_memory=$(condor_status -af Memory -constraint 'State == "Unclaimed"' | paste -s -d'+' | bc)
+
+# Total number of GPU slots
+total_gpu_slots=$(condor_status -af Name -constraint 'CUDADeviceName =!= undefined' | wc -l)
+
+# Claimed GPU slots
+claimed_gpus=$(condor_status -af Name -constraint 'State == "Claimed" && CUDADeviceName =!= undefined' | wc -l)
+
+# Unclaimed GPU slots
+unclaimed_gpus=$(condor_status -af Name -constraint 'State == "Unclaimed" && CUDADeviceName =!= undefined' | wc -l)
+
+# Total load average at the machine level
+total_loadavg=$(condor_status -af TotalLoadAvg -constraint 'SlotType == "Partitionable"' | paste -s -d'+' | bc)
+
+# Claimed load average
+claimed_loadavg=$(condor_status -af LoadAvg -constraint 'State == "Claimed"' | paste -s -d'+' | bc)
+
+# Unclaimed load average
+unclaimed_loadavg=$(condor_status -af LoadAvg -constraint 'State == "Unclaimed"' | paste -s -d'+' | bc)
+
+# Total number of slots
+total_slots=$(condor_status -af Name -constraint 'SlotType == "Partitionable" || SlotType == "Dynamic"' | wc -l)
+
+# Total number of Claimed slots with Activity Busy
+claimed_busy_slots=$(condor_status -af Name -constraint 'State == "Claimed" && Activity == "Busy"' | wc -l)
+
+# Total number of Unclaimed slots with Activity Idle
+unclaimed_idle_slots=$(condor_status -af Name -constraint 'State == "Unclaimed" && Activity == "Idle"' | wc -l)
+
+# Output in InfluxDB line protocol format
+echo "htcondor_cluster_usage,classad=machine total_detected_cpus=$total_detected_cpus,claimed_cpus=$claimed_cpus,unclaimed_cpus=$unclaimed_cpus,total_memory=$total_memory,claimed_memory=$claimed_memory,unclaimed_memory=$unclaimed_memory,total_loadavg=$total_loadavg,claimed_loadavg=$claimed_loadavg,unclaimed_loadavg=$unclaimed_loadavg,total_slots=$total_slots,claimed_busy_slots=$claimed_busy_slots,unclaimed_idle_slots=$unclaimed_idle_slots,total_gpu_slots=$total_gpu_slots,claimed_gpus=$claimed_gpus,unclaimed_gpus=$unclaimed_gpus"
+
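Unlike the old script, which pre-computed utilisation ratios (e.g. cores=0.87), the rewrite emits raw counts and leaves the arithmetic to the dashboards. The same CPU ratio can still be recovered ad hoc; a sketch, assuming the script is installed as /usr/bin/monitor-condor-utilisation (the path referenced by the exec config removed in condor.yml below):

```bash
# Split the single line-protocol record into key=value tokens and
# recompute CPU utilisation from the raw counts.
/usr/bin/monitor-condor-utilisation | tr ' ,' '\n\n' |
  awk -F= '/^claimed_cpus=/ { c = $2 }
           /^total_detected_cpus=/ { t = $2 }
           END { if (t > 0) printf "cpu utilisation: %.2f%%\n", 100 * c / t }'
```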
a/roles/hxr.monitor-cluster/tasks/condor.yml b/roles/hxr.monitor-cluster/tasks/condor.yml index e4df9037c..996d0bcb6 100644 --- a/roles/hxr.monitor-cluster/tasks/condor.yml +++ b/roles/hxr.monitor-cluster/tasks/condor.yml @@ -6,7 +6,6 @@ owner: root group: root mode: 0755 - when: monitor_condor_split_util - name: Allow telegraf to run monitor-condor-utilisation-split lineinfile: @@ -15,7 +14,6 @@ insertafter: EOF line: 'telegraf ALL=(ALL) NOPASSWD: /usr/bin/monitor-condor-utilisation-split' validate: 'visudo -cf %s' - when: monitor_condor_split_util - name: "Send condor cluster utilisation monitor" copy: @@ -48,43 +46,3 @@ insertafter: EOF line: 'telegraf ALL=(ALL) NOPASSWD: /usr/bin/monitor-condor-queue' validate: 'visudo -cf %s' - -#- set_fact: - #monitor_condor_queue: - #- plugin: exec - #disambiguation: monitor_condor_queue - #config: - #- commands = ["/usr/bin/monitor-condor-queue"] - #- timeout = "5s" - #- data_format = "influx" - #- interval = "10s" - -#- set_fact: - #telegraf_plugins_extra: "{{ telegraf_plugins_extra + monitor_condor_queue }}" - -#- set_fact: - #monitor_condor_util: - #- plugin: exec - #disambiguation: monitor_condor_util - #config: - #- commands = ["/usr/bin/monitor-condor-utilisation"] - #- timeout = "5s" - #- data_format = "influx" - #- interval = "10s" - -#- set_fact: - #telegraf_plugins_extra: "{{ telegraf_plugins_extra + monitor_condor_util }}" - -#- set_fact: - #monitor_condor_util_split: - #- plugin: exec - #disambiguation: monitor_condor_util_split - #config: - #- commands = ["/usr/bin/monitor-condor-utilisation-split"] - #- timeout = "5s" - #- data_format = "influx" - #- interval = "10s" - -#- set_fact: - #telegraf_plugins_extra: "{{ telegraf_plugins_extra + monitor_condor_util_split }}" - #when: "{{ monitor_condor_split_util }}" diff --git a/roles/hxr.monitor-galaxy/files/galaxy_job_queue_states.sh b/roles/hxr.monitor-galaxy/files/galaxy_job_queue_states.sh new file mode 100644 index 000000000..66da9defa --- /dev/null +++ b/roles/hxr.monitor-galaxy/files/galaxy_job_queue_states.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Description: This script is used to get the number of jobs in each state in the Galaxy job queue. +job_state_stats=$(/usr/bin/gxadmin tsvquery queue-detail --all | awk '{print $1}' | sort | uniq -c) +echo "$job_state_stats" | awk '{print "galaxy_job_queue_states_stats,job_state="$2" value="$1}' diff --git a/roles/hxr.monitor-galaxy/files/galaxy_jobs_per_handler.sh b/roles/hxr.monitor-galaxy/files/galaxy_jobs_per_handler.sh new file mode 100644 index 000000000..bbaca2891 --- /dev/null +++ b/roles/hxr.monitor-galaxy/files/galaxy_jobs_per_handler.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# Description: This script is used to get the number of jobs handled by each job handler in the current Galaxy job queue. 
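+# Example output line (handler name and count are illustrative):
+#   galaxy_jobs_per_handler_stats,handler=handler_sn06_0,state=running value=12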
+ +jobs_per_handler=$(/usr/bin/gxadmin csvquery q "select handler, state, count(state) from job where state in ('new', 'queued', 'running') and handler like '%handler_sn06_%' group by handler, state order by handler") +echo "$jobs_per_handler" | awk -F, '{print "galaxy_jobs_per_handler_stats,handler="$1",state="$2" value="$3}' diff --git a/roles/hxr.monitor-galaxy/tasks/main.yml b/roles/hxr.monitor-galaxy/tasks/main.yml index cd21505a4..76e749cc8 100644 --- a/roles/hxr.monitor-galaxy/tasks/main.yml +++ b/roles/hxr.monitor-galaxy/tasks/main.yml @@ -1,2 +1,16 @@ --- +- name: "Send Galaxy job queue states stats" + copy: + src: "galaxy_job_queue_states.sh" + dest: "/usr/bin/galaxy_job_queue_states_stats" + owner: root + group: root + mode: 0755 +- name: "Send Galaxy jobs per handler stats" + copy: + src: "galaxy_jobs_per_handler.sh" + dest: "/usr/bin/galaxy_jobs_per_handler_stats" + owner: root + group: root + mode: 0755 diff --git a/roles/hxr.postgres-connection/defaults/main.yml b/roles/hxr.postgres-connection/defaults/main.yml index 82acd4ac8..45bc3fe74 100644 --- a/roles/hxr.postgres-connection/defaults/main.yml +++ b/roles/hxr.postgres-connection/defaults/main.yml @@ -1,4 +1,7 @@ -pgc_user: - name: "{{ galaxy_user.name }}" - home: "{{ galaxy_user.home }}" - group_name: "{{ galaxy_group.name | default(galaxy_group) }}" +pgc_users: + - uname: "{{ galaxy_user.name }}" + uhome: "{{ galaxy_user.home }}" + gname: "{{ galaxy_group.name | default(galaxy_group) }}" + pguser: "{{ postgres_user }}" + pgpass: "{{ postgres_pass }}" + pgdatabase: galaxy diff --git a/roles/hxr.postgres-connection/tasks/main.yml b/roles/hxr.postgres-connection/tasks/main.yml index eec7611ce..7cef58153 100644 --- a/roles/hxr.postgres-connection/tasks/main.yml +++ b/roles/hxr.postgres-connection/tasks/main.yml @@ -1,20 +1,16 @@ --- -- name: Add env vars in bashrc - lineinfile: - path: "{{ pgc_user.home }}/.bashrc" - regexp: "^export {{ item.var }}" - line: "export {{ item.var }}='{{ item.val }}'" - with_items: - - var: PGUSER - val: "{{ postgres_user }}" - - var: PGHOST - val: "{{ postgres_host }}" +- name: Get all users + getent: + database: passwd + split: ':' -- name: Copy using the 'content' for inline data - copy: - content: | - {{ postgres_host }}:{{ postgres_port }}:*:{{ postgres_user }}:{{ postgres_pass }} - dest: "{{ pgc_user.home }}/.pgpass" - mode: 0600 - owner: "{{ pgc_user.name }}" - group: "{{ pgc_user.group_name }}" +- name: Debug info when user does not exist + debug: + msg: "INFO: User {{ item.uname }} does not exist" + loop: "{{ pgc_users }}" + when: (not item.uname in getent_passwd.keys()) + +- name: Run postgres tasks + include_tasks: postgres_tasks.yml + loop: "{{ pgc_users }}" + when: (item.uname in getent_passwd.keys()) diff --git a/roles/hxr.postgres-connection/tasks/postgres_tasks.yml b/roles/hxr.postgres-connection/tasks/postgres_tasks.yml new file mode 100644 index 000000000..b7999fb8e --- /dev/null +++ b/roles/hxr.postgres-connection/tasks/postgres_tasks.yml @@ -0,0 +1,26 @@ +--- +- name: Add postgres connection configuration + block: + - name: Add env vars in bashrc + lineinfile: + path: "{{ item.uhome }}/.bashrc" + regexp: "^export {{ task_item.var }}" + line: "export {{ task_item.var }}='{{ task_item.val }}'" + with_items: + - var: PGUSER + val: "{{ item.pguser }}" + - var: PGHOST + val: "{{ postgres_host }}" + - var: PGDATABASE + val: "{{ item.pgdatabase }}" + loop_control: + loop_var: task_item + + - name: Copy using the 'content' for inline data + copy: + content: | + {{ postgres_host 
}}:{{ postgres_port }}:*:{{ item.pguser }}:{{ item.pgpass }}
+        dest: "{{ item.uhome }}/.pgpass"
+        mode: 0600
+        owner: "{{ item.uname }}"
+        group: "{{ item.gname }}"
diff --git a/roles/usegalaxy-eu.bashrc/defaults/main.yml b/roles/usegalaxy-eu.bashrc/defaults/main.yml
new file mode 100644
index 000000000..dcba29bf1
--- /dev/null
+++ b/roles/usegalaxy-eu.bashrc/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+bashrc_users:
+  - uname: "{{ galaxy_user.name }}"
+    uhome: "{{ galaxy_user.home }}"
+    gname: "{{ galaxy_group.name }}"
+
+galaxy_pulsar_app_conf: "{{ galaxy_config_dir }}/pulsar_app.yml"
diff --git a/roles/usegalaxy-eu.bashrc/files/galaxy_jwd.py b/roles/usegalaxy-eu.bashrc/files/galaxy_jwd.py
new file mode 100644
index 000000000..e00aeca81
--- /dev/null
+++ b/roles/usegalaxy-eu.bashrc/files/galaxy_jwd.py
@@ -0,0 +1,505 @@
+#!/usr/bin/env python
+"""Galaxy job's job working directory (JWD) script.
+
+Can get you the path of a JWD and can delete JWDs of failed jobs older than X
+days.
+"""
+
+import argparse
+import os
+import shutil
+import sys
+import textwrap
+from argparse import RawDescriptionHelpFormatter
+from datetime import datetime
+from typing import Optional, Tuple
+from xml.dom.minidom import parse
+
+import psycopg2
+import yaml
+
+
+class SubcommandHelpFormatter(RawDescriptionHelpFormatter):
+    """Custom help formatter to hide argparse metavars."""
+
+    def _format_action(self, action):
+        """Removes the first line from subparsers."""
+        parts = super(RawDescriptionHelpFormatter, self)._format_action(action)
+        if action.nargs == argparse.PARSER:
+            parts = "\n".join(parts.split("\n")[1:])
+        return parts
+
+
+def main():
+    """Main function of the JWD script.
+
+    1. Can get you the path of a JWD
+    2. Can delete JWDs of failed jobs older than X days
+    """
+    parser = argparse.ArgumentParser(
+        prog="galaxy_jwd",
+        description=textwrap.dedent(
+            """
+            Get the JWD path of a given Galaxy job id or clean the JWDs of old failed jobs.
+
+            The following ENVs (same as gxadmin's) should be set:
+                GALAXY_CONFIG_FILE: Path to the galaxy.yml file
+                GALAXY_LOG_DIR: Path to the Galaxy log directory
+                PGDATABASE: Name of the Galaxy database
+                PGUSER: Galaxy database user
+                PGHOST: Galaxy database host
+
+            We also need a ~/.pgpass file (same as gxadmin's) in format: <pg_host>:5432:*:<pg_user>:<pg_password>
+            """  # noqa: E501
+        ),
+        formatter_class=SubcommandHelpFormatter,
+    )
+    subparsers = parser.add_subparsers(
+        dest="operation",
+        title="operations",
+        help=None,
+        metavar="",
+    )
+
+    # Parser for the get subcommand
+    get_parser = subparsers.add_parser(
+        "get",
+        help="Get the JWD path of a given Galaxy job id",
+        epilog=textwrap.dedent(
+            """
+            example:
+            python galaxy_jwd.py get 12345678
+            """  # noqa: E501
+        ),
+        formatter_class=RawDescriptionHelpFormatter,
+    )
+    get_parser.add_argument(
+        "job_id",
+        help="Galaxy job id",
+    )
+
+    # Parser for the clean subcommand
+    clean_parser = subparsers.add_parser(
+        "clean",
+        help="Clean JWDs of failed jobs older than X days",
+        epilog=textwrap.dedent(
+            """
+            example (dry-run):
+            python galaxy_jwd.py clean --dry_run --days 5
+
+            example (no dry-run):
+            python galaxy_jwd.py clean --no_dry_run --days 5
+            """  # noqa: E501
+        ),
+        formatter_class=RawDescriptionHelpFormatter,
+    )
+    dry_run_group = clean_parser.add_mutually_exclusive_group(required=True)
+    dry_run_group.add_argument(
+        "--dry_run",
+        help="Dry run (prints the JWDs that would be deleted)",
+        action="store_true",
+    )
+    dry_run_group.add_argument(
+        "--no_dry_run",
+        help="No dry run (deletes the JWDs)",
+        action="store_true",
+    )
+    clean_parser.add_argument(
+        "--days",
+        help=(
+            "Minimum age of jobs (in days) to be considered for deletion "
+            "(default: 5)"
+        ),
+        default=5,
+    )
+
+    args = parser.parse_args(args=None if sys.argv[1:] else ["--help"])
+
+    # Check if environment variables are set
+    if not os.environ.get("GALAXY_CONFIG_FILE"):
+        raise ValueError("Please set ENV GALAXY_CONFIG_FILE")
+    galaxy_config_file = os.environ.get("GALAXY_CONFIG_FILE").strip()
+
+    # Check if the given galaxy.yml file exists
+    if not os.path.isfile(galaxy_config_file):
+        raise ValueError(
+            f"The given galaxy.yml file {galaxy_config_file} does not exist"
+        )
+
+    # pulsar_app.yml file path for pulsar_embedded runner
+    if not os.environ.get("GALAXY_PULSAR_APP_CONF"):
+        raise ValueError("Please set ENV GALAXY_PULSAR_APP_CONF")
+    galaxy_pulsar_app_conf = os.environ.get("GALAXY_PULSAR_APP_CONF").strip()
+
+    if not os.environ.get("GALAXY_LOG_DIR"):
+        raise ValueError("Please set ENV GALAXY_LOG_DIR")
+    galaxy_log_dir = os.environ.get("GALAXY_LOG_DIR").strip()
+
+    if not os.environ.get("PGDATABASE"):
+        raise ValueError("Please set ENV PGDATABASE")
+    db_name = os.environ.get("PGDATABASE").strip()
+
+    if not os.environ.get("PGUSER"):
+        raise ValueError("Please set ENV PGUSER")
+    db_user = os.environ.get("PGUSER").strip()
+
+    if not os.environ.get("PGHOST"):
+        raise ValueError("Please set ENV PGHOST")
+    db_host = os.environ.get("PGHOST").strip()
+
+    # Check if ~/.pgpass file exists and is not empty
+    if (
+        not os.path.isfile(os.path.expanduser("~/.pgpass"))
+        or os.stat(os.path.expanduser("~/.pgpass")).st_size == 0
+    ):
+        raise ValueError(
+            "Please create a ~/.pgpass file in format: "
+            "<pg_host>:5432:*:<pg_user>:<pg_password>"
+        )
+    db_password = extract_password_from_pgpass(
+        pgpass_file=os.path.expanduser("~/.pgpass")
+    )
+
+    object_store_conf = get_object_store_conf_path(galaxy_config_file)
+    backends = parse_object_store(object_store_conf)
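+
+    # Illustrative shape of the parsed mapping (backend ids/paths made up):
+    #   backends == {"files1": "/data/1/galaxy_db/job_working_directory"}
+    # Add pulsar staging directory (runner: pulsar_embedded) to backends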
+    backends["pulsar_embedded"] = get_pulsar_staging_dir(
+        galaxy_pulsar_app_conf
+    )
+
+    # Connect to Galaxy database
+    db = Database(
+        dbname=db_name,
+        dbuser=db_user,
+        dbhost=db_host,
+        dbpassword=db_password,
+    )
+
+    # For the get subcommand
+    if args.operation == "get":
+        job_id = args.job_id
+        object_store_id, job_runner_name = db.get_job_info(job_id)
+        jwd_path = decode_path(
+            job_id, [object_store_id], backends, job_runner_name
+        )
+
+        # Check
+        if jwd_path:
+            print(jwd_path)
+        else:
+            print(
+                f"ERROR: Job working directory (of {job_id}) does not exist",
+                file=sys.stderr,
+            )
+            sys.exit(1)
+
+    # For the clean subcommand
+    if args.operation == "clean":
+        # Check if the given Galaxy log directory exists
+        if not os.path.isdir(galaxy_log_dir):
+            raise ValueError(
+                f"The given Galaxy log directory {galaxy_log_dir} does not "
+                f"exist"
+            )
+
+        # Set variables
+        dry_run = args.dry_run
+        days = args.days
+        jwd_cleanup_log = (
+            f"{galaxy_log_dir}/"
+            f"jwd_cleanup_{datetime.now().strftime('%d_%m_%Y-%I_%M_%S')}.log"
+        )
+        failed_jobs = db.get_failed_jobs(days=days)
+
+        # Delete JWD folders if dry_run is False
+        # Log the folders that will be deleted
+        if not dry_run:
+            with open(jwd_cleanup_log, "w") as jwd_log:
+                jwd_log.write(
+                    "The following job working directories (JWDs) belonging "
+                    "to the failed jobs are deleted\nJob id: JWD path\n"
+                )
+                for job_id, metadata in failed_jobs.items():
+                    # Delete JWD folders older than X days
+                    jwd_path = decode_path(job_id, metadata, backends)
+                    if jwd_path:
+                        delete_jwd(jwd_path)
+                        jwd_log.write(f"{job_id}: {jwd_path}\n")
+        else:
+            # Print folders of JWDs of failed jobs older than X days
+            for job_id, metadata in failed_jobs.items():
+                jwd_path = decode_path(job_id, metadata, backends)
+                if jwd_path:
+                    print(f"{job_id}: {jwd_path}")
+
+
+def extract_password_from_pgpass(pgpass_file: str) -> str:
+    """Extract the password from the ~/.pgpass file.
+
+    The ~/.pgpass file should have the following format:
+    <pg_host>:5432:*:<pg_user>:<pg_password>
+
+    Args:
+        pgpass_file: Path to the ~/.pgpass file.
+
+    Returns:
+        Password for the given pg_host.
+
+    Raises:
+        ValueError: The ~/.pgpass file cannot be parsed.
+    """
+    pgpass_format = "<pg_host>:5432:*:<pg_user>:<pg_password>"
+    with open(pgpass_file, "r") as pgpass:
+        for line in pgpass:
+            if line.startswith(os.environ.get("PGHOST")):
+                return line.split(":")[4].strip()
+        else:
+            raise ValueError(
+                f"Please add the password for '{os.environ.get('PGHOST')}' "
+                f"to the ~/.pgpass file in format: {pgpass_format}"
+            )
+
+
+def get_object_store_conf_path(galaxy_config_file: str) -> str:
+    """Get the path to the object_store_conf.xml file.
+
+    Args:
+        galaxy_config_file: Path to the galaxy.yml file.
+
+    Returns:
+        Path to the object_store_conf.xml file.
+
+    Raises:
+        ValueError: The object store configuration file specified in the
+            Galaxy configuration does not exist.
+    """
+    object_store_conf = ""
+    with open(galaxy_config_file, "r") as config:
+        for line in config:
+            if line.strip().startswith("object_store_config_file"):
+                object_store_conf = line.split(":")[1].strip()
+
+    # Check if the object_store_conf.xml file exists
+    if not os.path.isfile(object_store_conf):
+        raise ValueError(f"{object_store_conf} does not exist")
+
+    return object_store_conf
+
+
+def parse_object_store(object_store_conf: str) -> dict:
+    """Get the path of type 'job_work' from the extra_dir's for each backend.
+
+    Args:
+        object_store_conf: Path to the object_store_conf.xml file.
+
+    Returns:
+        Dictionary of backend id and path of type 'job_work'.
+ """ + dom = parse(object_store_conf) + backends = {} + for backend in dom.getElementsByTagName("backend"): + backend_id = backend.getAttribute("id") + backends[backend_id] = {} + # Get the extra_dir's path for each backend if type is "job_work" + for extra_dir in backend.getElementsByTagName("extra_dir"): + if extra_dir.getAttribute("type") == "job_work": + backends[backend_id] = extra_dir.getAttribute("path") + return backends + + +def get_pulsar_staging_dir(galaxy_pulsar_app_conf: str) -> str: + """Get the path to the pulsar staging directory. + + Args: + galaxy_pulsar_app_conf: Path to the pulsar_app.yml file. + + Returns: + Path to the pulsar staging directory. + + Raises: + ValueError: The Pulsar staging directory does not exist. + """ + pulsar_staging_dir = "" + with open(galaxy_pulsar_app_conf, "r") as config: + yaml_config = yaml.safe_load(config) + pulsar_staging_dir = yaml_config["staging_directory"] + + # Check if the pulsar staging directory exists + if not os.path.isdir(pulsar_staging_dir): + raise ValueError( + f"Pulsar staging directory '{pulsar_staging_dir}' does not exist" + ) + + return pulsar_staging_dir + + +def decode_path( + job_id: int, + metadata: list, + backends_dict: dict, + job_runner_name: Optional[str] = None, +) -> str: + """Decode the path of JWDs and check if the path exists. + + Args: + job_id: Job id. + metadata: List of object_store_id and update_time. + backends_dict: Dictionary of backend id and path of type 'job_work'. + job_runner_name: Name of the job runner. Defaults to None. + + Returns: + Path to the JWD. + """ + job_id = str(job_id) + + # Check if object_store_id exists in our object store config + if metadata[0] not in backends_dict.keys(): + raise ValueError( + f"Object store id '{metadata[0]}' does not exist in the " + f"object_store_conf.xml file." + ) + + # Pulsar embedded jobs uses the staging directory and this has a different + # path structure + if job_runner_name == "pulsar_embedded": + jwd_path = f"{backends_dict[job_runner_name]}/{job_id}" + else: + jwd_path = ( + f"{backends_dict[metadata[0]]}/" + f"0{job_id[0:2]}/{job_id[2:5]}/{job_id}" + ) + + # Validate that the path is a JWD + # It is a JWD if the following conditions are true: + # 1. Check if tool_script.sh exists + # 2. Check if directories 'inputs', and 'outputs' exist + # 3. Additionally, we can also try and find the file + # '__instrument_core_epoch_end' and compare the timestamp in that with the + # 'update_time' (metadata[1]) of the job. + if ( + os.path.exists(jwd_path) + and os.path.exists(f"{jwd_path}/tool_script.sh") + and os.path.exists(f"{jwd_path}/inputs") + and os.path.exists(f"{jwd_path}/outputs") + ): + return jwd_path + else: + return None + + +def delete_jwd(jwd_path: str) -> None: + """Delete JWD folder and all its contents. + + Args: + jwd_path: Path to the JWD folder. + """ + try: + print(f"Deleting JWD: {jwd_path}") + shutil.rmtree(jwd_path) + except OSError as e: + print(f"Error deleting JWD: {jwd_path} : {e.strerror}") + + +class Database: + """Class to connect to the database and query DB.""" + + def __init__( + self, + dbname: str, + dbuser: str, + dbhost: str, + dbpassword: str, + ) -> None: + """Create a connection to the Galaxy database. + + Args: + dbname: Name of the database. + dbuser: Name of the database user. + dbhost: Hostname of the database. + dbpassword: Password of the database user. 
+ """ + try: + self.conn = psycopg2.connect( + dbname=dbname, user=dbuser, host=dbhost, password=dbpassword + ) + except psycopg2.OperationalError as e: + print(f"Unable to connect to database: {e}") + + def get_failed_jobs(self, days: int) -> dict: + """Get failed jobs from DB. + + Args: + days: Minimum age of failed jobs (in days). + + Returns: + Dictionary with job_id as key and object_store_id, and update_time + as list of values. + """ + cur = self.conn.cursor() + cur.execute( + f""" + SELECT id, object_store_id, update_time + FROM job + WHERE state = 'error' + AND update_time IS NOT NULL + AND object_store_id IS NOT NULL + AND update_time <= NOW() - INTERVAL '{days} days' + """ + ) + failed_jobs = cur.fetchall() + cur.close() + self.conn.close() + + # Create a dictionary with job_id as key and object_store_id, and + # update_time as values + failed_jobs_dict = {} + for job_id, object_store_id, update_time in failed_jobs: + failed_jobs_dict[job_id] = [object_store_id, update_time] + + if not failed_jobs_dict: + print( + f"No failed jobs older than {days} days found.", + file=sys.stderr, + ) + sys.exit(1) + + return failed_jobs_dict + + def get_job_info(self, job_id: int) -> Tuple[str, str]: + """Get object_store_id and job_runner_name for a given job id. + + Args: + job_id: Job id. + + Returns: + object_store_id: Object store id. + job_runner_name: Job runner name. + """ + cur = self.conn.cursor() + cur.execute( + f""" + SELECT object_store_id, job_runner_name + FROM job + WHERE id = '{job_id}' AND object_store_id IS NOT NULL + AND job_runner_name IS NOT NULL + """ + ) + object_store_id, job_runner_name = cur.fetchone() + cur.close() + self.conn.close() + + if not object_store_id: + print( + f"Object store id and/or the job runner name for the job" + f"'{job_id}' was not found in the database", + file=sys.stderr, + ) + sys.exit(1) + + return object_store_id, job_runner_name + + +if __name__ == "__main__": + main() diff --git a/roles/usegalaxy-eu.bashrc/tasks/bashrc_tasks.yml b/roles/usegalaxy-eu.bashrc/tasks/bashrc_tasks.yml new file mode 100644 index 000000000..3622ba3ce --- /dev/null +++ b/roles/usegalaxy-eu.bashrc/tasks/bashrc_tasks.yml @@ -0,0 +1,137 @@ +--- +- name: Check and add/update bashrc when user user exists + block: + - name: Check for bashrc + stat: + path: "{{ item.uhome }}/.bashrc" + register: bashrc_stat_out + + - name: Copy default bashrc when not existing + copy: + src: /etc/skel/.bashrc + dest: "{{ item.uhome }}/.bashrc" + remote_src: yes + mode: 0640 + owner: "{{ item.uname }}" + group: "{{ item.gname }}" + when: not bashrc_stat_out.stat.exists + + - name: Check for bashprofile + stat: + path: "{{ item.uhome }}/.bash_profile" + register: bashprofile_stat_out + + - name: Check for profile + stat: + path: "{{ item.uhome }}/.profile" + register: profile_stat_out + + - name: Copy default bashprofile when not existing + copy: + src: /etc/skel/.bash_profile + dest: "{{ item.uhome }}/.bash_profile" + remote_src: yes + mode: 0640 + owner: "{{ item.uname }}" + group: "{{ item.gname }}" + when: not bashprofile_stat_out.stat.exists and not profile_stat_out.stat.exists + + - name: Copy galaxy_jwd python script + copy: + src: galaxy_jwd.py + dest: /usr/local/bin/galaxy_jwd + mode: 0755 + owner: galaxy + group: galaxy + + - name: Insert some aliases and functions + blockinfile: + path: "{{ item.uhome }}/.bashrc" + marker: "# {mark} ANSIBLE MANAGED BLOCK" + content: | + # User specific aliases and functions + function change_to_wd() { + USAGE="Please provide a Galaxy job ID or a 
Condor job ID" + if (( $# == 0 )); then + echo $USAGE + return 0; + fi + for i in "$@"; do + if [[ "$i" = --help || "$i" = -h ]]; then + echo $USAGE + return 0; + fi + done + JID=$1 + WD=$(dirname `condor_q -autoformat Cmd ClusterId | grep ${JID} | cut -f1 -d' '` || dirname `condor_history -autoformat Cmd ClusterId | grep ${JID} | cut -f1 -d' '` || find "{{ galaxy_config['galaxy']['job_working_directory'] }}""/0"${JID:0:2}"/"${JID:2:3} -maxdepth 1 -type d -name ${JID}) + cd $WD + } + + # Uses the /usr/local/bin/galaxy_jwd python script to change to the job working directory + function change_to_jwd() { + USAGE="Please provide a Galaxy job ID" + if (( $# == 0 )); then + echo $USAGE + return 0; + fi + + JID=$1 + JWD=$(python /usr/local/bin/galaxy_jwd get $JID) + + # Check the return code and whether the job working directory exists + if [[ $? -ne 0 || ! -d $JWD ]]; then + echo "INFO: Could not find the job working directory for job $JID" + return 1 + fi + + cd $JWD + } + alias watchendqueue='watch -n 1 "gxadmin query queue-detail | (head -n 2; tail -n 70)"' # show the end of queued state + alias watchendnew='watch -n 1 "gxadmin query queue-detail --all | (head -n 2; tail -n 70)"' # show the end of new state queue + alias highscore="gxadmin query queue-detail --all | awk -F\| '{print\$5}' | sort | uniq -c | sort -sn" # show users with most jobs in queue + alias gl='journalctl -f -u galaxy-*' + alias notsubmitted="gxadmin query queue-detail | awk -F\| '{print\$3}' | grep -vc \"\S\"" # jobs that are queued but not submitted / not have condor id + alias glg='journalctl -fu galaxy-gunicorn@* | grep -v -e "/api/upload/hooks" -e "/history/current_history_json"' + alias glh='journalctl -f -u galaxy-handler@*' + alias glw='journalctl -f -u galaxy-workflow-scheduler@*' + alias cu='journalctl -u galaxy-gunicorn@*.service --since "10 minutes ago" | grep "/history/current_history_json" | awk "{print \$11}" | sort -u | wc -l' + alias chg2wd='change_to_wd' + alias chg2jwd='change_to_jwd' + + # finds big files somewhere ;) + function findbig () { + echo -e "$(find $1 -type f -size +$2M -exec ls -lh {} +)" + } + + alias findbig=findbig + + + - name: Insert some export vars + lineinfile: + path: "{{ item.uhome }}/.bashrc" + line: "{{ task_item }}" + loop: + # ENV's for gxadmin and the galaxy_jwd python script + - "export GALAXY_CONFIG_DIR={{ galaxy_config_dir }}" + - "export GALAXY_CONFIG_FILE={{ galaxy_config_file }}" + - "export GALAXY_LOG_DIR={{ galaxy_log_dir }}" + - "export GALAXY_MUTABLE_CONFIG_DIR={{ galaxy_mutable_config_dir }}" + - "export GALAXY_ROOT={{ galaxy_server_dir }}" + - "export VIRTUAL_ENV={{ galaxy_venv_dir }}" + - "export GALAXY_PULSAR_APP_CONF={{ galaxy_pulsar_app_conf }}" + loop_control: + loop_var: task_item + + - name: Check for bash_history + stat: + path: "{{ item.uhome }}/.bash_history" + register: bashhistory_stat_out + + - name: Create bash_history + file: + path: "{{ item.uhome }}/.bash_history" + state: touch + mode: 0640 + owner: "{{ item.uname }}" + group: "{{ item.gname }}" + when: not bashhistory_stat_out.stat.exists diff --git a/roles/usegalaxy-eu.bashrc/tasks/main.yml b/roles/usegalaxy-eu.bashrc/tasks/main.yml index 782371e26..bae152bb9 100644 --- a/roles/usegalaxy-eu.bashrc/tasks/main.yml +++ b/roles/usegalaxy-eu.bashrc/tasks/main.yml @@ -1,85 +1,16 @@ --- -- name: Check for bashrc - stat: - path: "{{ galaxy_user.home }}/.bashrc" - register: bashrc_stat_out - -- name: Copy default bashrc when not existing - copy: - src: /etc/skel/.bashrc - dest: "{{ galaxy_user.home 
}}/.bashrc" - remote_src: yes - mode: 0640 - owner: "{{ galaxy_user.name }}" - group: "{{ galaxy_group.name | default(galaxy_group) }}" - when: not bashrc_stat_out.stat.exists - -- name: Check for bashprofile - stat: - path: "{{ galaxy_user.home }}/.bash_profile" - register: bashprofile_stat_out - -- name: Check for profile - stat: - path: "{{ galaxy_user.home }}/.profile" - register: profile_stat_out - -- name: Copy default bashprofile when not existing - copy: - src: /etc/skel/.bash_profile - dest: "{{ galaxy_user.home }}/.bash_profile" - remote_src: yes - mode: 0640 - owner: "{{ galaxy_user.name }}" - group: "{{ galaxy_group.name | default(galaxy_group) }}" - when: not bashprofile_stat_out.stat.exists and not profile_stat_out.stat.exists - -- name: Insert some aliases - blockinfile: - path: "{{ galaxy_user.home }}/.bashrc" - marker: "# {mark} ANSIBLE MANAGED BLOCK" - content: | - # User specific aliases and functions - function change_to_wd() { - USAGE="Please provide a Galaxy job ID or a Condor job ID" - if (( $# == 0 )); then - echo $USAGE - return 0; - fi - for i in "$@"; do - if [[ "$i" = --help || "$i" = -h ]]; then - echo $USAGE - return 0; - fi - done - JID=$1 - WD=$(dirname `condor_q -autoformat Cmd ClusterId | grep ${JID} | cut -f1 -d' '` || dirname `condor_history -autoformat Cmd ClusterId | grep ${JID} | cut -f1 -d' '` || find "{{ galaxy_config['galaxy']['job_working_directory'] }}""/0"${JID:0:2}"/"${JID:2:3} -maxdepth 1 -type d -name ${JID}) - cd $WD - } - - alias gl='journalctl -f -u galaxy-*' - alias glg='journalctl -fu galaxy-gunicorn@* | grep -v -e "/api/upload/hooks" -e "/history/current_history_json"' - alias glh='journalctl -f -u galaxy-handler@*' - alias glw='journalctl -f -u galaxy-workflow-scheduler@*' - alias glc='journalctl -fu galaxy-celery@*' - alias cu='journalctl -u galaxy-gunicorn@*.service --since "10 minutes ago" | grep "/history/current_history_json" | awk "{print \$11}" | sort -u | wc -l' - alias chg2wd='change_to_wd' - -- name: Insert some export vars - lineinfile: - path: "{{ galaxy_user.home }}/.bashrc" - line: "export GALAXY_CONFIG_FILE={{ galaxy_config_file }}" - -- name: Check for bash_history - stat: - path: "{{ galaxy_user.home }}/.bash_history" - register: bashhistory_stat_out - -- name: Create bash_history - file: - path: "{{ galaxy_user.home }}/.bash_history" - state: touch - mode: 0640 - owner: "{{ galaxy_user.name }}" - group: "{{ galaxy_group.name | default(galaxy_group) }}" - when: not bashhistory_stat_out.stat.exists +- name: Get all users + getent: + database: passwd + split: ':' + +- name: Debug info when user does not exist + debug: + msg: "INFO: User {{ item.uname }} does not exist" + loop: "{{ bashrc_users }}" + when: (not item.uname in getent_passwd.keys()) + +- name: Add/Update bashrc + include_tasks: bashrc_tasks.yml + loop: "{{ bashrc_users }}" + when: (item.uname in getent_passwd.keys()) diff --git a/roles/usegalaxy-eu.fix-stop-ITs/tasks/main.yml b/roles/usegalaxy-eu.fix-stop-ITs/tasks/main.yml index 3b4e0e7df..e69ed8739 100644 --- a/roles/usegalaxy-eu.fix-stop-ITs/tasks/main.yml +++ b/roles/usegalaxy-eu.fix-stop-ITs/tasks/main.yml @@ -7,19 +7,26 @@ group: root mode: 0755 -- name: Create logfile +- name: Create cron log directory file: state: directory + path: /var/log/cron + mode: 0755 + owner: root + group: root + +- name: Create logfile + file: + state: touch path: /var/log/cron/stop-ITs.log mode: 0664 owner: "{{ galaxy_user.name }}" group: "{{ galaxy_group.name }}" - state: touch - name: Setup logrotate copy: content: | - 
/var/log/stop-ITs.log
+      /var/log/cron/stop-ITs.log
       {
          rotate 6
          daily
diff --git a/roles/usegalaxy-eu.fix-stop-ITs/templates/stop-ITs.sh.j2 b/roles/usegalaxy-eu.fix-stop-ITs/templates/stop-ITs.sh.j2
index 6fca463c7..9bef8246d 100644
--- a/roles/usegalaxy-eu.fix-stop-ITs/templates/stop-ITs.sh.j2
+++ b/roles/usegalaxy-eu.fix-stop-ITs/templates/stop-ITs.sh.j2
@@ -1,12 +1,18 @@
 #!/bin/sh
 DAY=86400
 . {{ galaxy_root }}/.bashrc
+cd ~
 for job in $(gxadmin query queue-details --all | grep running | grep interactive_tool | awk '{print $3}')
 do
-    chg2wd $job
+    JWD=$(python /usr/local/bin/galaxy_jwd get $job)
+    pushd $JWD
     ClusterID=$( head -n 1 job_condor.log | awk '{print $2}' | awk -F. '{print $1"."$2}' | sed 's/^(//' )
-    if [ $(expr $(date +"%s") - $(condor_q $ClusterID -autoformat JobStartDate)) -ge $DAY ]
+    if [ "$(condor_q $ClusterID -autoformat JobStartDate)" = "undefined" ] || [ "$(condor_q $ClusterID -autoformat JobStartDate)" = "" ]
+    then
+        condor_rm $ClusterID
+    elif [ $(($(date +"%s") - $(condor_q $ClusterID -autoformat JobStartDate))) -ge $DAY ]
     then
         condor_rm $ClusterID
     fi
-done
\ No newline at end of file
+    popd
+done
diff --git a/roles/usegalaxy-eu.plausible/defaults/main.yml b/roles/usegalaxy-eu.plausible/defaults/main.yml
index f0d8f8f5f..3a5f7fd78 100644
--- a/roles/usegalaxy-eu.plausible/defaults/main.yml
+++ b/roles/usegalaxy-eu.plausible/defaults/main.yml
@@ -1 +1,2 @@
 plausible_dir: /data/plausible
+plausible_lock_register: false
diff --git a/roles/usegalaxy-eu.plausible/templates/plausible.j2 b/roles/usegalaxy-eu.plausible/templates/plausible.j2
index 7e2440a39..0eed7a135 100644
--- a/roles/usegalaxy-eu.plausible/templates/plausible.j2
+++ b/roles/usegalaxy-eu.plausible/templates/plausible.j2
@@ -3,3 +3,4 @@ ADMIN_USER_NAME=admin
 ADMIN_USER_PWD="{{ plausible_admin_password }}"
 BASE_URL="https://plausible.galaxyproject.eu"
 SECRET_KEY_BASE="{{ plausible_secret_key }}"
+DISABLE_REGISTRATION={{ plausible_lock_register }}
diff --git a/roles/usegalaxy-eu.rsync-to-nfs/tasks/main.yml b/roles/usegalaxy-eu.rsync-to-nfs/tasks/main.yml
index 29ee25a5b..aa9fa4cef 100644
--- a/roles/usegalaxy-eu.rsync-to-nfs/tasks/main.yml
+++ b/roles/usegalaxy-eu.rsync-to-nfs/tasks/main.yml
@@ -4,7 +4,7 @@
     content: |
       #!/bin/bash
       cd {{ galaxy_root }};
-      for dir in {config,custom-tools,dynamic_rules,mutable-config,mutable-data,server,venv,tool-data}; do
+      for dir in {config,custom-tools,mutable-config,mutable-data,server,venv,tool-data}; do
         if [ -d $dir ]; then
           echo "Syncing $dir"
           rsync -avr --delete --exclude node_modules --exclude .git --exclude __pycache__ $dir/ {{ galaxy_nfs_location }}/$dir/
diff --git a/roles/usegalaxy-eu.subdomain-themes/tasks/main.yml b/roles/usegalaxy-eu.subdomain-themes/tasks/main.yml
index 5a99fe06f..7adc7f779 100644
--- a/roles/usegalaxy-eu.subdomain-themes/tasks/main.yml
+++ b/roles/usegalaxy-eu.subdomain-themes/tasks/main.yml
@@ -14,7 +14,7 @@
     dest: "{{ multisite_dir }}/usegalaxy.eu.html"
     mode: 0644
   with_items:
-    - index: "/main/"
+    - index: "main/"
 
 - name: "Template out welcome pages"
   template:
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/README.md b/roles/usegalaxy-eu.vgcn-monitoring/README.md
new file mode 100644
index 000000000..a9ee9d0ed
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/README.md
@@ -0,0 +1,37 @@
+Role Name
+=========
+
+usegalaxy-eu.vgcn-monitoring
+
+Requirements
+------------
+
+Python requirements:
+  - GitPython
+  - PyYAML
+  - python-openstackclient
+
+_Note: These are installed in the `galaxy` venv; see the headnode (sn06.yml and sn07.yml)
group_vars files_
+
+System requirements:
+  - condor_status
+
+Role Variables
+--------------
+
+Role variables are defined in `defaults/main.yml`
+
+Description of some variables:
+
+  - `vgcn_infra_repo`: GitHub repository URL of the vgcn-infrastructure repository (default: `https://github.com/usegalaxy-eu/vgcn-infrastructure`)
+  - `vgcn_repo_dest_dir`: path to the directory where the vgcn-infrastructure repository will be cloned (default: `/tmp/vgcn-infrastructure-repo`)
+  - `vgcn_ven_dir`: path to the directory where the virtual environment with the required dependencies is installed (default: `"{{ galaxy_venv_dir }}"`, defined in the headnode (`sn06.yml`, `sn07.yml`) group_vars)
+  - `openstack_executable`: path to the OpenStack executable (default: `"{{ galaxy_venv_dir }}/bin/openstack"`)
+  - `custom_vgcn_env`: defines all OpenStack environment variables and the path to the Python executable of the virtual environment (all OpenStack environment variables are defined in the vault file)
+
+Example Playbook
+----------------
+
+    - hosts: maintenance
+      roles:
+        - usegalaxy-eu.vgcn-monitoring
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/defaults/main.yml b/roles/usegalaxy-eu.vgcn-monitoring/defaults/main.yml
new file mode 100644
index 000000000..534b568c8
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# defaults file for usegalaxy-eu.vgcn-monitoring
+vgcn_infra_repo: https://github.com/usegalaxy-eu/vgcn-infrastructure
+vgcn_repo_dest_dir: /tmp/vgcn-infrastructure-repo
+vgcn_ven_dir: "{{ galaxy_venv_dir }}"
+openstack_executable: "{{ galaxy_venv_dir }}/bin/openstack"
+
+# Credentials are stored in the vault
+custom_vgcn_env: "/usr/bin/env OS_AUTH_TYPE={{ bwc_OS_AUTH_TYPE }} OS_AUTH_URL={{ bwc_OS_AUTH_URL }} OS_IDENTITY_API_VERSION={{ bwc_OS_IDENTITY_API_VERSION }} OS_REGION_NAME={{ bwc_OS_REGION_NAME }} OS_INTERFACE={{ bwc_OS_INTERFACE }} OS_APPLICATION_CREDENTIAL_ID={{ bwc_OS_APPLICATION_CREDENTIAL_ID }} OS_APPLICATION_CREDENTIAL_SECRET={{ bwc_OS_APPLICATION_CREDENTIAL_SECRET }} {{ vgcn_ven_dir }}/bin/python"
diff --git a/roles/dj-wasabi.telegraf/handlers/main.yml b/roles/usegalaxy-eu.vgcn-monitoring/handlers/main.yml
similarity index 53%
rename from roles/dj-wasabi.telegraf/handlers/main.yml
rename to roles/usegalaxy-eu.vgcn-monitoring/handlers/main.yml
index 78a82a724..8afefc2a5 100644
--- a/roles/dj-wasabi.telegraf/handlers/main.yml
+++ b/roles/usegalaxy-eu.vgcn-monitoring/handlers/main.yml
@@ -1,9 +1,8 @@
 ---
-# handlers file for ansible-telegraf
-
-- name: "Restart Telegraf"
+# handlers file for usegalaxy-eu.vgcn-monitoring
+- name: restart telegraf
+  become: yes
   service:
     name: telegraf
     state: restarted
     enabled: yes
-  become: yes
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/meta/main.yml b/roles/usegalaxy-eu.vgcn-monitoring/meta/main.yml
new file mode 100644
index 000000000..3d6a8a446
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  author: The Galaxy Project
+  description: Installs a VGCN monitoring script and a Telegraf configuration file
+    to monitor the VGCN nodes.
+  company: The Galaxy Project
+  license: MIT
+  min_ansible_version: 2.5
+  platforms:
+    - name: EL
+      versions:
+        - 8
+        - 9
+  galaxy_tags:
+    - system
+    - monitoring
+dependencies: []
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/tasks/main.yml b/roles/usegalaxy-eu.vgcn-monitoring/tasks/main.yml
new file mode 100644
index 000000000..adf04f512
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: Copy vgcn_monitoring script template
+  template:
+    src: vgcn_monitoring.py.j2
+    dest: /usr/local/bin/vgcn_monitoring.py
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Add VGCN monitoring Telegraf configuration
+  template:
+    src: vgcn_monitoring.conf.j2
+    dest: /etc/telegraf/telegraf.d/vgcn_monitoring.conf
+    owner: telegraf
+    group: telegraf
+    mode: 0640
+  notify: restart telegraf
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.conf.j2 b/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.conf.j2
new file mode 100644
index 000000000..d0e92723b
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.conf.j2
@@ -0,0 +1,6 @@
+### Managed by usegalaxy-eu.vgcn-monitoring ansible role ###
+[[inputs.exec]]
+  commands = ["{{ custom_vgcn_env }} /usr/local/bin/vgcn_monitoring.py"]
+  timeout = "60s"
+  data_format = "influx"
+  interval = "30m"
diff --git a/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.py.j2 b/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.py.j2
new file mode 100644
index 000000000..157ec54c5
--- /dev/null
+++ b/roles/usegalaxy-eu.vgcn-monitoring/templates/vgcn_monitoring.py.j2
@@ -0,0 +1,256 @@
+#!/usr/bin/env python
+"""Telegraf VGCN nodes monitoring script."""
+
+# Python requirements:
+#   - PyYAML
+#   - GitPython
+
+# System requirements:
+#   - openstack command
+#   - condor_status command
+
+# This script clones the vgcn-infrastructure repo and parses the resources.yaml
+# file to get the list of VGCN nodes (resources and their counts). It then
+# queries the OpenStack cloud and the HTCondor pool to get the list of active
+# VGCN nodes, and finally prepares the InfluxDB line protocol output.
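+#
+# Example output line (node name and values are illustrative):
+#   vgcn_monitoring,resource=vgcnbwc-worker-c125m425-0001 resource_def=True,bwcloud=True,htcondor=False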
+
+import os
+import shutil
+import subprocess
+import sys
+
+import yaml
+from git import Repo
+
+
+def clone_vgcn_infrastructure_repo(repo_url, dest_dir):
+    """Clone the vgcn-infrastructure repo into dest_dir.
+
+    Args:
+        repo_url (str): URL of the vgcn-infrastructure repo
+        dest_dir (str): Path to the directory where the repo will be cloned
+    """
+    if os.path.isdir(dest_dir):
+        if os.path.isdir(f"{dest_dir}/.git") and os.path.isfile(
+            f"{dest_dir}/resources.yaml"
+        ):
+            remove_folder(dest_dir)
+
+    # Clone the repo
+    try:
+        Repo.clone_from(repo_url, dest_dir)
+    except Exception as err:
+        print(f"Error while cloning vgcn-infrastructure repo: {err}")
+        sys.exit(1)
+
+
+def get_vgcn_node_names(repo_path):
+    """Get the list of VGCN 'worker' nodes
+
+    Args:
+        repo_path (str): Path to the vgcn-infrastructure repo
+
+    Returns:
+        dict: Dictionary of VGCN worker nodes and their count
+    """
+    workers = {}
+
+    # Parse the resources.yaml file
+    with open(f"{repo_path}/resources.yaml") as infile:
+        resources = yaml.safe_load(infile)
+        for resource_id in resources["deployment"]:
+            if resource_id.startswith("worker"):
+                worker_name = f"vgcnbwc-{resource_id}"
+                resource = resources["deployment"][resource_id]
+                workers[worker_name] = resource["count"]
+
+    return workers
+
+
+def query_openstack_servers_list():
+    """Query the OpenStack servers list and return names of the
+    active worker nodes
+
+    Returns:
+        list: Names of the active worker nodes
+    """
+    # Check if the openstack command is available or not
+    check_openstack_cmd()
+
+    try:
+        output = subprocess.run(
+            [
+                "{{ openstack_executable }}",
+                "server",
+                "list",
+                "--name",
+                "vgcnbwc-worker",
+                "--status",
+                "ACTIVE",
+                "-f",
+                "value",
+                "-c",
+                "Name",
+                "--sort-column",
+                "Name",
+            ],
+            check=True,
+            capture_output=True,
+        )
+    except Exception as err:
+        print(f"Error while querying OpenStack server list: {err}")
+        sys.exit(1)
+
+    return output.stdout.decode("utf-8").splitlines()
+
+
+def query_htcondor_status():
+    """Query the HTCondor status and return the machine names
+
+    Returns:
+        list: Machine names, with the domain part stripped
+    """
+    # Check if the condor_status command is available or not
+    check_condor_status_cmd()
+
+    try:
+        output = subprocess.run(
+            ["condor_status", "-autoformat", "Machine"],
+            check=True,
+            capture_output=True,
+        )
+    except Exception as err:
+        print(f"Error while querying HTCondor status: {err}")
+        sys.exit(1)
+
+    # Remove domain name from the machine names
+    return [node.rsplit('.')[0] for node in output.stdout.decode("utf-8").splitlines()]
+
+
+def check_openstack_cmd():
+    """Check if the openstack command is available or not"""
+    try:
+        subprocess.run(
+            ["{{ openstack_executable }}", "--version"], check=True, stdout=subprocess.DEVNULL
+        )
+    except Exception as err:
+        print(f"Error while checking openstack command: {err}")
+        sys.exit(1)
+
+
+def check_condor_status_cmd():
+    """Check if the condor_status command is available or not"""
+    try:
+        subprocess.run(
+            ["condor_status", "--version"], check=True, stdout=subprocess.DEVNULL
+        )
+    except Exception as err:
+        print(f"Error while checking condor_status command: {err}")
+        sys.exit(1)
+
+
+def influxdb_format(node_name, resource_def, bwcloud, htcondor):
+    """Prepare the influxdb format output
+
+    Args:
+        node_name (str): Name of the node
+        resource_def (bool): Exists in the resource definition file or not
+        bwcloud (bool): Exists in the OpenStack cloud or not
+        htcondor (bool): Exists in the HTCondor pool or not
+    """
+    return f"vgcn_monitoring,resource={node_name}
resource_def={resource_def},bwcloud={bwcloud},htcondor={htcondor}" + + +def group_count(lst, pattern): + """Return the count of the elements in the list that match the pattern + + Args: + lst (list): List of strings + pattern (str): Pattern to match + + Returns: + int: Count of the elements in the list that match the pattern + """ + return len([x for x in lst if x.startswith(pattern)]) + + +def remove_folder(path): + """Remove the folder and its contents + + Args: + path (str): Path to the folder to be removed + """ + try: + if os.path.isdir(path): + shutil.rmtree(path) + except Exception as err: + print(f"Error while removing the folder: {err}") + sys.exit(1) + + +def main(): + """Parse, query, process and prepare the influxdb data for VGCN nodes""" + # Clone the vgcn-infrastructure repo + vgcn_infra_repo = "{{ vgcn_infra_repo }}" + repo_dest_dir = "{{ vgcn_repo_dest_dir }}" + clone_vgcn_infrastructure_repo(vgcn_infra_repo, repo_dest_dir) + + # Get the list of VGCN nodes (resources and their counts from resources.yaml) + vgcn_nodes = get_vgcn_node_names(repo_dest_dir) + + # Query the OpenStack servers list and collect the names of the active + # 'vgcnbwc-worker' nodes + active_workers = query_openstack_servers_list() + + # Query the HTCondor status and collect the machine names + htcondor_machines = query_htcondor_status() + + # Merge the data from OpenStack and HTCondor and get unique list of nodes + # and remove the ones that are not vgcnbwc-worker nodes + all_nodes = [ + node + for node in list(set(active_workers + htcondor_machines)) + if node.startswith("vgcnbwc-worker") + ] + + # Create a dictionary where keys are node names and values are + # a dictionary with keys resource_def, bwcloud and htcondor with boolean values + nodes = {} + for node in all_nodes: + nodes[node] = { + "resource_def": node.rsplit("-", 1)[0] in vgcn_nodes, + "bwcloud": node in active_workers, + "htcondor": node in htcondor_machines, + } + + # Check the count of the nodes and then compare it with the count mentioned + # in the resources.yaml file. 
If the count is lower than expected,
+    # add the missing nodes with the format vgcnbwc-<resource_id>-XXXXX and the
+    # values resource_def True, bwcloud False and htcondor False
+    for resource_name, resource_count in vgcn_nodes.items():
+        nodes_count = group_count(all_nodes, resource_name)
+        if nodes_count < resource_count:
+            for num in range(1, resource_count - nodes_count + 1):
+                nodes[f"{resource_name}-{'X' * num}"] = {
+                    "resource_def": True,
+                    "bwcloud": False,
+                    "htcondor": False,
+                }
+
+    # Clean up the repo directory
+    remove_folder(repo_dest_dir)
+
+    # Print the influxdb format output
+    for node_name, node_data in nodes.items():
+        print(
+            influxdb_format(
+                node_name,
+                resource_def=node_data["resource_def"],
+                bwcloud=node_data["bwcloud"],
+                htcondor=node_data["htcondor"],
+            )
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/secret_group_vars/all.yml b/secret_group_vars/all.yml
index 2a23882d3..f9b08ea77 100644
--- a/secret_group_vars/all.yml
+++ b/secret_group_vars/all.yml
@@ -1,190 +1,229 @@
 $ANSIBLE_VAULT;1.1;AES256
-65653735646264623436383265343866323237663130373061393465633331363066656630613731
-3766656564363064366661633265303834396632316262340a306632636636333661373135383937
-30383732393761353630666435356537393833303931353234623936633130373230656461343331
-3338386562313438640a396538376263326562386261343832386139343633646430303234656536
-31356264613835656232373838313062663361333233303664623533393365323939363331656130
-33626161363134376531633432316464626362393364306532636464323963373132396531643230
-61663032346331396633373066356163626130306666303936653739633536376139623635316534
-34343236376363386637333234343435613432343633383631613931383632343165636439313339
-32323335366637303630633232613239326462356364636166333133366232303530346236303666
-33313166633131643432383632383433653065373235643961316136643836383830363831653639
-33623939346562636537663263356465663636656231363135396362396330303930646438636265
-65643962396134373366663130323662636434356362323662646631616632366232653338323631
-31313038653031376631376136306564616137613862326164373333346161316436343036633138
-64393334313766373866663439386330656136323138346263666430666637393733386532663837
-35623463346339386138326137336530353164336363613233636630396461323936393735373234
-30646433316462313436386661386363383363626336303966653235333638353134313061663731
-32323433383836383064303766363466373439663165396363373136366339636363333933336265
-62323336363134613537326632643064383932346631313938363166386566646234636264333539
-64303030656266306361366139646333333231616235623133373939306435323331613936383934
-34333335396636616263646364363861336362313838333139616434353438373633613631356436
-34336430373730383538393362376339653335393036306265303134333734626162663035663138
-37353664383666633634323965623437383764383836373634363563616362366264646162363730
-64386465313366316330663062376462636166313266653538383565323230616334663161333537
-64376238356332643432626334396232393531616661633232616465393564346637376264643139
-30626337316565363534613164346136633534376233393636323066396365663764326130323935
-35323033316437366265656634626535386161633132343332356133396239313165663963666363
-66316232373365653533643630333332663531646432313165623038313939663266633063636161
-39613430643030656436343365393337363838373462313536386364646365623162643631326165
-39623636393863333262633236356638326631376362616264353663383031633066396263356361
-62363331626335636230646563656364636236323234343462663234326533333731666134303034
-38386637373763623964383531633062623037633562643031333639303965333938366263336466 -34663632656664386633383762623631363532316365373234643266636431353434626438376135 -32366464393163663864636466336137656265343332373833393963373530373261653034346637 -33316331623735643739623734383933303332306562643831343831646563393566623037353631 -38303434663461613535633532336335373939616565653261656666386662353761323164626238 -36316563336364393362306534326236636633386337376437346136373765323664626365633833 -35353638663330323735306439393230346262653262643432336338383530333664346565653364 -35313136653962326463336638343162666438363835366136393364376266666634356366666634 -61316433303936313063346337663963303435323136616566636432373264326634636363326464 -38613637383438323038656334393035336139643261636661653636396532616262626464613061 -63376636643439313964616132613338613530303833313038383266653162663566366361663065 -32303835303135643633366537383763353764363266626664386634633736643566636665373136 -37356235643264373362363236346663363861653939366439323066646436656263646638383235 -61373934653934303132656561643065656464653333353665613764386237663661353031663761 -30373332313264376339393637366166326137623336303339346135333565643965663035353532 -66303162643066313662316231353865323731333034616635333361616161653330376433333364 -32333439636137623238646265666132306664643334653234303761666233623930386130616162 -35613963653065383032393633633632336561343435663663666535366166623631386639323064 -65333732316661323963343139366664383864623736393764636139393362633963393566633236 -39323163663764616332383333643866616530356437633633663938373832386431366364613162 -30306334626539386333353231306461303136616366326637343634633738646465373237666431 -35393936656439663930636261633033613865653063633231626162316331326364373336393233 -30366537653939353139646161323139353664646231393930666638353530613132393266643638 -33393030303934333539303861346532303564636163383039316162666232373632313961343238 -38376136663565633035656231373035396635353764636662366637333432626230353838346665 -64343538386462386261343933303737323663343737653531333738393538386233353562613031 -32613461383262393437346335353437303839383835656464346263386431363262623236313564 -30393235396536343133623762363139633039636334623162386436333731333738323130373064 -63316162336435343833303365643636393330613266363936386239353264643161653766386663 -33383734613934393762623233373336373832636261386239363739623665316461666461343733 -65363637333762316232376239303639646338376166323234386133323833353035363238373964 -35373038333534393133346631353434316431323662313164666438303335383632616463353336 -38336631373066663264373830626665333730656164303933383564336166316536666366396134 -32306164656262343466393733613734353934383062303631633565663733343361363961316662 -39333438666664613638393964653132323462383839336439373230303764383563363561333736 -61333730363137343631393561653037376134653930393833643035633938333338373861623265 -65313938653738633731336261653536333433653262656439386437653732343034396235356263 -32333433326365616233323339366130323535343131316131633838613139316564393234663733 -65306532636539626631613863646634656262393838373932313832356437626166343763343632 -39666566336332323337343839303737643034346363623366613964613433353236653464333838 -38333835373863366135393662613861316639376165623335356366336534353531356430346234 -34333462663039383038393739316239313332326238623065663734323336653030643662666665 -30323437306564346634626636303664353835303263623664613464373330663737306263393036 
-37336439383630643637356364356632663862623630353634636630346131653538356361313737 -64626266346664643030356163393631646134333065323661663936333762633437396437303366 -30613761633264383862313063663465636238303031663162636231633564636531643537633762 -36643863353734386134613663323337383962643431316166643831666163376637316461313361 -32363031356130613239353863383439666666643539306433396337363463653731633036353731 -33316330313235366530323734343161306538346163636636636166313638343839626532373534 -66663664383365663537613462316538393733326639353732663034303036373035373234633035 -36393762643734333235333064356439636562636562376238383363306536396434643530633738 -30396436636434323137386432636563643037346466633937663466356333393435613335353034 -31623564303463373837343932393965626632663732663238316462653136316162626666353464 -31636135623139633064393832366239316635653563643161613766663438393631333837383638 -65353561663664353534373833646265313362396539316364636237306330346463313061396334 -32323438386535343764343136376336656162623566383432353165346266653430353564373064 -35636161396661343465346232396163386339346162666661383264396162303564663732373165 -32636461653864353336616437633663616465333963373037306430366539323536343436623436 -38393830353961613831323635633865613035623361613936323734343834643164336161623537 -31373837646537656133333666663061363233623361363039633230343061616335313562363261 -33643263313732326336323864373139396430363232386662353937333561346463636237623938 -66653430666563353638646136393061393363373535353934303735393966626437373566373062 -65383236396233396234666534343664386235373730333463303761616434613237393262303766 -30353730663833346662363935643663313633666438666561343163636137316237356261613064 -32656561343764303864336166653066363039363335386435353238383564613231613038363239 -34626665396162373436373636303337653432373630616239656133333634316133363739336664 -34363565333965333864343330383439326632353939653530336635646430623133336439373534 -39663861633838653561366337653639353765323661643835666133323130623835626463343539 -62303063366631356161356131633232313963393631633931326233323363636637303862323431 -36643163393163373563336638623630353163336236313737663234393535363361653238326132 -62656263626162393438373362343965353961373534383634623466666661646532633039383630 -37623164356464656231306638323338306462336233643130656262323535316462366562383236 -65663762326465363139313430386263303331653739396362616131653234366638326631383864 -66663862363935653338306630353762636662366566646562383362646530613835383638646230 -30383036373765316533323435626531363239613638346639646135643862313565373263643064 -64636230373434383263366364353763383435383732343036636337313462656539653863353263 -35303138653763663363376637373138353432663261326465303863633933313630313163363832 -39663833666261663236306435633562353266333334333535323438393833656633303539393237 -66623933343466303666626137303736313139613332356630633630313362663733323636623834 -63313331333565623535653162656538323239316262373466323939356334373661613065633764 -64613137373336373765633537393430313731363034376537666539303266633835623766353765 -34643264653236373163393530663835366363386463646330313461646439363362633366366531 -65306339393461663935643866663866333137373034353837373864303038623836386530386635 -65356438623436383131356531653338303333633137396430633562323363373030663533396638 -34633761633631653165333463323834653830303165393561613739326335363836316363333764 -65376239373931303338363438323530356537323135343966356632313837633864303235343563 
-61363532646334363331366135323063396230343966313066343434383261643633323932363736 -35373461623262663831623930356137653364663037323836626362333139393539633930363266 -35336130336664303335393931353636353932333333383564383963616139323132623639626531 -62613463306335366530313535313361313463393337316631633736376434623535366330323463 -38303334356437646564383831636135623534386532393464636461666662353738336235366166 -32303330666666663331373335353561333835613935616130626436613030636536653933666363 -39633464663632656131643933306563643963313063363532316661626463326234316639663163 -66356236303033333237373337323034363334393436303762313939643130336534393765353630 -36303634313138363339353032303934323033373762346232313533333638386238313938326636 -64303766313733656263383331393338336565643432386633633032633135363766373661396538 -34333234306238623832616261643963623561646537306565303033653530626333376462366332 -39633739313730346537323533656134393564613462633266313235646363623533333766396336 -37626565663731306532383334383532383765373638393136663162303461656431326538376266 -35363362333762663166346164343964643832656337333839633032626334373465626261303062 -37633162346538613838393761303434366661303363313233663436636162363564323464646462 -30643737353536353032396262626438666365393865623031323438663435393938653261613230 -65313531663837336338356362393533326264623236323862336433613462613635623032313430 -37323734396261616665303663393336666666623765613135346163623237333566313134353435 -66633336396162656339666663306633633762353932333330643338383732633133343438343736 -63333136363832373037303561666336356336393638656666353530626437363736323231616635 -34336533643862623237646537326430383064306333663363626333626163336231313264336431 -66313439346265623330663934613037363165623033333437313666393137353038396566643733 -30306437313133313430643831643836326436336264623835373662613862353833633862333634 -34393262306630393366633837393234393365313036333766343765663163393134303935393436 -64396433373964613463633939346462393965343265636532323630323937383266643462313164 -35323866393961633032323738356331656333346235386235303339646136656237643664643137 -64626665396335353666626330353630626131303934383732386337613337633536303063616261 -66376134616263373838313934643834376439373537656664303330386164316139316161643061 -39393938343663646131333263643032323336636164356333313937666666336335376433373634 -30393137643835326163653337343735393564343938333665623435653665363265343463353535 -38366132333133626464396539663937383162393363393334383438396234646338343635613662 -33643236336164343432336531323563653763356532663237356164356238366664333334373764 -63613433613063336266336265306330323361643666383830333631646131373136373366363164 -39376633343030393937643436653839373961356533383239636233323039633334343334393262 -36356631656130626332343064343235306562303130613866393064336238656432643234373237 -31303833623630343130353434366236646231396264653239383330353635366132333661613138 -63626638396563306230336361613732616139373034326235353331613035393266636330396234 -32393435323333316165333964323663646166346161356433326566663835333064616263323334 -65303833313934323237613962626432613963646533666634333166626136383265323461613963 -65653163666462386163663563303634653837316366383831643965326631666138393035366534 -62326435353933646361306265343833386162323663303738393837373263303164346462636562 -63646230376535353561646462386563613133643366633933386630383938393566616535653331 -39363664383666303766656461666464353863653337376466306365366361353730313839323566 
-39383331626264616335323932326365353139353263336530646264306664376233323531303239 -31383063343535656431336535393030353439376134373638386164653039343938663736353437 -36343666386664663262303063303763303162303338393865363735336236363332633837383138 -32633966316131323464616434323639313938373233373836323364386636636661396665616163 -37393534633134313763646261653732623330333165616137326266633462326630383661653139 -30363631376231313133303166346630343964346562326461343639613234306462346431306436 -31613335306333656361383363343335373033656639386266376464313736613739356237333631 -33323237303630666166323730666231316433386434643632633766626265653765353238323864 -63303032343264376134376136333065633461363539613337363537343362376232393830306533 -35393866353333356161386565343338326264653336323430303435626638313432343863333030 -66303165323764393132316638346337326337333761633230356339643630353039633237663138 -37343838343635393736643866633339666433383137646632383061363935386538353864376431 -31623965373330313235646131626235393434336361633135323439613864376536393934313236 -39366331633232616363336439616331633363363732643536316533333361313462643064356661 -66313866373238626432373662666536366365633835623939623665393962633464633330366136 -62623632653236626234616264616232373135623337346261626533396562623665373739386332 -30313530333936313365303464303639306136336637663765343963633733393166653035346565 -65396165663365353862303466313931343939633531653230383433656635313361326438623364 -66356162613138643465363637386262363164353365303264343233636331663465613161366664 -63326436633832303532393039323939326261363737646336303563626462396663633061663737 -38343537346236653139346164343339623662353566366662373166306261653166636663613437 -39653762656230323662396133653438653538313438646362323338626235303163373662313761 -31306338656637343835613830666632386361393438333134333063336265333830653534333730 -61366138306331613038303430666537613264373931643461333961343839303637363432623731 -36643133303462383465306532626335623938646164326565366632653332663733316332323934 -36336362333836376264353562613662383661366461623834646635366331336137353733376338 -64376237376633643936623732323964383730323436363735383830616134343863396364336661 -35336634353039323762326662313062376564333864303531313135343131333563353331343666 -63613337386431326136326639333761663565376238643364316637623663613166386461366235 -3365 +30313334656664363538386161353336633465326330343361613764393563666635313761613335 +3838313733616333383565663366353562303638316639650a373462643036336163623132313532 +62373733383731343832346232336331663631326536633365333166656638363131343361663363 +3634373066363533370a366137356464356335643836343066663932353666336431373934623636 +35393564356361316430386631333465373537666463613935333837613639323530363139353932 +35663531343436353330323239646662316131383932623964386639623030616165383030663630 +63363065336231326462663938656335636130363932346166643533626138353862353831373735 +61643263386530633434333530353839343763333038623834373832313366393137653463623936 +39306435633466663933343930306539333335646633656533616633663631376563303030613139 +38633666393833346264336665346665653532646431333436326366633433383733653066303465 +31353835316365333331653236336565613963383338396663646136633861363263646137386461 +38633032323532646666646165353761393139376437373565633933626239323063313531356430 +32373430666162333936303261333361663462613761363034316461336238663733323634353538 +65323132643364306662343130383865333265306165363865336531346263386334353263353033 
+37383737326465613962303866656137613439613863656439643730623830353066386664333064 +64626438386165333835333564373238646639643933393037656164316364343730376362626539 +35653932666432376532383631623165353634643161333665333834396532306339623332616534 +64373064373536343737343030323361383361346461646237636639356664306566623633326635 +66666364326236366463303330373861343932323438343061303933393434333961636262666236 +61323934326237636364626261313166313061633161643139383233396330303930373339373130 +66383939356438343235643334383535366362323035333563656631663732386234383134346133 +38643739336339663230613666366338653030393033663730323533376230343163313238316564 +31643739366564313364396638623231313133623664306236306632616535346538313934663965 +33656538386139356230623136343933303039323865306335346365623263616230386464356437 +34383763393563393232643838326231353236346632353466663134393337626362613234646638 +31636165626635303239343764313061343665613530383665313138626265356439626239376463 +36626134346664396261383565383731303362636231316338303166656337323136633333356535 +66313561333535313564636564323730636431396431386564653730376638636163663238363836 +37333639373965316130343462373731656637313332653132653037383131356636333265386438 +39643363336666333435373536616130633862353332356162323065326132326661373435643334 +37653932396264396133303130653539653264633939656265313435363330643661653434653961 +38326264306334623430383035363631383330623361653637323362643662383735653062366133 +38313865373266346166636233363830303766366134373763363264393039656364663133343062 +61633538326232633833306331303964623535346164316638303261303138366261333831383639 +30653364396165663032373635393565623232363230323039346438343561663332613839356466 +30363537383765323661383332626238393933623834616138393836626338326638376135623335 +32336366363363653963643664373036383464353966626161343830393033336432383834653462 +32616631616335376632656337316361616531306139653735343732623639616331373131616437 +34656566353361393466626563376230346461383839333466633433643635343035653165636665 +65346161656239633831663131306165343232373934626434633839613037326234646465313233 +38636433376666656435333537333865633830316363346466326335636635666637353263356635 +63343066306264366334306130303930373264316534323631386439393937353534656536353734 +38323834623137393865633937653836616131613562313737663039376439643864326435373135 +39333666633965646632613563323431333833613738383162353562663562333463616331373331 +32366634313161663662393365356431353538666534303536363763383337323537383964376162 +62633265333839633332376236613563363466346532313631646365383766333834353034393264 +66616365613962303733336536633233383966633636346630396431633566626639306536366535 +32316261303535616137653335363765316636356563306566326364623238383561623338356466 +38346230393837646231623233643539623231326533396234303733356464663165646535653666 +33646438663838646632376563646564316430376638366661343830323336306361393735346436 +39653230303034616363323338383634366535663030636538356664376566643438353263346161 +63653036633335343633383931383633346131393462396333646162363164326662346432613264 +66366631653937363463346330376162643931316361393938303965363635363963656632376236 +36306566626432376439613836386464383035393433313631306136316438656632316663356439 +36366563303838353939656532396639353365396366386366636532326363643037613163383939 +65306362393732363064383962346239343062343963636239643466613065646133323334653032 +33356562633162666431306532616464396364636534616665323465386162663033646131303637 
+64363333353437636461653438653461323030366431643662303637346635386566636432663766 +62646630626238613532643865616164346236333531643736663165646538376632393163393966 +30363434343631346239386433303765343630336265363331373166613431353266393138666536 +66316464313331303265393363323230313530383165376336616661313266626235613262653935 +35383364653830643737386338616236363563663565326137373636336337633865643833646566 +35633533653538326431346362316434646564666333396434656239393539376436313061313165 +38326566623139336535316134323164653937653337353235353162653137333962663232343039 +62356337303035366531646536353130323231346161653566373562303663386136336463333933 +64353363663161346564323330376432653961666539383463353361353763353835396638366130 +34653532303139636531663061303865376361373834366434663134353464663234383637356162 +34323231356266393030626134653765333932646665373735356163623266393332353130616162 +65316434316637343039643634336662336630306533343338633837613732666239313033303161 +35643963386561356562393564643636316336373336643764613663663132306536663332316466 +37343665323232313535313733373864646164353966646465663261363032326466343666306565 +62336134346335316465356632663135376231346337303565363062613935343363333739366634 +30626662633532336436326663323533323163306338383561636666326164383734316261323465 +34383835343636336636653330663763656430363336303866356464396239313330366534663831 +35383130323230643630653261323435396531356137653861313634346164353534316339363866 +62323530336566663433366337396463323332616466373064363132633136613765393765313264 +64376264396230613464306661383865643433623264643961366531613337316134313063303164 +61373035306539646565316564343931373365336338656532346566623962633930376635326632 +37323935386432636161663034356662363236376433373238393061313635373962363034346639 +34306238393166303135636535633034383338666436333663393230663637326637343633386266 +33653966346137373736623935393266643838346339313935646633306462353632663563343339 +36366263636537313639633763323363336532643162623262373334656134626535393366343262 +66343631636233336331626139393963653137346337623638386533326562313964363136616662 +39653365633435356531363166623832393934303739343262313137376232653430393361373165 +34663166333434303366323738303936613664306139636465313664643239346633633136393334 +66363262376163333839366232306664663637303132303135346433633735333439383835363962 +61383235636566393064323030666538333030383731356363616565306534396631336634326139 +30633565663966316664623933356439396265373161386261656237313733313032663537346162 +35306662303961363161316162346464633036646438373336373463306164633034306164646532 +34663231656561636235383336616565623131353733616235616364323862636666393230396537 +62383139633836663532336236306333356166643737393830656464653765386439353736343432 +32326566383566356539343738383231646362326231613236333966316166323138366136306166 +63363665316565316664366361636136376533353338306332386664646234643038663934613830 +34396134653232366331343838353331316565386535626138636564656630303932343039623734 +34663535653665336161373962616134326430366161633438623837383138616163376434383066 +64353938633234313435323734333565633566343437636466613861616562353730306363613732 +37383130653439336632343430393663343135366534366230393633353333333936306266376634 +36333162633434623930646235343661646239373931303462353935643439336238663633613433 +62326631313434333265303166633861666234636563373430643834653866643132653766303730 +65366162343532353838323461633663396339326332663934333834353333323361353265303766 
+65326261306435323763373839386639343865346136316265636362316164623131623232343736 +65323731643731656439383230636530663232383864666239633239616662336164353164636637 +39633733306131353364323063346664356238643838343765383030653035303936346235616366 +30303338303866386238386165643937373461626436323638396635643431366534303137323961 +32386630373266326333376465656535376561633662396165663362353831353034343833326566 +38333331376466363761316230616136336366656136363732343564366137626331636331633638 +37343934333333663163303630613531323965616135396463386339326438316337623466376139 +38643366666161313539356366326434373238393662363936636530396338386338366331613361 +36373037663230303432353630633034646138323663333862313834393637633935623963383532 +39353938333937343466356564343639353138383039373135313837383838626165306330363066 +34663231353539653433373763346435316563333031353338643362353935326363376662326430 +34353063343535356366303136333235643830356261633563366536316437636236306564646434 +39643731323230383065343866363861303634613532643165333463623261313663643265316130 +62363963366136313635666165333134653139306230326466316430346537326134623033396230 +65303037376434386366393838643933393337363434623838353432393534656433353232343531 +32613932616566393235313832656431363166303731303539643536383635313266613734633432 +37653961616232393233326561363838653166393434666335336634636137323330643366633063 +66366538343062653037363563313063663238376630376335666231343137336262656461313539 +39653634643837653565663231343365666331663031353230653232666266333364323766643866 +31333933643362366563313861626365323731373564353635333763303035353933343662333736 +33373063386638616162616133303464376334396137636461643261646463663432383562656436 +63393231333439336538646637303232626161633131376164326137336634363236636233333965 +36666131393736643636306161356666633539646538383562343933383964613339616235323032 +32393061353736643564376534383762366363333134666162663736333165613963616364613639 +34373366343630343232366563323166363262326363643435393566363966306166666133323232 +39303963343934333736323630636166613931663430316337326137353239306233363135376661 +36633466303366366163643963303264363039306433323632373032643463366233633830663736 +32356363653937303030373936303531363330613330666531336335353465363439323039336637 +65653064653361343539393964653361303536356534303565303462653630373962376330356436 +63386564656162366464383036633031633436383538393664396635613339633833366234326236 +63643830346233353661666432616333636266613135626531316639366664313033363535373338 +36663163343839613965383261643533373637353363396366623436326331373533356234656163 +62363962343037333339316632313361386431636433303631373230313064663263376566643164 +66626464396232333139323233636332393134346166656538623332616633623763653461646132 +32393535636461303766366338346631393433396235313536306634376138333334663938343230 +65346236303862633831653136653435636530343362663266333038326637306161393834646663 +36336438653762666138313564393832333664393632636532393236666632663233386333373962 +34356162336430323636613530343138643836653439326433626130666163313962303334626362 +38663065373565366536373366393832316537316634346332356365646533633863303937353065 +37336165623266363263353062373035323132393063316632303465373132646332363562666639 +37636136356631306566336339646133343036613533663162616532373138633634363165366336 +65393030366639333366613738323031653232336235363930336438313364613534663839656132 +62323233303330393631623364653163323533373761326535373464666531393330336336303535 
+63306635316133623630333134653164316539616638636461393063383138666361326338636231 +64393434396662333831366462653336313963393866323933633464393161306431653765636339 +61333937373437383766363865353437656364666531633332353536393665326336303865323935 +63326235333264656532366364343634333337623437633539623166336564333430626432626438 +36353338346531336339636164633163633138613765343031663862393737393262313762616336 +63393338656631303133663339336135366266306134323165663463656365633030656338613631 +30653230663835643266326633333362633862343533646161653361366661393031656435346431 +66313736666165363861616634633162303363313635316136623163326531323863623838396433 +65383764323962333662633036656665623634613433323333373038353132356431343366653431 +33363466663361663332373533613335383831316234666136623030343465653362353533613634 +65313134636465643239303238646537663332623066633066656239343136383732663637643165 +65316666663137313432663535343164653030636137653239643061323238623863656336323164 +62396532316438383365653163363536653664653632643762303563303934363562333739376562 +33376137333166396466303735386463303535613564353231343038633565626131363262376237 +34633236623063353836663833633131663232366330366334623031333035653539336132626539 +61383437613333376365333166616439393531323062646438393661626461633231333239343033 +37653433383137336439356235636133393231316136396331313465383766666661386163383033 +62303836323663656463383261356563626164633137336337353362353661613533353164623662 +66643832363932316362306230316233303536613134623833373836656165643965393638323432 +65643336383635666161636639646263346261626462636135336433656266333134303132623936 +37386132353636623635623065326364313863633461653663633662623933343233306632653864 +61636431303439626334356164373339373230376261303232383339386635356430323935343431 +61303536343133313633653034656262343932643066306236356266336162323264343433323263 +36323134376432373664313030353833303735353734666539613138336162663536343533383135 +64363962363033623332333232393837306131363439353230363737633166333331343332303361 +63633039396537336439636639353030326435346461613535643865373335623730393238323561 +62663364366639313261356234323663656664383934383136306434343264356265343536666564 +38376162663536343363633333396439613633366265363938326233663232616661653033623265 +61353033396530613535303136376666306562666438336438313266373534666633636366386238 +61396666363938653232343539663530636161306134666463353962353565373539306335323665 +39656565333437373763643939626630346164323037326431323635666339356561303538626265 +61336161623531643839343732336238356530383634386230643830383833643033386534356636 +32363962356363303539383161346231383836316532663338373461663639313066353735613631 +31626264346130663736313232623564636631643138313861626230656166616333383536663566 +38636636613261346361666431346632323738393037323364373161333938386163346333313938 +65323537353863303835663936306236633838646461333264333938613464383861333833643237 +61323065396238626435653731393535643962383664626539623466626563356330393330643766 +36366663666435386636306635666636376233383139343862323062343136633661666235363964 +35306135616136653430653361666536663737373939306132636562626339636265326261633038 +30383564306238306238306430316163663539353934363630333534393037313064343436386233 +63633731393137376633356337313933383564346162356232393135373335636535343362396563 +31626261643632656562306331646134376234336231316563346239623032623939643137316463 +30383930373461616238393065646334323164616362643962633365306139393134656465316436 
+37303033353761646261376562613164663336663132303233316665373837653263323365326165 +66346130626137366635653135653739616563303666313363613331343131303161326464303736 +61363836656631643336363464386532626465656639306339313862333865363765386361376538 +36653266363938636533653337633830643562623630366365663862343835623433356366636161 +37323931396133633532313135383832353331623061376531323439613332643736653733643539 +30363135333039393164373065323237383466636561633839393865353362343364663532353634 +62613534313734653234346364343561373466666162303961383432356432373339666337313234 +35626637363837336138646231633930646466356262323265366364323331626633616263663830 +39663335383239393062343532393465313933313833366366356634636366613831303835626661 +61633539343433373937336133333764323933316237373232343633353833343232303639306531 +64643738363364663465343763303262363839303431383066616137343562393165323163663264 +37643936613061313738626138626337316166373431373062633037663437633635306436343531 +38393061643263383966393539386635346364346230326265663065373230643436333236356362 +65626462313331666432626531663033326266376334366339353835626538343836373065386334 +31393139656561643837626230653862653663613633313535663938366362343332343233393833 +37363335613963343033373336333462653332356664646462656130633830313930353962363737 +31396334336338323465383036373965643364623031616434316230636539613066303330643336 +36633438633830663436363164303631626436653931303165363036623339666130386134366234 +64393561363737656332616161376339313064316565386563623531336234646530363033323430 +33656533336439626434653137353931356336393162666464356134656461353038633962363463 +38643530326534306634656562306635316432363831356433306138653233393864396237653565 +35303737343464326161656536306164303963393762633638626135633930353339333238356161 +33343865643732663037643565663664656233656462663239643236633538616361616565653435 +30316663656434336365613331623936356534363233336135316465396366313431326138643531 +31353034353466353234616536643061303131653065663030336631373033653030366537646631 +36613562376138333761333637336135323961393962346630343164336562383662326161363336 +36326534303539666366373236336339643138396638646430343832633862353566663162353435 +39356338313262656337663534376166306132343131376432343338616534643036306334646536 +63333239326661636539656664633431376533663066316364633531653462653538636338656364 +33633436333336656363643436323363303861383737613533643461663666353034356666623964 +39343164653630653439393632343762383961396461383239613137393835636638393434316634 +36653937666461383762323238323064666531356236323238313736303636363439633033356635 +63613532393837326635306130306134363764333434633132616137336330323763373861613532 +31663135323361333834343635323437373035663132306139386431343031353632373264633665 +64646139373563326531363336386335643634333265306334306262393164653965666536303363 +34393838366632353161643534653139623330666436326135633364363538353164343534616439 +31303631636634366231303230316461346234393433346331326338346264383935623230623030 +34666438613838393462323866393064373865383030333939383839353935393830666535653231 +36333761386561323531626434633361383565653666653864663865326463643364316333613061 +33356663646432343331383261663335643262343634303766356538396133643534383430363665 +62376261623035373263393563623234336161346534336530386562643330336666663566346336 +30646664626339613239 diff --git a/secret_group_vars/beacon.yml b/secret_group_vars/beacon.yml index 720393def..20850abb5 100644 --- a/secret_group_vars/beacon.yml +++ 
b/secret_group_vars/beacon.yml @@ -1,14 +1,14 @@ $ANSIBLE_VAULT;1.1;AES256 -32393333656138643632623337643963393330663534616139343166666361376532306237373237 -3336323131303032313631646164336336386537383333300a373662656230356135653137396162 -61663130623635363939323030633635643535303661316161663939613936353364643330613335 -3933373131333364610a383130393533356364633836643930353966366339366636623961656431 -63393530336434663837613564343661353135613336646230313739353461363038306633663563 -37383539656336396535386265653730343265333535333531373730666365393034356561353836 -36316262653835666430623535616131323763633539393166346433663366303737366635633461 -64336265383463383066663265326536613137356439623463663862356333383062353034633761 -33613066306337396330353466343933306637323038376533356431363363356665313439353861 -36346464336362623238373330653534303964313162386139623463393735306232333133326461 -38306436313166386639613533643933333937623961646134626534616234376533373961366161 -65353330303836613438666532386536333939363636623435396432383166646661373536383665 -65373135636338363262346635373731643338363265663834386438306632653761 +65393763643136636265646230636465383031383063393030373863623566306362356162666365 +6437633134643061633865643032366237303438353432350a386461336632656139363733373839 +62633663613538663133343439623533323930353561326361643838316139663865383462663231 +6438616339356632360a316338646431353438303865633538373335366135336435366562656639 +64336564313034646438303439396432613434646137373361336536336438393762623463346237 +62323930323732646261346232656135636230383265336132326265373833346465626435386533 +66366235633363326331393736653833383936333564303062343834633866356463326464316362 +35363631353630383333323966613730373336313433386162373236353564306664366230663030 +32613833343637663363373931663535393664643132363236663436356261353161633435613136 +35373439373231613163613734376462616531376333643065623666643339303434666236333630 +30316233336336366635336537356465623963653232666433363465393762306637373133623365 +63396363656330323463633034393162323537333566336133336232356465333230383439626366 +30313237393437373732333134366133306538613331363234663165633033643834 diff --git a/secret_group_vars/db-main.yml b/secret_group_vars/db-main.yml index db64e1c49..0629bd046 100644 --- a/secret_group_vars/db-main.yml +++ b/secret_group_vars/db-main.yml @@ -1,28 +1,34 @@ $ANSIBLE_VAULT;1.1;AES256 -61613835356463393566373136623532326163663230316361653034656263376562663635303734 -3338393061663432663639313764383839653735326335310a306535333533643539643532326539 -34373036353331626433386336646466626265646630333061306639346164623936393336373631 -6164333538396139640a326664643666333963613938623539353962313666336262666134623832 -65373038613633323337346363323564356464303765636431346365323933336437323866393734 -37353731333637613135373338666430363865393663636335313536333139376432653734633031 -33323132623964366534646132316163666234616539333932323231613537663937633732373865 -66373165643563353739323736323032613237653663653437653263353164623535383035613636 -37633033613863303030626631333765306462623561373433313664633265363864303062663231 -62383430333233333435393762396439313766306363616433333461633666353232343865623634 -37343038363334333433666462313465343865653365323231643661626566386535343261306636 -35393933333739643532636461346665366666313139353730333330656363623636393165303433 -65646638396565333238343233663061363261303738306430376633633465376633363562326536 
-38666433306666666362393465666637613666343731646139356638343463363538343234633530 -31343732656462663062303036366230366365623334386665393238636136323635663739623131 -34633335643934613135613361346563363862666132316335366366383438353030343333386431 -35366661343464363530643634323138663962643436376136366263376462366463393462303838 -35383735326164393730613434363133366537386265393130343031633837383632396233623164 -66393633363639383638303736616564633266333937343461326262386564376137393533393634 -35663439336539623433633931336164393665386439643631633963623963313263616163326236 -66636263653064336265386432373338373630316337616532613563343434363264626465633639 -35306365646163383832333063333134346261623364306432343536323530646638353138313437 -31343965626565363166356632346562336435326438316332373163333765646366366166653536 -38666532613665363163616336323236646338646635643464386264313234313965346335306263 -31386362663535353334383864313236633834666439323038383766363831373462663034373832 -62613134643766643961663736636664366232653238643437326461316664353937326163623630 -663637666664386464653339616336383739 +32363835343162383736393837653534326237313362343234363037623661386634323939343839 +6434343662383832613038643834396634376135633164660a623032383231383664666531393465 +39616363313136373636616536616634313432363732306538366462383536373764316266626166 +3339666466313036370a333337646531393166646666343963323734323630396366333162626638 +39633139363435626337363235353364623731643965313830303239316265616165333838656562 +39336461626130366139636664323762616330373032326562653630396233656337303966386139 +36366161336563646666343438613834363734613861373737303165323838376564666361633937 +65613833666638303433626537663563653538623962343537363033636166323038626236346262 +31663830343532343430366231393139356264636134303131613132643435343834356336626437 +37373934613035333333383836353762313866343733396133323133663635626561323437653038 +32613236316362613436323961633666363639343135383862353764643639663837636538346162 +37336136386263333534356162383638613761396462393265336339346264306465316565323561 +66343239336562373137303538396230383836343831656535313063663137666365616434366337 +39383337343266326263643637353033623032396461373439323661313134303266663263316635 +32396362393935333963346237633239653936363263313239336130623039356565343034343435 +32656533363864393463373839343662666562616233336262326463316333356262376365636464 +64393931346532336438333564313838666432383434393531306563653335653166633565616137 +32386438623061656434353430326630313736663237326265386133383461333930393266383234 +64393433613832316533303661616334386136373336316137323636396530353365363263663966 +33386233343630336562303062313362313364353762366633323136393264656239633561633761 +39386338396132333862303530316339333531363066393637353263633663636365616261353639 +63643038323062373964376562313139653333396661636265623435613963653966666661333763 +38383663363532316636383961653338653864653361346131633364323863626566616265383238 +33383865623531663135616561643530613932376532336534343139366433383334306434363364 +38353964393466633635316262626339343333633636643265313562353432666262643130336638 +30636264313131653264373237333062323637623439366632353933666330396165663466326533 +62383762663332666432373262633737663365313063643838316163656533376439656438346430 +66656462616437313430663364303434373738346636366439646563663237376532656439373166 +31366436363866383638646435336461396665636430616365643563646566386565383435306566 
+38333331386439356264333534353934633035323232623233313935356238343765613362386638 +64383261616466396532393834356263383662376539306662666437333938333434383531393033 +39316137343738616166303861616136636638383338383035623138323437666630303965326237 +6561 diff --git a/secret_group_vars/keys.yml b/secret_group_vars/keys.yml new file mode 100644 index 000000000..63c773ca7 --- /dev/null +++ b/secret_group_vars/keys.yml @@ -0,0 +1,220 @@ +$ANSIBLE_VAULT;1.1;AES256 +62613330363966616537313961323535323839306364376432366335653131383962373037633734 +6662366365643936343339333931366639356434303733320a626239393066386537323536363735 +30633932383737636261343238333231303762376632396165356561313262303039346262643036 +6562616330353035380a643139666230313163303365663031383437616333646264353061353964 +39643862666634633431636563306532636239313238323765663638383663363265633336303635 +32353437306265623232376333363535653561633937623231633663643264626331643935643466 +63383132663239383830323633656462633061316661636630653064363430383838663737306637 +66633464366365306666633030353738646537623732656237353239366230643035306230656466 +31363061386433343738323466333664336461656235336664656435313966646635623866663664 +38663939343538633439626434633539393261633363366465323337306534383663396437356530 +61346266633163663436323166623166326265306239386665363532303864616432366461663865 +36386634616463353331363339356539303039383135303561303163633565386238653830386330 +32373331616431336666656364666466326361386331313764356633356632653731313863616237 +31393037316439666532383130613139396338373634356639343265613461363463373362356637 +64333465616538633534306565396430316336333238326535346632333839666565323932656638 +32363531306261386136363130363736376138636337323265303932363335373635343061393535 +30653135643766313033353338366234636363303339636230323439656463333066303036643939 +64396466336336383663313039623033666535306363353939346634336331633363303933383265 +35343566666165343531396137653562393134666566383066303639636461346533653862383462 +34633533303232323765333332653239393832393436666230633565323137633866363235383531 +65373863346538626536313438663930373666323532623562626538386139643830333435393030 +35646433616437396234326564363234393862623065386538653039313964646639653231383265 +65393032383663303061383637383565643434393163323336353563333766376238316439383839 +33356565316133633032303666643834303630613336333834333932646265323133303737633238 +62643530623262616233376135353335356133306334303032333462306536396435363833623531 +64616465326438313531316662336162383632313964376264643237333164616535366166333963 +62663430363563333938353930623361643764626536663930336266376666666636653262326535 +35383331383035376430356164393833313762656238656264353033656163313964633033373066 +37316432366365643635633130376261643032643332363063313663326637663839663863323462 +35373066353362633536653965643164333137633331396430343665346333323437356234643535 +64356666323337643933386139363234663063616163386362353066346433313838396265646533 +64326135353866373636613665396265346630303830346132303830643537643439363135336263 +66313065653937363965653065626365613438386263346261643463666334666264613765356463 +36656332323030656538313138376666396266613037356161393365613531303732623131623539 +31363937633639363534363930623265633366343334643037383661346564326630323537313935 +30653432386632356165313039366333383539353762336430363131613065396431633039616330 +32343039383861323939633938376631343330646437653566373232393839653330653437646435 
+33386330323630656464626137323263363539353033316461613137343461366462323966343231 +35643437326562346263653738383335613865356566326138386431343666633836353533663432 +61366631613037323265616536326662313934353162306638333738656565666630386634643338 +30313431666230303534333162343631656138623561313263393032633161613361373933376333 +38363436366336393231323535643561663433653061623236376436363165343162356530316237 +34303063366463646138393464656137663031656561666564623763363931323634376630643861 +37373432303239613733353530353164396234363064343138383535326364653532346438333238 +34663863343366633161393638326634623139653231333464333339316431626433613365653339 +62306461303130313863393235316166343763363635303537633935383639313637646634656239 +66393430386565313061666162643265356461373531346162623962663566623461316138663134 +65623333613964323937623633646238396136666631633063316262663162303361346337383730 +33303535383663346139353563356530326535333730666561353436303761356561343737636164 +34623762303565393363636338383034306337336338383736376333363930343665316434393439 +31363564653538613465343733386266393137633138666437333735633332643431633261373732 +37373065343032366165333264373836663962343534663463616365643634666534333235623766 +31356533353834613730326338386166363436323536653763376363363262643339376536323233 +62366239323039616563316435363930653133326163386362373834393761386636326465366539 +33333934343339616462616666636462393061303630653466306233313837623431323962336339 +31626266636335636265623266646630353839313333383930643836323734666230653566646566 +33653533636139376435383665346365393861353039353665643562656265653264366266663361 +33393535383766376437663138303239613661613861646237303933616433353035303631333164 +38346432363538396266626662646539303761653661363036313539393333303231623330616131 +37393538646436656135666234343866393232626364363436343432353465646432626263633835 +33663130633733656661623564356661663533353136613339353236393135626530376530383137 +37343332313161613761663933343230366234313139343432636131376666316531633836346262 +34373961653766323661396237613563626637373164643633363536373738366430333233663832 +34323534303134353161393131393936336335303033633635666438633832633638613065303137 +36333636346432393666306362636564356535643633646233333561653331303033626137333832 +65663632336637613034396461633737343836333231306334303738353231326661353965333864 +62306136633531666661613238633537666639393661316234356338336561636634643561333335 +64343361643262313539373432626364366165316564626237653564363461646330363066656166 +31633033373663396534373166386663383261363365323335363366303033373464313264386165 +36323531613530353539333230363763663662396536666231613361663564636534613966353336 +34336132396564653061616333643430316666393730663266383635626666666337633563643936 +61396539323831646564393635363834356131613832663837396437356436356638306336303037 +66333866303633356366356365386139643064643234393737373861343330376638313466646234 +37363864326565393339323034376164336163306466663564663634656364333263653662373439 +62636432633732393939373337663832333733613734613830363666346464613962353536393866 +62616365616636336130633131353637623538353532343763613133356530363634623762393137 +39363130643631636131393465303437616236333434326263663361363039353831636137356436 +65313165353062333534383061636462363462333438613334346237373431396363386163346266 +32353433636661643064353864353764663232636363313965643264353539643137336366373330 +61326635313261363838353036623135343662323762313635363065386261393263316632383766 
+37383234663632376637326265333832376635653137383635303932306264613739613861643635 +30633234333733313137633033323765646262333365333934323637653232363433383239636164 +32316136653133663438643662396166353135393038366431373230383863383338323631343364 +62366666386236316462336635373665386635353239316438353065666333303665313266626163 +38636165663838303239353362393063316366373466306132383366323639306563336164623538 +35383163353066663736353236353763653334336165633966656665303763396130353931613364 +37316134343431383632323032626233393233386333346237633030373231336239663365666161 +66626362363666306630626462326532306562343064333233323563343236613330663662393131 +61313066373666383166356466373533623439626639363534623064346437636235323435656532 +39326538343364333364626465613731316361663363666165383561616665336635613533323633 +62666331663366653636623164613635336631313031633761663534626236663263643539383263 +34663438386163656131396531303766613430336536356662396536646536393137376131663839 +61633165326664373634316436343437653731353435333937343565616437373235636566313733 +34326137663933633432323937663431323039623239356634396166303535366565333764313661 +37653031303263613634316162326662363230636464643563336133356364326166313063646630 +65613436613161636332346437356265376631393132303366643761613063633338623539323262 +61396664333165393362326533616538623035343735333366656438646238363031303864633265 +34333562373932373761313234343237336134333039626238633033336237643064353764663362 +31386565373233343131643965633861313734663139363832316332666366383830393133346232 +34316637346561303938636331373162653662643931316633396333643361396334323531666232 +34333631373139626130626332613531343962343437666135343539376530383936333430376435 +62613135346633356165313032653337363238313137333636323530653561383064366539303364 +65373130666437316365366464636138656133336135336638316564643939656565343738396664 +66636430616432363138663231386439383236613437663438346435356638303061656435653034 +36616434316338313338366631613430356536663436373334633862343033303038383662306265 +63656537653966353632626331393266366434343939636263653266316134663035343865383733 +39373036626435393364316631623331633036353265633435656165363765323732383163323532 +33376236356130633031666538363331653365383237663365303538313036666364356630373065 +64343733393965623365383762396463306335306264333335643163636637363937666132306261 +63313465373437396231316331336162336135633438333538313737613431323938656536383562 +32323537633261636635653736333232653233613861633338306665306433366136333037313930 +32383461383565333732613530623566343938313363633439323866663638643662663837346432 +35396235333165376138633364653735353432353536626138336661353031373436343633626261 +64613634313438343362613835353436623866333239346561656532316635613563363861613964 +32653837313038373261383035643939383332613938633035653238393062633335616433643737 +31653831396662343531646438336436643437333661383637376563306562326634353930306565 +31313732663535323535663665383565663434613832626234363166623738636362353635376238 +62326537303165303430366461626466396638323566343331303364383966613632613030323235 +66386537323839383133653838353336633333343039383334343632376466383438336533613066 +38393535336564353233383032353836353366303361636531353062623936643632636365386164 +39333961396331663263333732663664313362623639633234326437336237636231333466366265 +33306239323530653966396533336335336137313038373833633164653930333664346230326331 +62363135343238396431363234623236656466616432333039653031393363656238356665386365 
+34346134373266643666623539336533373939376366366436613833373765393965343532306263 +64663137613230623634396564303564653063323738346432623839626133376264326566333937 +30313462613830306431306366633834326465383166353964333036383131323535653638313739 +64353662353036643863356137393233343437313035303661383639343037656662626232393235 +36396238626139363536356537306365663936333632346134613836656161303764366637353761 +34343061616661653465323565313532383135313337643431336363343330636630313835323135 +64363433303031633135626634383139663535306631316334306530393630336537336336333733 +39623363323135326137393630306432356236396439643265623632376664666431323339336534 +61626631346661356665343262626262316266623863373263323736303637366238333034396437 +35383534353961303961373862323936373333343761376539333230636436316561303537366466 +33636339393839623233306632326433333630616531633039306532613635333063613532636266 +38626661353130336636363834346435663163353462623362366564373337373131343964663332 +36643161646263363635653832643863393235303165656137663035643334643031316632666332 +31336638383266633736303561386464333436343065653332363037653130393062346236653736 +32626165623865373837376436313439363962373133643038306631393262396538616532633233 +32616163303065333463373732656331613134613136633930346131636333316666656661366535 +37653964653632343831323666303533646166663465343764633538306462396265326533396434 +39333932303061373332336630343436653736363466323836303935333432343933366565323263 +39663033346631363738633233646432396433613732373832343562363139303539643732306436 +66663333383331356136383038653763346439353864323339356334616634346431366434383135 +33613361653232306663643134633130376237383032373739386666393663303836663838363438 +62346638303134666330363663346539353235363664393433663632333438333562323462313162 +61636337333337656566343365313864353133353834313137393636313232653761376331376235 +35656534626465396433653837656139623965353032636339313362306636643837313764313432 +39316463343032626633623837613934323963373335376164356164626666373663386132616531 +64376131363836303965313634393365333231636563313439386437396630316433643636623430 +62643666383336386262323138646332383836343331393137373431376438623338653464326166 +65323639393831376635643465383561316535376366633830646462383138333337666163346638 +65316466633537356366336435303532656137383630616130306561373739326163663234616664 +39363331613266643936656561353461616137363632313964643430613939646433393366663639 +36653862663836386631303139326665306330316561373235313132616537656264636365663933 +66303330313365643438633535646265386232396430343638393463663634633838396566613638 +62653535663435626337363934343934356436396466643338316164336633303535316337333136 +37643764366361633162626237663265333062626330313034383463303535336563333735313130 +37333437633937393630626330396637306237313233393861613533316333383365666631303933 +35343338306362316663393738323539333034333632643135326666633437363962653031313264 +33663938623065653935383334386461626232613435636361306239366336623161646264626566 +36363664333835323735613739393861316163393866616161376333653564613336363730376465 +39333365646664656264303665366363303331666334363263323938346562666337353632613465 +35306130343432306639353565663161353661636634353734643862353765373032363764626561 +31626539363731326265633437623539336532333238366137633263376661633033623031313432 +30316438363630316136353630376561653635386235633466393563666432333834313263386366 +66663363306133316339616162396633303636386562336462346461333839346433626561383338 
+65636239383439346662313539323538343766303632363965323435623966363935356639653335 +39663664633262346539656134356439616364343539356234306266636235396134373536363335 +33383366653134313738373565343539616465313434633838656661326139303033656661646533 +31383336303730336632653239643236366338623263336232383561666635393534363934323936 +61333330376337626266663863333738326136663561333635313531313863343734323761386538 +38373537323336666462616231643837633866613763356336353434646434376436306361373661 +64663437666639323839376165666234316233613131373332303836613532333664323666663665 +63363035346330303433386530373563636363336163303663306165626162366561346231626262 +30323062383162336166333763323235343738376435656165313861373737326131316266386532 +38373339313066373766653065353366396438363163663832653238373332663964636162363463 +35333832373365633561373732346637623832653064356531643232333537326132353362333032 +63373535393366633031623161626337656230363830373261316239636335343931376663313833 +33623939313432323562663332376635353339623234613665313563383562386539393263393631 +39386661313930373963386433393662633936663931623731623939633039646664373933313333 +64373634646533366161303464643965373765653733343439353432316164356639396137653633 +35343639336263646566656235643232336361653131626235313431333831353466303035346332 +35663030346333613932346136343731646366643865636337376438666237353033633663643231 +61646538373432323065623338636631643334363634376637643263356461373062386362333663 +64313432353965646334653965373636353730373961393035376139396563303739653532623666 +32393163666638346263326465646533326463306434643365313235333666366338633233383930 +62656238376339396362383361663839373533353435316431643166333762646235613862646639 +35633932646265396261636638396338653532356166626439383762326662366231326462313764 +30303234313930393364653137646434386262373261633461346263373037643539616135363432 +63346432646662613665343066623538653037636462303835613366653132366266353363646239 +30663162343262373961373139363639373966316633333436386536383737366666336634646664 +30613662313832636130393265643833313937353337623664656638373937343338386234636238 +30316237393339373132636335353961653237623264306238336239396562656631306630346530 +61303534383531303031616662663433396666666233626236656463643938393939326664326534 +62396239633163333431613061646164396431343863653934653866663566386664366361313061 +66323230616335666532383734383931383864343666653163613536613736626238336264356538 +35373434343462323434383964386465333433363239386563396439663734376331373934343338 +61343964353531393430396165633930383661666639643431393563383634613835643837303033 +65363661353064366464373835333161333330356131343332343237343031343435663634303734 +39323234356537663631313765383139653833393731383366623837653238363139393863396339 +33383065626364663635633133383866353338336436366434623363303039323731396237316435 +34323766343434393839303032363666353833343664333064316665633538363464316430333364 +34333730663462643665633239383464383963623361333163376433343832646237366333326337 +64323336306131633031336236383937323030356636623232363431653432353536346263356434 +32623034633636666531313232306565623938366563663830383531313330613161376361313639 +65663731363963333039393234333462303738303336663834396238383130386331393035373031 +34383439616363653231393630313862346331643464343736643633323931373937313332313839 +32653634393962643365396366316162363739623532323530616236616331353563383161336466 +38323862326536396233636438343062353365386639336262333737636461373463663634643237 
+34323736366461653731303536346266616431616163316139396636333832306235633464366630 +31633338613335346134396133643163663563393863326538623536366661303333616433363930 +66386530333431636539373630346466663166623465643035313735366533363632393232373031 +63333934666631636535373433383238303036383231326332623065303837376634663038363866 +38633532333639666433396166616639396539336363363537393262376437323865663934626132 +65623363303761663332666563663235363738383834346165366331386631623863323736366636 +38303231306631663763313731343134323239323037346466363536333135353437613632393439 +64653663353334313936343431356236356333373337353938623536396133316133356664353233 +61326135636666666430306636303563616639396666616137646265643231316239313763373437 +336665306635633563306233373363616335 diff --git a/secret_group_vars/pulsar.yml b/secret_group_vars/pulsar.yml index f081d7c3e..c134b0adb 100644 --- a/secret_group_vars/pulsar.yml +++ b/secret_group_vars/pulsar.yml @@ -1,59 +1,62 @@ $ANSIBLE_VAULT;1.1;AES256 -61663735623166653365663961363236623362303430646631643435373066653234376133383735 -3035393338393337306162356536663231363138306238370a393937333130373236613739633863 -62386136663631366533353565383135666566666138303137363430613266393365386563663364 -3830393631323063350a393732396133356664326334643432373232306435363962373235386232 -34303764343963313337643866613934353333656239623733393135396232363863323434366161 -39366236653466613635343830333163313865326433366136306663623461306636326533396237 -61343934646131323332366532623532366339323164353335336139346330626266303437623066 -30663837376338323465373964646564343739313130363965366461383334326235343239383535 -62323937623364333364373336616566383339656132316536653961366162396434363562343733 -32376661343164306465666138303734643938363537313063323566383032326133333738333761 -65643866303666386365636464633664333437663036643237643762666135326165643730343264 -38356337316162396635616438383036666634353963373561383430636432616333393564373062 -33363237623432373037616363633430393732363665376232303565393132613139613439373463 -34663739643464383566393064366239666366343336333362363239383231663864633230626435 -65633239663561303039316432313638633434616364353463303733336330633965396638646163 -39376266653739383264663162393636303566653437393931373762653734313336613336326534 -31323638356233343636396533623131306338643735393962626133303139386561306565663363 -62313462336535616536663233333139626438626433656138316533373836393366333634373965 -64383561393738323963656337613737303434353538386630656438653834396266326532343930 -62313730666134663435616461616633613436666633343831626562336565663263346563663563 -39373363653062666465333732326533313131353433373736616337663464333363303461373931 -32313733303939383831363231333138616530376565313363353232333766383332636564626665 -34636238656462313031396530653365626635656436323165383638323561633032313539633432 -34653465353033366232303963323637383938393933363030643635303061323962663531373138 -30653962616431663837383133366363333631643262343238653666383062353433323836303166 -38363263633636393665376237666634303966386234613038353262336538303664656263346563 -66353462653463643530383261626332663530396333356263316666323964626439653364333532 -30356261643231343330343334343839356231343038303730616165346566343364326464303736 -32386436376133363961323239326462373463303238366163383064363261623636323138616637 -61663666636638316338623266376662396462383633363661326634303339313464646635653832 -31636565373661653265663538666539356530623764366538376637613761616230323162303763 
-38353637383137336332636338343835333130646162653062616537313963633536666563643965 -35386263356662666135613037636434363834326335363062303632356331363132363135663236 -34383730373930393664336536666665633062333035383361323661366564343233386439626134 -34653436616162366138653163626161323464313161326265616366366333613961326135613131 -37646136373539313264313763613663653466383661626333386162393735656632346633656237 -32386636356138366636363934303766343065343637343835363338653866333734313030346435 -30336362366532653664306562306165373865636162613839616232326130333635383134643135 -66353834333731623438323134396637363835646563373237373331383961633832663333643163 -39306633653063633837643062303263376362393661393131363931336666353133363039643064 -35616165353661393966626432326439343964653935666565386432373765313838663063353266 -36363532316632306665346231333139393463613134643364653937383663343265323730323839 -39303435333138653164353335633332346666323038336166616362646465383736353437346631 -61326135646637616537383263396437396434613638306264343136326432343865333735363466 -34376261633532393533663365313733343064396164663635373538633430343631666330386363 -31613564333065626465396234326437323764396632353130363039353338346533383265616164 -63643636306232376165353831386134353030636166643430343237613739636130613939636533 -38306637366464646261613935626561353164313733636562613934373266313261356464646465 -37656438623864323135663830396337303437656434303966323738393739373332303064333236 -35356561353334633530666130613934353634356233303933666636663331633461383830366639 -39333238656139386336616165393537623564343765626235323837316635386235633263306364 -37663566306664373637623663306334336665353033613034303139363666326138623335316334 -61613263376464353934393135303835323261616231343134633732663338633733373163313037 -32343137666439393638313734383235363566633939356632626334623430396233353366633964 -30306665316635396238396361643937323261653463303637306165636266393431646133633364 -39396134346135336332643139396636353461653531643138366366653865373866363164626166 -33663834373761356631636265613639653834313139393234333833636438313237376134383062 -3539653766303830356233313437663734316333626130356234 +35386435356363613338643238343765383534396135376365323866623630646333623864643937 +3865383865383838323734656234323365336437366565340a303966666162363239323732383336 +66343238326463393137326266666664386262636538373331336162393464313239333233666664 +6439316430663763610a333563323565343233666264353764653637323865333839323336363664 +65366439663761613266613965393936376334646334643939306663356436353164626234656234 +35333537333030663037313562313764343337373030626233623465353030343637633864326139 +65373766356138616433383862373631333862313364623163613830663236643738646531303335 +30383432636362373434343831373430613664393134633439306365613235336138393831313164 +30643433363435343834333736646665373239393731616635636632316234653931343931623431 +38663063386662666536623138303237333461353834353765323830636639303866646438393333 +32373536333833633339396262646466626636623932616438323435313566373164636537303938 +62643762343464323761366438383033393730303836376335373932353964663038383464356365 +37333732646436396566383032316230373535363861633232363332343732323633383564623331 +64343031303839353334626138663433643961376236626534343366653061343530356637383931 +36373231633461396135623236623833376230653037373632643736643365353639343533613961 +32363630636132306437386133303064613063366631386163613563333930336166343235316535 
+63623837383030666633633330656266623334333535333861666138663837383265346332363637 +66646136346164626334663830336465336166613137373138643939356334363461653537376465 +35646466346266313532313130373038666236386264366134376361373631393432333730313565 +36653738353837623761626536346636343133363231323938373036343934303264333463363635 +32353335666661663036396536306164613566643934373032643933616535356634633030396362 +66393333663332663862313438316130376561333466383666663038623931383939623631613237 +37383336313838666162336533323238326430636461386632383934626533613134646432393431 +33333137666362663637646130626235613733303164346336393363333232326133646331333534 +33653539613862333336653736376266373566643530616461333262386664663438653537353736 +61333236363334343363336530343732373463376230353063643035363962653434333334623133 +34623035333632663565383835313238346462663331616632393361613064643931393561356530 +32313731333334623830623466663835643937383230623135303466363737313333373239643538 +30613437653432353230383161666166656234373131393430646634363764386664663033393365 +64656636336666303536653832656561643932643866323738313133393364643235346136626631 +33353432376137363336353063336463396263333864666230333337636236666165396364346437 +35663730393339366230303334313636636165363531383264653639643930356438393633633961 +39623238643366626663363166343836323162393866626134663739326365666166663364396638 +39626636643736666630396665326337373036616539633964343466636333343663373938646663 +38306230313535383635643963623639666632306433306231383233373037343339353965376364 +31356338666635396232306435303737633232303730613733303435616534303131306339623364 +30386434616232316637373439343431633136363762366139366664386566393234643735656533 +33353735316561306337383336346363656538366436366539633338343436613066356537393561 +35316466336335623966616133643835613965333434376232396465313639366265636363323030 +35343466363634666463383430666238326338366436343433363062353231616239656537643135 +32343530323635313530633131356564323033646132343463373133393064616336326664366638 +62613539646163373938376664303961303031303639366630353039656430336338316334376562 +63636534396366653364623738343433643836386436646133626638303032366436623431306165 +62663932343963646464656263343833636534373536316166663437363234616632373765366163 +63636237353531303163613533653137633437663337343536336333663664336166326565626538 +31306339333932393733393537643062333939336139333835376433613866393861666535653039 +32623364303034303962343739373431353530313031363532653538326230363336666631303465 +32313632383935623165613935323938303735336237363033323934306530653164643537643732 +39303365313361363837363538653262616336626231373733663435366232326338343033343039 +38313166383132326536343761323031306262306461353263373631376262376430626234636164 +66356434353336653266653632316361663133653061323165646563303864353338323864613836 +37616632616633313665336265373762303834393265656431643439363766626338313961626432 +61666438326433333564616530663536383430636137663032393763643666643538333834336631 +36626637656132653564663434366431366265643265313833363761656264333235623130363933 +36653563346334636136396336653737633337303838326261373031373937633865666332393864 +63363738303530343135663362323530323736613531616161323538613565353231346161316433 +36656235616532336433353465343931356231313933646666333136663762353034336266623631 +39323039396432396163373430306238363230326435623032626239323738336566393430373665 +62393333626366653033326464313936623933353534666237373437336566636237363062356462 
+34626365623331333636633134633361306434353733646330393939326439376665303639326133 +3864 diff --git a/sn05.yml b/sn05.yml index 0ace6206c..3de4e98d3 100644 --- a/sn05.yml +++ b/sn05.yml @@ -1,5 +1,5 @@ --- -- name: HTCondor 8.8 Central Manager +- name: Galaxy DB server hosts: sn05 become: true vars: @@ -11,32 +11,28 @@ pre_tasks: - name: Install Dependencies package: - name: ['python36', 'rsync'] + name: ['python3', 'rsync', 'perl', 'glibc-langpack-en'] become: true - - name: Set default version of Python - alternatives: - name: python - path: /usr/bin/python3 - name: Disable firewalld service ansible.builtin.service: name: firewalld enabled: false state: stopped roles: + - geerlingguy.repo-epel - role: usegalaxy_eu.handy.os_setup vars: enable_hostname: true enable_powertools: true # geerlingguy.repo-epel role doesn't enable PowerTools repository enable_install_software: true # Some extra admin tools (*top, vim, etc) - usegalaxy-eu.dynmotd - - geerlingguy.repo-epel - influxdata.chrony - hxr.monitor-email - usegalaxy-eu.autoupdates # keep all of our packages up to date - usegalaxy-eu.autofs - ssh-host-sign # Applications - - usegalaxy_eu.htcondor + #- usegalaxy_eu.htcondor - usegalaxy-eu.ansible-postgresql # End Applications - dj-wasabi.telegraf diff --git a/sn06.yml b/sn06.yml index f7af0d969..a4872833e 100644 --- a/sn06.yml +++ b/sn06.yml @@ -52,7 +52,10 @@ - secret_group_vars/db-main.yml # DB URL + some postgres stuff - secret_group_vars/file_sources.yml # file_sources_conf.yml creds - secret_group_vars/all.yml # All of the other assorted secrets... + - secret_group_vars/keys.yml # SSH keys - templates/galaxy/config/job_conf.yml + - mounts/dest/all.yml + - mounts/mountpoints.yml handlers: - name: Restart Galaxy shell: | @@ -97,8 +100,10 @@ - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFH54+qZEBeU5uwIeWWOViLcC509qxoRW6oN0VHRQr4r nate@treehouse" - "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOBINXdjILF6x3WuppXyq6J2a2oSLR6waZ6txgjYJogHdIKPbI0TdReCv4EVxxYRY/NqGpHbjkqfRTsf2VgoU3U= mk@galaxy-mira" - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACB5Q5blymkTIRSzVzXITOGvBuI7W0L9Ykwfz8LJGPraaGVPiezzFGvjhqwX+EyCqQPt7JprR5mimJRw/JN3nBXWAHjekvmB5FuILkk6m5fOiQJ5QhRMyQ5GfxODAvGbHpTuWHbYJLWD5fhcboKPxlXOWy4xY9kDZVuQvEKisNKYBsFLA== sanjay" + - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de" - https://github.com/wm75.keys - https://github.com/gmauro.keys + - "{{ galaxy_user_public_key }}" - name: Set authorized SSH key (stats user) ansible.posix.authorized_key: user: "stats" @@ -115,7 +120,8 @@ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9itwmMTgcRCrwjO3RCke/NWJsRb3N5lUTF5fubw/V0PlSbFDuv7IMKp5hhFfhiOuVRMoXQBHnnOWWu1xd9jDqZKXmNGHg06JtnddQ/OVprZ6Dpu2g9pZ6uc4r1As94mvpICLVK9lNHPNA60sIwTsTRVFSb1VbXALI4iuLOxPLzIxhrZ1LouK19VWetJIQ/Uq8UalfTDr1KOyQQ/ZgjBeWdD5InSOl3sbPJZhGQLqSHIi0MVxH527CMnh1PQIxiD/vqX8SK7HaKvUZIHHzz5TFrUgrw7BkfRd04UIgr1OhnMf1E413yZdeQzJQV7C1CL9MbAThXX6Ruvs0Rg3ylazpYfwDifMWvqLeRoTCDUbGx94ySO/wzer/kcjpJ27iydNo+en/hImMYz7kktf6A3BzOYxFmOQvnQ9cChP+iuk7fTiQZS7Qtkz+axNIvwCkm7Hmgt7vYizHc+OAtKmzZFTHecozjxCXs9RwynnWpToheP3ZPYgOpKc7pkUngRSjXyc= bebatut@bebatut-ThinkPad-T14-Gen-1" - "ecdsa-sha2-nistp521 
diff --git a/sn06.yml b/sn06.yml
index f7af0d969..a4872833e 100644
--- a/sn06.yml
+++ b/sn06.yml
@@ -52,7 +52,10 @@
     - secret_group_vars/db-main.yml # DB URL + some postgres stuff
     - secret_group_vars/file_sources.yml # file_sources_conf.yml creds
     - secret_group_vars/all.yml # All of the other assorted secrets...
+    - secret_group_vars/keys.yml # SSH keys
     - templates/galaxy/config/job_conf.yml
+    - mounts/dest/all.yml
+    - mounts/mountpoints.yml
   handlers:
     - name: Restart Galaxy
       shell: |
@@ -97,8 +100,10 @@
         - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFH54+qZEBeU5uwIeWWOViLcC509qxoRW6oN0VHRQr4r nate@treehouse"
         - "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOBINXdjILF6x3WuppXyq6J2a2oSLR6waZ6txgjYJogHdIKPbI0TdReCv4EVxxYRY/NqGpHbjkqfRTsf2VgoU3U= mk@galaxy-mira"
         - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACB5Q5blymkTIRSzVzXITOGvBuI7W0L9Ykwfz8LJGPraaGVPiezzFGvjhqwX+EyCqQPt7JprR5mimJRw/JN3nBXWAHjekvmB5FuILkk6m5fOiQJ5QhRMyQ5GfxODAvGbHpTuWHbYJLWD5fhcboKPxlXOWy4xY9kDZVuQvEKisNKYBsFLA== sanjay"
+        - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de"
         - https://github.com/wm75.keys
         - https://github.com/gmauro.keys
+        - "{{ galaxy_user_public_key }}"
     - name: Set authorized SSH key (stats user)
       ansible.posix.authorized_key:
         user: "stats"
@@ -115,7 +120,8 @@
         - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9itwmMTgcRCrwjO3RCke/NWJsRb3N5lUTF5fubw/V0PlSbFDuv7IMKp5hhFfhiOuVRMoXQBHnnOWWu1xd9jDqZKXmNGHg06JtnddQ/OVprZ6Dpu2g9pZ6uc4r1As94mvpICLVK9lNHPNA60sIwTsTRVFSb1VbXALI4iuLOxPLzIxhrZ1LouK19VWetJIQ/Uq8UalfTDr1KOyQQ/ZgjBeWdD5InSOl3sbPJZhGQLqSHIi0MVxH527CMnh1PQIxiD/vqX8SK7HaKvUZIHHzz5TFrUgrw7BkfRd04UIgr1OhnMf1E413yZdeQzJQV7C1CL9MbAThXX6Ruvs0Rg3ylazpYfwDifMWvqLeRoTCDUbGx94ySO/wzer/kcjpJ27iydNo+en/hImMYz7kktf6A3BzOYxFmOQvnQ9cChP+iuk7fTiQZS7Qtkz+axNIvwCkm7Hmgt7vYizHc+OAtKmzZFTHecozjxCXs9RwynnWpToheP3ZPYgOpKc7pkUngRSjXyc= bebatut@bebatut-ThinkPad-T14-Gen-1"
         - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBADKblzzPBc3+dEfFvhJQHsHGkFFN6ORjfXo71P1OutwcKEMCIcNkZKJHhYkLLrfTDN5JJ5tK2L5AaSxdwETofwm4AG1xv3LuoYsXC6e3sjKi09BVmzef520pIMW+rvL+hESwSazZaUAC0wDcH4aNDTonZYcAY87rpMX7pNMkNPJvWilUA== mira"
         - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFH54+qZEBeU5uwIeWWOViLcC509qxoRW6oN0VHRQr4r nate"
-
+        - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de"
+        - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3UNEZrYCERrpZl/NzL9/q2dOyJJT0Ini6AEmFyPubh6yaVf4Iko+8MVYUx92RKuaY5bE8cC7JcLRZMKZ2lPVkP5tUUwILl2d2/sWlHnyZ1oGdOTfPB6b+boDansJcm3HnREO7umAqgbUErahWIDWOk3SPXOxirqsWrElNm5pKr8Ng8KWz9Ht9/J0oDxPxwYQcz64ogT7ERtCn3+UoEM/XQ4iVHvAk/rwLev8symd5SzrxFt6nI2vqduFBEMoA2VTISuI0rYVFqYpQ7I4QxOl5GbNHUeuvB5+YKh5P2QAi9mOPdp8MO2ZJnjtT+x7EgJKXHu2EBKDwW/93bPfL5SGFUK80HSyYG8sicSYpAUk1asE+T90QvpGUrXPaIabWXHvhj7vq7Y6l103o9T4N2qCpe8e4e3WthGzYu9Tfau7vko7pd+F7rnkvjvmj+WmSJPv+jadpMDyiUyYEIj4ncik89FOWIzt/IYYiC+po6nmD1del4s0Zenr2gEEsjyhvOiPLI7+Bw+w4m/xiHvlP1s40ZtIsUhwOYBEQUbfoHG0zR4jIgD8h/VA47oAvkNDGKJpkEbXpRpUtQxJLDNcnUFjEG86+NGWOuXzwllrH80TFueSFVg9dHu8d5Jx5m9LeZbf+vRnpSpIgQlVR/C4hFFYtK44/Daz+TqOXMgQqabypfQ== catherine.bromhead@unimelb.edu.au"
   roles:
     ## Starting configuration of the operating system
     - role: usegalaxy_eu.handy.os_setup
@@ -136,11 +142,9 @@
     - galaxyproject.cvmfs # Galaxy datasets
 
     ## Monitoring
-    - hxr.monitor-cluster
     - hxr.monitor-email
     - hxr.monitor-galaxy-journalctl
     # - usegalaxy-eu.monitor-disk-access-time
-    - usegalaxy-eu.monitoring
 
     # Setup Galaxy user
     - role: galaxyproject.galaxy
@@ -221,7 +225,6 @@
     - usegalaxy-eu.log-cleaner # do not retain journalctl logs, they are unnecessary/risky under GDPR
     - usegalaxy-eu.fix-ancient-ftp-data # Remove FTP data older than 3 months, create FTP user directories
     - usegalaxy-eu.galaxy-procstat # Some custom telegraf monitoring that's templated
-    - usegalaxy-eu.fix-missing-api-keys # Workaround for IE users not having a key set.
     - usegalaxy-eu.fix-user-quotas # Automatically recalculate user quotas and attribute ELIXIR quota to ELIXIR AAI user on a regular basis
     - usegalaxy-eu.fix-stop-ITs # remove IT jobs after 24h from queue
     - usegalaxy_eu.tpv_auto_lint
@@ -229,3 +232,4 @@
     - dj-wasabi.telegraf
     # - dev-sec.os-hardening
     - dev-sec.ssh-hardening
+    # - usegalaxy-eu.vgcn-monitoring
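Both gateway plays (sn06 above, sn07 below) now load secret_group_vars/keys.yml and authorize "{{ galaxy_user_public_key }}", so the vaulted file presumably defines that variable. A hypothetical plaintext form; only the variable name comes from the playbooks, and the key value is a placeholder, not a real key:

```yaml
# Hypothetical plaintext of secret_group_vars/keys.yml, which is kept
# vault-encrypted in the repository. The value below is a placeholder.
galaxy_user_public_key: "ssh-ed25519 AAAAC3Nza...PLACEHOLDER... galaxy@example.org"
```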
diff --git a/sn07.yml b/sn07.yml
index 53f4a0d25..fa3fee6a1 100644
--- a/sn07.yml
+++ b/sn07.yml
@@ -52,7 +52,10 @@
     - secret_group_vars/db-main.yml # DB URL + some postgres stuff
     - secret_group_vars/file_sources.yml # file_sources_conf.yml creds
     - secret_group_vars/all.yml # All of the other assorted secrets...
+    - secret_group_vars/keys.yml # SSH keys
     - templates/galaxy/config/job_conf.yml
+    - mounts/mountpoints.yml
+    - mounts/dest/all.yml
   collections:
     - devsec.hardening
   handlers:
@@ -93,8 +96,10 @@
         - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFH54+qZEBeU5uwIeWWOViLcC509qxoRW6oN0VHRQr4r nate@treehouse"
         - "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOBINXdjILF6x3WuppXyq6J2a2oSLR6waZ6txgjYJogHdIKPbI0TdReCv4EVxxYRY/NqGpHbjkqfRTsf2VgoU3U= mk@galaxy-mira"
         - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACB5Q5blymkTIRSzVzXITOGvBuI7W0L9Ykwfz8LJGPraaGVPiezzFGvjhqwX+EyCqQPt7JprR5mimJRw/JN3nBXWAHjekvmB5FuILkk6m5fOiQJ5QhRMyQ5GfxODAvGbHpTuWHbYJLWD5fhcboKPxlXOWy4xY9kDZVuQvEKisNKYBsFLA== sanjay"
+        - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de"
         - https://github.com/wm75.keys
         - https://github.com/gmauro.keys
+        - "{{ galaxy_user_public_key }}"
     - name: Set authorized SSH key (stats user)
       ansible.posix.authorized_key:
         user: "stats"
@@ -111,6 +116,7 @@
         - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9itwmMTgcRCrwjO3RCke/NWJsRb3N5lUTF5fubw/V0PlSbFDuv7IMKp5hhFfhiOuVRMoXQBHnnOWWu1xd9jDqZKXmNGHg06JtnddQ/OVprZ6Dpu2g9pZ6uc4r1As94mvpICLVK9lNHPNA60sIwTsTRVFSb1VbXALI4iuLOxPLzIxhrZ1LouK19VWetJIQ/Uq8UalfTDr1KOyQQ/ZgjBeWdD5InSOl3sbPJZhGQLqSHIi0MVxH527CMnh1PQIxiD/vqX8SK7HaKvUZIHHzz5TFrUgrw7BkfRd04UIgr1OhnMf1E413yZdeQzJQV7C1CL9MbAThXX6Ruvs0Rg3ylazpYfwDifMWvqLeRoTCDUbGx94ySO/wzer/kcjpJ27iydNo+en/hImMYz7kktf6A3BzOYxFmOQvnQ9cChP+iuk7fTiQZS7Qtkz+axNIvwCkm7Hmgt7vYizHc+OAtKmzZFTHecozjxCXs9RwynnWpToheP3ZPYgOpKc7pkUngRSjXyc= bebatut@bebatut-ThinkPad-T14-Gen-1"
         - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBADKblzzPBc3+dEfFvhJQHsHGkFFN6ORjfXo71P1OutwcKEMCIcNkZKJHhYkLLrfTDN5JJ5tK2L5AaSxdwETofwm4AG1xv3LuoYsXC6e3sjKi09BVmzef520pIMW+rvL+hESwSazZaUAC0wDcH4aNDTonZYcAY87rpMX7pNMkNPJvWilUA== mira"
         - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFH54+qZEBeU5uwIeWWOViLcC509qxoRW6oN0VHRQr4r nate"
+        - "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABRaLHL8mgW86rbtdUh6TY4rs7/la8hAGeSQ3jBF7LMwYZnbS32YDMYvDq3KgNu5WqSMFvkxNm3vfTAbd8CXBfakwDBFBaD9kO0b2t4/p4VoFUsd3B2OvmTR7Bsg7OxTGJJ7aUP/SzTg+Z4NzsmHwQ9h31gfI7n/buZD4S1edQke19Y6w== dominguj@informatik.uni-freiburg.de"
 
     # Add SELinux policy for nginx to interact with the gunicorn socket
     - name: Check if nginx-gunicorn selinux policy is installed
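The sn07 hunk ends at a task named "Check if nginx-gunicorn selinux policy is installed"; the task body itself is outside this diff. One common way such a check/install pair is written, sketched under the assumption that a compiled nginx-gunicorn.pp policy module exists (the module path is also an assumption):

```yaml
# Sketch only: the real tasks are not part of this diff. Assumes a compiled
# SELinux policy module named nginx-gunicorn.pp at an assumed path.
- name: Check if nginx-gunicorn selinux policy is installed
  ansible.builtin.command: semodule -l
  register: selinux_modules
  changed_when: false
- name: Install the nginx-gunicorn policy module if missing
  ansible.builtin.command: semodule -i /usr/share/selinux/packages/nginx-gunicorn.pp
  when: "'nginx-gunicorn' not in selinux_modules.stdout"
```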
diff --git a/templates/galaxy-test/config/uwsgi.ini.j2 b/templates/galaxy-test/config/uwsgi.ini.j2
deleted file mode 100644
index 43db9b5b9..000000000
--- a/templates/galaxy-test/config/uwsgi.ini.j2
+++ /dev/null
@@ -1,54 +0,0 @@
-[uwsgi]
-; basic settings
-master = true
-processes = 4
-threads = 1
-listen = 200
-umask = 027
-
-{% if galaxy_zergpool %}
-zerg = {{ galaxy_mutable_data_dir }}/zergpool.sock
-{% endif %}
-
-; uwsgi performance/robustness features
-single-interpreter = true
-post-buffering = 65536
-thunder-lock = true
-harakiri = 600
-buffer-size = 16384
-
-# Mapping to serve style content.
-#static-map= /static/style=static/style/blue
-
-# Mapping to serve the remainder of the static content.
-static-map= /static=static
-
-# Mapping to serve the favicon.
-static-map= /favicon.ico=static/favicon.ico
-
-# Allow serving images out of `client`.
-static-safe: client/galaxy/images
-
-; logging
-; log-maxsize = 134217728
-log-master-bufsize = 15865856
-
-; application
-pythonpath = lib
-module = galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()
-set = galaxy_config_file={{ galaxy_config_dir }}/{{ galaxy_config_file_basename }}
-set = galaxy_root={{ galaxy_server_dir }}
-
-die-on-term = true
-hook-master-start = unix_signal:2 gracefully_kill_them_all
-hook-master-start = unix_signal:15 gracefully_kill_them_all
-
-set = interactivetools_map={{ galaxy_mutable_config_dir }}/interactivetools_map.sqlite
-python-raw = scripts/interactivetools/key_type_token_mapping.py
-route-host = ^([a-f0-9]+)-([a-f0-9]+)\.(interactivetoolentrypoint)\.(interactivetool\..*usegalaxy\.eu)$ goto:interactivetool
-route-run = goto:endendend
-route-label = interactivetool
-route-host = ^([a-f0-9]+)-([a-f0-9]+)\.(interactivetoolentrypoint)\.(interactivetool\..*usegalaxy\.eu)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
-; If the TARGET_HOST variable isn't empty, then route the request to that server
-route-if-not = empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
-route-label = endendend
diff --git a/templates/galaxy/config/build_sites.yml.j2 b/templates/galaxy/config/build_sites.yml.j2
index 4776ee74c..aeb8b9f2f 100644
--- a/templates/galaxy/config/build_sites.yml.j2
+++ b/templates/galaxy/config/build_sites.yml.j2
@@ -1,6 +1,7 @@
 ---
 - type: ucsc
-  file: "/cvmfs/data.galaxyproject.org/managed/location/ucsc_build_sites.txt"
+  ##file: "/cvmfs/data.galaxyproject.org/managed/location/ucsc_build_sites.txt"
+  file: "/opt/galaxy/config/ucsc_build_sites.txt"
   display: [main,archaea,ucla]
 - type: gbrowse
   file: "{{ galaxy_server_dir }}/tool-data/shared/gbrowse/gbrowse_build_sites.txt"
diff --git a/templates/galaxy/config/container_resolvers_conf.xml.j2 b/templates/galaxy/config/container_resolvers_conf.xml.j2
index 44137bdeb..934028d33 100644
--- a/templates/galaxy/config/container_resolvers_conf.xml.j2
+++ b/templates/galaxy/config/container_resolvers_conf.xml.j2
@@ -12,8 +12,8 @@
          automatic mapping, preferring cached images in the accessible docker
          engine. Requires docker engine.
     -->
[resolver elements removed and added in this hunk; the XML tags were stripped during extraction and only bare -/+ markers survive]
diff --git a/templates/galaxy/config/pulsar_app.yml b/templates/galaxy/config/pulsar_app.yml
index 48c28db52..5dae7d45a 100644
--- a/templates/galaxy/config/pulsar_app.yml
+++ b/templates/galaxy/config/pulsar_app.yml
@@ -1,5 +1,5 @@
 private_token: {{ pulsar_private_token }}
-staging_directory: /data/dnb01/galaxy_db/pulsar_staging/
+staging_directory: /data/jwd01/pulsar_staging/
 tool_dependency_dir: /data/dnb01/galaxy_db/pulsar_dependencies/
 
 managers:
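The pulsar_app.yml change above relocates Pulsar's staging area from /data/dnb01 to /data/jwd01. A hypothetical housekeeping task to accompany it, since the new directory has to exist before Pulsar writes there; the ownership and mode values are assumptions:

```yaml
# Hypothetical companion task for the pulsar_app.yml change above:
# ensure the relocated staging directory exists. Owner/group/mode assumed.
- name: Create the relocated Pulsar staging directory
  ansible.builtin.file:
    path: /data/jwd01/pulsar_staging
    state: directory
    owner: galaxy
    group: galaxy
    mode: "0755"
```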
diff --git a/templates/galaxy/config/tool_conf.xml.j2 b/templates/galaxy/config/tool_conf.xml.j2
index 98de3bcbb..cbab81c8a 100644
--- a/templates/galaxy/config/tool_conf.xml.j2
+++ b/templates/galaxy/config/tool_conf.xml.j2
@@ -53,9 +53,11 @@
[two added lines in this hunk; the XML tags were stripped during extraction and only bare + markers survive]
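Finally, the build_sites.yml.j2 hunk above points the UCSC build sites list at a local copy under /opt/galaxy/config instead of the CVMFS path. A hypothetical companion task (not part of this diff) that would keep such a local copy populated:

```yaml
# Hypothetical task for the build_sites.yml.j2 change: snapshot the UCSC
# build sites file out of CVMFS so the new local path exists even when
# CVMFS is unavailable. Not part of this diff.
- name: Snapshot ucsc_build_sites.txt out of CVMFS
  ansible.builtin.copy:
    src: /cvmfs/data.galaxyproject.org/managed/location/ucsc_build_sites.txt
    dest: /opt/galaxy/config/ucsc_build_sites.txt
    remote_src: true
```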