From 3b7c2a647e397d54ea12ae3debf741b2cfdbe398 Mon Sep 17 00:00:00 2001
From: meesters
Date: Tue, 4 Jun 2024 10:07:55 +0200
Subject: [PATCH 1/4] docs: added two commas for better grammar

---
 docs/further.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/further.md b/docs/further.md
index 48aa352..4cebf76 100644
--- a/docs/further.md
+++ b/docs/further.md
@@ -2,7 +2,7 @@
 
 ## The general Idea
 
-To use this plugin, log in to your cluster's head node (sometimes called the "login" node), activate your environment as usual and start Snakemake. Snakemake will then submit your jobs as cluster jobs.
+To use this plugin, log in to your cluster's head node (sometimes called the "login" node), activate your environment as usual, and start Snakemake. Snakemake will then submit your jobs as cluster jobs.
 
 ## Specifying Account and Partition
 
@@ -210,7 +210,7 @@ shared-fs-usage:
   local-storage-prefix: ""
 ```
 
-It will set the executor to be this SLURM executor, ensure sufficient file system latency and allow automatic stage-in of files using the [file system storage plugin](https://github.com/snakemake/snakemake-storage-plugin-fs).
+It will set the executor to be this SLURM executor, ensure sufficient file system latency, and allow automatic stage-in of files using the [file system storage plugin](https://github.com/snakemake/snakemake-storage-plugin-fs).
 
 Note, that you need to set the `SNAKEMAKE_PROFILE` environment variable in your `~/.bashrc` file, e.g.:

From 1ffca4cd3069abe7c0372739231d09c8226e13b0 Mon Sep 17 00:00:00 2001
From: meesters
Date: Tue, 4 Jun 2024 11:35:37 +0200
Subject: [PATCH 2/4] docs: slight extension on the MPI part

---
 docs/further.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/further.md b/docs/further.md
index 4cebf76..be2eb49 100644
--- a/docs/further.md
+++ b/docs/further.md
@@ -86,6 +86,8 @@ other systems, e.g. by replacing `srun` with `mpiexec`:
 $ snakemake --set-resources calc_pi:mpi="mpiexec" ...
 ```
 
+For "ordinary" MPI jobs, submitting with `tasks` (the number of MPI ranks) is sufficient. Alternatively, on some clusters, it might be convenient to just configure `nodes`. Consider using a combination of `tasks` and `cpus_per_task` for hybrid applications (those that use both MPI ranks and threads). A detailed topology layout can be achieved using the `slurm_extra` parameter (see below) with further flags such as `--distribution`.
+
 ## Running Jobs locally
 
 Not all Snakemake workflows are adapted for heterogeneous environments, particularly clusters. Users might want to avoid the submission of _all_ rules as cluster jobs. Non-cluster jobs should usually include _short_ jobs, e.g. internet downloads or plotting rules.
@@ -158,8 +160,7 @@ set-resources:
 
 ## Additional Custom Job Configuration
 
 SLURM installations can support custom plugins, which may add support
-for additional flags to `sbatch`. In addition, there are various
-`sbatch` options not directly supported via the resource definitions
+for additional flags to `sbatch`. In addition, there are various batch options not directly supported via the resource definitions
 shown above.
 You may use the `slurm_extra` resource to specify additional flags to `sbatch`:
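
The MPI paragraph added in PATCH 2/4 translates naturally into rule resources. Below is a minimal Snakefile sketch, not part of the patch itself: the `calc_pi` rule and the `{resources.mpi} -n {resources.tasks}` invocation follow the MPI example already present in `docs/further.md`, while the rank/thread counts and the `--distribution` layout are illustrative assumptions to adapt to your own cluster.

```python
# Sketch only: the concrete numbers and the distribution layout are assumptions.
rule calc_pi:
    output:
        "pi.calc",
    resources:
        mpi="srun",       # MPI launcher; e.g. "mpiexec" on other systems
        tasks=64,         # number of MPI ranks
        cpus_per_task=2,  # threads per rank, for hybrid MPI + threading codes
        slurm_extra="'--distribution=block:cyclic'",  # extra sbatch topology flags
    shell:
        "{resources.mpi} -n {resources.tasks} calc-pi-mpi > {output}"
```

For plain MPI codes, `tasks` alone suffices; `cpus_per_task` only matters for hybrid applications, and `slurm_extra` passes any further topology flags straight to `sbatch`.
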
From 59bc8ee6f32890f9f8828c86985ab98f71e57d0f Mon Sep 17 00:00:00 2001
From: meesters
Date: Tue, 4 Jun 2024 11:36:18 +0200
Subject: [PATCH 3/4] fix: bug from issue #97

---
 snakemake_executor_plugin_slurm/__init__.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/snakemake_executor_plugin_slurm/__init__.py b/snakemake_executor_plugin_slurm/__init__.py
index 5db2f0b..72e4548 100644
--- a/snakemake_executor_plugin_slurm/__init__.py
+++ b/snakemake_executor_plugin_slurm/__init__.py
@@ -123,14 +123,23 @@ def run_job(self, job: JobExecutorInterface):
                 "- submitting without. This might or might not work on your cluster."
             )
 
-        # MPI job
-        if job.resources.get("mpi", False):
-            if job.resources.get("nodes", False):
-                call += f" --nodes={job.resources.get('nodes', 1)}"
+        if job.resources.get("nodes", False):
+            call += f" --nodes={job.resources.get('nodes', 1)}"
 
         # fixes #40 - set ntasks regardless of mpi, because
         # SLURM v22.05 will require it for all jobs
         call += f" --ntasks={job.resources.get('tasks', 1)}"
+        # MPI job
+        if job.resources.get("mpi", False):
+            if not job.resources.get("tasks_per_node") and not job.resources.get(
+                "nodes"
+            ):
+                logger.warning(
+                    "MPI job detected, but no 'tasks_per_node' or 'nodes' "
+                    "specified. Assuming 'tasks_per_node=1'. "
+                    "Probably not what you want."
+                )
+
         call += f" --cpus-per-task={get_cpus_per_task(job)}"
 
         if job.resources.get("slurm_extra"):

From c6f99fa39c74e7814ba0a409f1fdfd985d46b7c4 Mon Sep 17 00:00:00 2001
From: meesters
Date: Tue, 4 Jun 2024 11:44:27 +0200
Subject: [PATCH 4/4] fix: namespace error from previous commit

---
 snakemake_executor_plugin_slurm/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snakemake_executor_plugin_slurm/__init__.py b/snakemake_executor_plugin_slurm/__init__.py
index 72e4548..119bc76 100644
--- a/snakemake_executor_plugin_slurm/__init__.py
+++ b/snakemake_executor_plugin_slurm/__init__.py
@@ -134,7 +134,7 @@ def run_job(self, job: JobExecutorInterface):
         if not job.resources.get("tasks_per_node") and not job.resources.get(
             "nodes"
         ):
-            logger.warning(
+            self.logger.warning(
                 "MPI job detected, but no 'tasks_per_node' or 'nodes' "
                 "specified. Assuming 'tasks_per_node=1'. "
                 "Probably not what you want."
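
Taken together, PATCH 3/4 and PATCH 4/4 decouple `--nodes` from the `mpi` resource (the bug reported in issue #97) and warn when an MPI job specifies neither `tasks_per_node` nor `nodes`. The following self-contained Python sketch mirrors that control flow; it is not the plugin's actual code path, and it assumes a plain dict in place of `job.resources` and a module-level logger in place of `self.logger`.

```python
import logging

logger = logging.getLogger(__name__)


def build_flags(resources: dict) -> str:
    """Mimic the sbatch flag construction after patches 3 and 4 (sketch only)."""
    call = ""
    # --nodes is now emitted for any job requesting nodes, not only MPI jobs.
    if resources.get("nodes", False):
        call += f" --nodes={resources.get('nodes', 1)}"
    # ntasks is set regardless of mpi, because SLURM v22.05 requires it for all jobs.
    call += f" --ntasks={resources.get('tasks', 1)}"
    # MPI job: warn when the rank layout is underspecified.
    if resources.get("mpi", False):
        if not resources.get("tasks_per_node") and not resources.get("nodes"):
            logger.warning(
                "MPI job detected, but no 'tasks_per_node' or 'nodes' "
                "specified. Assuming 'tasks_per_node=1'. "
                "Probably not what you want."
            )
    return call


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    print(build_flags({"nodes": 2, "tasks": 8}))     # " --nodes=2 --ntasks=8"
    print(build_flags({"mpi": "srun", "tasks": 4}))  # emits the warning
```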