Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 51 additions & 0 deletions executorlib/standalone/slurm_command.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
from typing import Optional

SLURM_COMMAND = "srun"


def generate_slurm_command(
    cores: int,
    cwd: Optional[str],
    threads_per_core: int = 1,
    gpus_per_core: int = 0,
    num_nodes: Optional[int] = None,
    exclusive: bool = False,
    openmpi_oversubscribe: bool = False,
    slurm_cmd_args: Optional[list[str]] = None,
    pmi_mode: Optional[str] = None,
) -> list[str]:
    """
    Generate the command list for the SLURM interface.

    Args:
        cores (int): The number of cores (tasks).
        cwd (Optional[str]): The current working directory. Defaults to None.
        threads_per_core (int, optional): The number of threads per core. Defaults to 1.
        gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
        num_nodes (Optional[int], optional): The number of compute nodes to use for
            executing the task. Defaults to None.
        exclusive (bool): Whether to exclusively reserve the compute nodes, or allow
            sharing compute nodes. Defaults to False.
        openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores.
            Defaults to False.
        slurm_cmd_args (Optional[list[str]], optional): Additional command line
            arguments to append. Defaults to None.
        pmi_mode (Optional[str], optional): PMI interface to use (e.g., "pmix" for
            OpenMPI v5). Defaults to None.

    Returns:
        list[str]: The generated srun command list.
    """
    command_prepend_lst = [SLURM_COMMAND, "-n", str(cores)]
    if cwd is not None:
        command_prepend_lst += ["-D", cwd]
    if pmi_mode is not None:
        command_prepend_lst += ["--mpi=" + pmi_mode]
    if num_nodes is not None:
        command_prepend_lst += ["-N", str(num_nodes)]
    if threads_per_core > 1:
        command_prepend_lst += ["--cpus-per-task=" + str(threads_per_core)]
    if gpus_per_core > 0:
        command_prepend_lst += ["--gpus-per-task=" + str(gpus_per_core)]
    if exclusive:
        # "--exact" restricts the step to exactly the resources requested,
        # preventing other steps from sharing the allocated CPUs.
        command_prepend_lst += ["--exact"]
    if openmpi_oversubscribe:
        command_prepend_lst += ["--oversubscribe"]
    # Truthiness check covers both None and an empty list in one test.
    if slurm_cmd_args:
        command_prepend_lst += slurm_cmd_args
    return command_prepend_lst
51 changes: 1 addition & 50 deletions executorlib/task_scheduler/interactive/slurmspawner.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@
from typing import Optional

from executorlib.standalone.interactive.spawner import SubprocessSpawner

SLURM_COMMAND = "srun"
from executorlib.standalone.slurm_command import generate_slurm_command


def validate_max_workers(max_workers: int, cores: int, threads_per_core: int):
Expand Down Expand Up @@ -83,51 +82,3 @@ def generate_command(self, command_lst: list[str]) -> list[str]:
return super().generate_command(
command_lst=command_prepend_lst + command_lst,
)


def generate_slurm_command(
    cores: int,
    cwd: Optional[str],
    threads_per_core: int = 1,
    gpus_per_core: int = 0,
    num_nodes: Optional[int] = None,
    exclusive: bool = False,
    openmpi_oversubscribe: bool = False,
    slurm_cmd_args: Optional[list[str]] = None,
    pmi_mode: Optional[str] = None,
) -> list[str]:
    """
    Generate the command list for the SLURM interface.

    Args:
        cores (int): The number of cores (tasks).
        cwd (Optional[str]): The current working directory. Defaults to None.
        threads_per_core (int, optional): The number of threads per core. Defaults to 1.
        gpus_per_core (int, optional): The number of GPUs per core. Defaults to 0.
        num_nodes (Optional[int], optional): The number of compute nodes to use for
            executing the task. Defaults to None.
        exclusive (bool): Whether to exclusively reserve the compute nodes, or allow
            sharing compute nodes. Defaults to False.
        openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores.
            Defaults to False.
        slurm_cmd_args (Optional[list[str]], optional): Additional command line
            arguments to append. Defaults to None.
        pmi_mode (Optional[str], optional): PMI interface to use (e.g., "pmix" for
            OpenMPI v5). Defaults to None.

    Returns:
        list[str]: The generated srun command list.
    """
    command_prepend_lst = [SLURM_COMMAND, "-n", str(cores)]
    if cwd is not None:
        command_prepend_lst += ["-D", cwd]
    if pmi_mode is not None:
        command_prepend_lst += ["--mpi=" + pmi_mode]
    if num_nodes is not None:
        command_prepend_lst += ["-N", str(num_nodes)]
    if threads_per_core > 1:
        command_prepend_lst += ["--cpus-per-task=" + str(threads_per_core)]
    if gpus_per_core > 0:
        command_prepend_lst += ["--gpus-per-task=" + str(gpus_per_core)]
    if exclusive:
        # "--exact" restricts the step to exactly the resources requested.
        command_prepend_lst += ["--exact"]
    if openmpi_oversubscribe:
        command_prepend_lst += ["--oversubscribe"]
    if slurm_cmd_args is not None and len(slurm_cmd_args) > 0:
        command_prepend_lst += slurm_cmd_args
    return command_prepend_lst
2 changes: 1 addition & 1 deletion tests/test_interactive_slurmspawner.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import unittest
from executorlib.task_scheduler.interactive.slurmspawner import generate_slurm_command
from executorlib.standalone.slurm_command import generate_slurm_command

try:
from executorlib.standalone.scheduler import pysqa_execute_command
Expand Down
Loading