Commit aa1e87e

Merge pull request #247 from pyiron/no_more_wait
Use queue.Queue().get() rather than queue.Queue().get_nowait()
2 parents c841760 + 38f20ab commit aa1e87e

6 files changed: +7 additions, -30 deletions
pympipool/__init__.py

Lines changed: 0 additions & 7 deletions

@@ -38,7 +38,6 @@ class Executor:
         oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI only) - default False
         init_function (None): optional function to preset arguments for functions which are submitted later
         cwd (str/None): current working directory where the parallel python task is executed
-        sleep_interval (float): synchronization interval - default 0.1
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                       context of an HPC cluster this essential to be able to communicate to an
                                       Executor running on a different compute node within the same allocation. And
@@ -77,7 +76,6 @@ def __init__(
         oversubscribe=False,
         init_function=None,
         cwd=None,
-        sleep_interval=0.1,
         executor=None,
         hostname_localhost=False,
     ):
@@ -93,7 +91,6 @@ def __new__(
         oversubscribe=False,
         init_function=None,
         cwd=None,
-        sleep_interval=0.1,
         executor=None,
         hostname_localhost=False,
     ):
@@ -113,7 +110,6 @@ def __new__(
             oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI only) - default False
             init_function (None): optional function to preset arguments for functions which are submitted later
             cwd (str/None): current working directory where the parallel python task is executed
-            sleep_interval (float): synchronization interval - default 0.1
             hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                           context of an HPC cluster this essential to be able to communicate to an
                                           Executor running on a different compute node within the same allocation. And
@@ -136,7 +132,6 @@ def __new__(
                 gpus_per_worker=gpus_per_worker,
                 init_function=init_function,
                 cwd=cwd,
-                sleep_interval=sleep_interval,
                 hostname_localhost=hostname_localhost,
             )
         elif slurm_installed:
@@ -145,7 +140,6 @@ def __new__(
                 cores_per_worker=cores_per_worker,
                 init_function=init_function,
                 cwd=cwd,
-                sleep_interval=sleep_interval,
                 hostname_localhost=hostname_localhost,
             )
         else:
@@ -168,6 +162,5 @@ def __new__(
                 cores_per_worker=cores_per_worker,
                 init_function=init_function,
                 cwd=cwd,
-                sleep_interval=sleep_interval,
                 hostname_localhost=hostname_localhost,
             )

pympipool/flux/executor.py

Lines changed: 0 additions & 3 deletions

@@ -26,7 +26,6 @@ class PyFluxExecutor(ExecutorBase):
         gpus_per_worker (int): number of GPUs per worker - defaults to 0
         init_function (None): optional function to preset arguments for functions which are submitted later
         cwd (str/None): current working directory where the parallel python task is executed
-        sleep_interval (float): synchronization interval - default 0.1
         executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                       context of an HPC cluster this essential to be able to communicate to an
@@ -65,7 +64,6 @@ def __init__(
         gpus_per_worker=0,
         init_function=None,
         cwd=None,
-        sleep_interval=0.1,
         executor=None,
         hostname_localhost=False,
     ):
@@ -76,7 +74,6 @@ def __init__(
                 # Broker Arguments
                 "future_queue": self._future_queue,
                 "max_workers": max_workers,
-                "sleep_interval": sleep_interval,
                 "hostname_localhost": hostname_localhost,
                 "executor_class": PyFluxSingleTaskExecutor,
                 # Executor Arguments

pympipool/mpi/executor.py

Lines changed: 0 additions & 3 deletions

@@ -22,7 +22,6 @@ class PyMPIExecutor(ExecutorBase):
         oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI only) - default False
         init_function (None): optional function to preset arguments for functions which are submitted later
         cwd (str/None): current working directory where the parallel python task is executed
-        sleep_interval (float): synchronization interval - default 0.1
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                       context of an HPC cluster this essential to be able to communicate to an
                                       Executor running on a different compute node within the same allocation. And
@@ -59,7 +58,6 @@ def __init__(
         oversubscribe=False,
         init_function=None,
         cwd=None,
-        sleep_interval=0.1,
         hostname_localhost=False,
     ):
         super().__init__()
@@ -69,7 +67,6 @@ def __init__(
                 # Broker Arguments
                 "future_queue": self._future_queue,
                 "max_workers": max_workers,
-                "sleep_interval": sleep_interval,
                 "executor_class": PyMPISingleTaskExecutor,
                 "hostname_localhost": hostname_localhost,
                 # Executor Arguments

pympipool/shared/executorbase.py

Lines changed: 7 additions & 12 deletions

@@ -7,7 +7,6 @@
 import os
 import queue
 import sys
-from time import sleep

 import cloudpickle

@@ -176,7 +175,6 @@ def executor_broker(
     future_queue,
     max_workers,
     executor_class,
-    sleep_interval=0.1,
     **kwargs,
 ):
     meta_future_lst = _get_executor_dict(
@@ -185,17 +183,14 @@ def executor_broker(
         **kwargs,
     )
     while True:
-        try:
-            task_dict = future_queue.get_nowait()
-        except queue.Empty:
-            sleep(sleep_interval)
+        if execute_task_dict(
+            task_dict=future_queue.get(), meta_future_lst=meta_future_lst
+        ):
+            future_queue.task_done()
         else:
-            if execute_task_dict(task_dict=task_dict, meta_future_lst=meta_future_lst):
-                future_queue.task_done()
-            else:
-                future_queue.task_done()
-                future_queue.join()
-                break
+            future_queue.task_done()
+            future_queue.join()
+            break


 def execute_task_dict(task_dict, meta_future_lst):
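The reworked `executor_broker` above blocks on `future_queue.get()` and leaves the stop decision to the return value of `execute_task_dict()`: a truthy result marks the item done and keeps looping, a falsy result marks it done, waits on `future_queue.join()` and exits. Here is a self-contained sketch of that control flow; the stand-in `execute_task_dict`, which treats a dict carrying a "shutdown" key as the stop signal, is an assumption for illustration only (the real dispatcher forwards tasks to the single-task executors).

import queue


def execute_task_dict(task_dict, meta_future_lst):
    # Stand-in dispatcher: pretend to schedule the task and report whether
    # the broker should keep running (assumption for illustration only).
    return "shutdown" not in task_dict


def executor_broker(future_queue, meta_future_lst):
    # Same control flow as the patched broker: block on get(), no polling.
    while True:
        if execute_task_dict(
            task_dict=future_queue.get(), meta_future_lst=meta_future_lst
        ):
            future_queue.task_done()
        else:
            future_queue.task_done()
            future_queue.join()
            break


q = queue.Queue()
q.put({"fn": sum, "args": ([1, 2, 3],)})   # a regular task item
q.put({"shutdown": True})                  # the stop item
executor_broker(future_queue=q, meta_future_lst=[])
print("broker exited cleanly")

With a blocking `get()` there is no periodic wake-up left to tune, which is why `sleep_interval` disappears from every executor signature touched by this commit.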

pympipool/shell/executor.py

Lines changed: 0 additions & 3 deletions

@@ -67,7 +67,6 @@ class SubprocessExecutor(ExecutorBase):

     Args:
         max_workers (int): defines the number workers which can execute functions in parallel
-        sleep_interval (float): synchronization interval - default 0.1

     Examples:

@@ -82,7 +81,6 @@ class SubprocessExecutor(ExecutorBase):
     def __init__(
         self,
         max_workers=1,
-        sleep_interval=0.1,
     ):
         super().__init__()
         self._process = RaisingThread(
@@ -91,7 +89,6 @@ def __init__(
                 # Broker Arguments
                 "future_queue": self._future_queue,
                 "max_workers": max_workers,
-                "sleep_interval": sleep_interval,
                 "executor_class": SubprocessSingleExecutor,
             },
         )

pympipool/slurm/executor.py

Lines changed: 0 additions & 2 deletions

@@ -62,7 +62,6 @@ def __init__(
         oversubscribe=False,
         init_function=None,
         cwd=None,
-        sleep_interval=0.1,
         hostname_localhost=False,
     ):
         super().__init__()
@@ -72,7 +71,6 @@ def __init__(
                 # Broker Arguments
                 "future_queue": self._future_queue,
                 "max_workers": max_workers,
-                "sleep_interval": sleep_interval,
                 "hostname_localhost": hostname_localhost,
                 "executor_class": PySlurmSingleTaskExecutor,
                 # Executor Arguments
