@@ -92,14 +92,27 @@ class GenericThreadPool final

private:
    /**
-     * @brief getWorker Obtain a reference to the local thread's associated worker,
-     * otherwise return the next worker in the round robin.
+     * @brief tryPostImpl Try to post a job to the thread pool.
+     * @param handler Handler to be called from a thread pool worker. It has
+     * to be callable as 'handler()'.
+     * @param failedWakeupRetryCap The number of retries to perform when a worker
+     * wakeup fails.
+     * @return 'true' on success, 'false' otherwise.
+     * @note All exceptions thrown by the handler will be suppressed.
     */
-    Worker<Task, Queue>& getWorker();
+    template <typename Handler>
+    bool tryPostImpl(Handler&& handler, size_t failedWakeupRetryCap);
+
+    /**
+     * @brief getWorkerId Obtain the id of the local thread's associated worker if one
+     * exists; otherwise return the next worker id in the round robin.
+     */
+    size_t getWorkerId();

    SlottedBag<Queue> m_idle_workers;
    WorkerVector m_workers;
    Rouser m_rouser;
+    size_t m_failed_wakeup_retry_cap;
    std::atomic<size_t> m_next_worker;
    std::atomic<size_t> m_num_busy_waiters;
};
@@ -112,6 +125,7 @@ inline GenericThreadPool<Task, Queue>::GenericThreadPool(ThreadPoolOptions optio
    : m_idle_workers(options.threadCount())
    , m_workers(options.threadCount())
    , m_rouser(options.rousePeriod())
+    , m_failed_wakeup_retry_cap(options.failedWakeupRetryCap())
    , m_next_worker(0)
    , m_num_busy_waiters(0)
{
@@ -140,6 +154,7 @@ inline GenericThreadPool<Task, Queue>& GenericThreadPool<Task, Queue>::operator=
        m_idle_workers = std::move(rhs.m_idle_workers);
        m_workers = std::move(rhs.m_workers);
        m_rouser = std::move(rhs.m_rouser);
+        m_failed_wakeup_retry_cap = rhs.m_failed_wakeup_retry_cap;
        m_next_worker = rhs.m_next_worker.load();
        m_num_busy_waiters = rhs.m_num_busy_waiters.load();
    }
@@ -159,6 +174,22 @@ inline GenericThreadPool<Task, Queue>::~GenericThreadPool()
template <typename Task, template <typename> class Queue>
template <typename Handler>
inline bool GenericThreadPool<Task, Queue>::tryPost(Handler&& handler)
+{
+    return tryPostImpl(std::forward<Handler>(handler), m_failed_wakeup_retry_cap);
+}
+
+template <typename Task, template <typename> class Queue>
+template <typename Handler>
+inline void GenericThreadPool<Task, Queue>::post(Handler&& handler)
+{
+    const auto ok = tryPost(std::forward<Handler>(handler));
+    if (!ok)
+        throw std::runtime_error("Thread pool queue is full.");
+}
+
+template <typename Task, template <typename> class Queue>
+template <typename Handler>
+inline bool GenericThreadPool<Task, Queue>::tryPostImpl(Handler&& handler, size_t failedWakeupRetryCap)
{
    // This section of the code increases the probability that our thread pool
    // is fully utilized (num active workers = argmin(num tasks, num total workers)).
@@ -169,56 +200,59 @@ inline bool GenericThreadPool<Task, Queue>::tryPost(Handler&& handler)
        auto result = m_idle_workers.tryEmptyAny();
        if (result.first)
        {
-            if (m_workers[result.second]->tryPost(std::forward<Handler>(handler)))
-            {
-                m_workers[result.second]->wake();
-                return true;
-            }
+            auto success = m_workers[result.second]->tryPost(std::forward<Handler>(handler));
+            m_workers[result.second]->wake();

-            // If post is unsuccessful, we need to re-add the worker to the idle worker bag.
-            m_idle_workers.fill(result.second);
-            return false;
+            // The above post will only fail if the idle worker's queue is full, which is an extremely
+            // low probability scenario. In that case, we wake the worker and let it get to work on
+            // processing the items in its queue. We then re-try posting our current task.
+            if (success)
+                return true;
+            else if (failedWakeupRetryCap > 0)
+                return tryPostImpl(std::forward<Handler>(handler), failedWakeupRetryCap - 1);
        }
    }

    // No idle threads. Our threads are either active or busy waiting
    // Either way, submit the work item in a round robin fashion.
-    if (!getWorker().tryPost(std::forward<Handler>(handler)))
-        return false; // Worker's task queue is full.
-
-    // The following section increases the probability that tasks will not be dropped.
-    // This is a soft constraint, the strict task dropping bound is covered by the Rouser
-    // thread's functionality. This code experimentally lowers task response time under
-    // low thread pool utilization without incurring significant performance penalties at
-    // high thread pool utilization.
-    if (m_num_busy_waiters.load(std::memory_order_acquire) == 0)
+    auto id = getWorkerId();
+    auto initialWorkerId = id;
+    do
    {
-        auto result = m_idle_workers.tryEmptyAny();
-        if (result.first)
-            m_workers[result.second]->wake();
-    }
+        if (m_workers[id]->tryPost(std::forward<Handler>(handler)))
+        {
+            // The following section increases the probability that tasks will not be dropped.
+            // This is a soft constraint, the strict task dropping bound is covered by the Rouser
+            // thread's functionality. This code experimentally lowers task response time under
+            // low thread pool utilization without incurring significant performance penalties at
+            // high thread pool utilization.
+            if (m_num_busy_waiters.load(std::memory_order_acquire) == 0)
+            {
+                auto result = m_idle_workers.tryEmptyAny();
+                if (result.first)
+                    m_workers[result.second]->wake();
+            }

-    return true;
-}
+            return true;
+        }

-template <typename Task, template <typename> class Queue>
-template <typename Handler>
-inline void GenericThreadPool<Task, Queue>::post(Handler&& handler)
-{
-    const auto ok = tryPost(std::forward<Handler>(handler));
-    if (!ok)
-        throw std::runtime_error("Thread pool queue is full.");
+        ++id %= m_workers.size();
+    } while (id != initialWorkerId);
+
+    // All Queues in our thread pool are full during one whole iteration.
+    // We consider this a posting failure case.
+    return false;
}

template <typename Task, template <typename> class Queue>
-inline Worker<Task, Queue>& GenericThreadPool<Task, Queue>::getWorker()
+inline size_t GenericThreadPool<Task, Queue>::getWorkerId()
{
    auto id = Worker<Task, Queue>::getWorkerIdForCurrentThread();

    if (id > m_workers.size())
        id = m_next_worker.fetch_add(1, std::memory_order_relaxed) % m_workers.size();

-    return *m_workers[id];
+    return id;
}

}
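For readers skimming the change, the sketch below shows how the new failed-wakeup retry cap is meant to be exercised from the caller's side. It is a hypothetical usage example, not part of this commit: the tp namespace, the header path, the ThreadPool alias, and the setThreadCount/setFailedWakeupRetryCap setters are assumptions that mirror the threadCount(), rousePeriod(), and failedWakeupRetryCap() getters read in the constructor above.

// Hypothetical usage sketch (assumed namespace, header path, alias, and option setters).
#include <thread_pool/thread_pool.hpp>
#include <iostream>

int main()
{
    tp::ThreadPoolOptions options;
    options.setThreadCount(4);               // assumed setter matching threadCount()
    options.setFailedWakeupRetryCap(3);      // assumed setter matching failedWakeupRetryCap()

    tp::ThreadPool pool(options);

    // tryPost() now delegates to tryPostImpl(), which retries up to the cap when a woken
    // idle worker's queue turns out to be full, then sweeps all workers round robin
    // before reporting failure.
    if (!pool.tryPost([] { std::cout << "task ran\n"; }))
        std::cout << "every worker queue was full\n";

    // post() keeps its contract: it throws std::runtime_error("Thread pool queue is full.")
    // whenever tryPost() returns false.
    pool.post([] { std::cout << "second task ran\n"; });
}

The design point worth noting is that the retry is bounded: tryPostImpl() recurses at most failedWakeupRetryCap times on the idle-worker path, after which it falls through to the round-robin sweep, so the recursion depth is capped by the configured value.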