/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package java.util.concurrent;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.reflect.Field;
import java.security.AccessController;
import java.security.AccessControlContext;
import java.security.Permission;
import java.security.Permissions;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.function.Predicate;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.locks.LockSupport;
import jdk.internal.access.JavaLangAccess;
import jdk.internal.access.JavaUtilConcurrentFJPAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.misc.Unsafe;
import jdk.internal.vm.SharedThreadContainer;
/**
* An {@link ExecutorService} for running {@link ForkJoinTask}s.
* A {@code ForkJoinPool} provides the entry point for submissions
* from non-{@code ForkJoinTask} clients, as well as management and
* monitoring operations.
*
* <p>A {@code ForkJoinPool} differs from other kinds of {@link
* ExecutorService} mainly by virtue of employing
* <em>work-stealing</em>: all threads in the pool attempt to find and
* execute tasks submitted to the pool and/or created by other active
* tasks (eventually blocking waiting for work if none exist). This
* enables efficient processing when most tasks spawn other subtasks
* (as do most {@code ForkJoinTask}s), as well as when many small
* tasks are submitted to the pool from external clients. Especially
* when setting <em>asyncMode</em> to true in constructors, {@code
* ForkJoinPool}s may also be appropriate for use with event-style
* tasks that are never joined. All worker threads are initialized
* with {@link Thread#isDaemon} set {@code true}.
*
* <p>A static {@link #commonPool()} is available and appropriate for
* most applications. The common pool is used by any ForkJoinTask that
* is not explicitly submitted to a specified pool. Using the common
* pool normally reduces resource usage (its threads are slowly
* reclaimed during periods of non-use, and reinstated upon subsequent
* use).
*
* <p>For applications that require separate or custom pools, a {@code
* ForkJoinPool} may be constructed with a given target parallelism
* level; by default, equal to the number of available processors.
* The pool attempts to maintain enough active (or available) threads
* by dynamically adding, suspending, or resuming internal worker
* threads, even if some tasks are stalled waiting to join others.
* However, no such adjustments are guaranteed in the face of blocked
* I/O or other unmanaged synchronization. The nested {@link
* ManagedBlocker} interface enables extension of the kinds of
* synchronization accommodated. The default policies may be
* overridden using a constructor with parameters corresponding to
* those documented in class {@link ThreadPoolExecutor}.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
* {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
* {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
* <p>As is the case with other ExecutorServices, there are three
* main task execution methods summarized in the following table.
* These are designed to be used primarily by clients not already
* engaged in fork/join computations in the current pool. The main
* forms of these methods accept instances of {@code ForkJoinTask},
* but overloaded forms also allow mixed execution of plain {@code
* Runnable}- or {@code Callable}- based activities as well. However,
* tasks that are already executing in a pool should normally instead
* use the within-computation forms listed in the table unless using
* async event-style tasks that are not usually joined, in which case
* there is little difference among choice of methods.
*
* <table class="plain">
* <caption>Summary of task execution methods</caption>
* <tr>
* <td></td>
* <th scope="col"> Call from non-fork/join clients</th>
* <th scope="col"> Call from within fork/join computations</th>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Arrange async execution</th>
* <td> {@link #execute(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork}</td>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Await and obtain result</th>
* <td> {@link #invoke(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#invoke}</td>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Arrange exec and obtain Future</th>
* <td> {@link #submit(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
* </tr>
* </table>
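*
* <p>As an illustration (a sketch, not a prescription), an external
* client might submit a {@code Callable} and later join its result
* (because ForkJoinTasks are Futures, {@code get} also works):
*
* <pre> {@code
* ForkJoinPool pool = new ForkJoinPool();
* ForkJoinTask<Integer> task = pool.submit(() -> 1 + 2);
* int sum = task.join();}</pre>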
*
* <p>The parameters used to construct the common pool may be controlled by
* setting the following {@linkplain System#getProperty system properties}:
* <ul>
* <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.parallelism}
* - the parallelism level, a non-negative integer
* <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.threadFactory}
* - the class name of a {@link ForkJoinWorkerThreadFactory}.
* The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
* is used to load this class.
* <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.exceptionHandler}
* - the class name of a {@link UncaughtExceptionHandler}.
* The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
* is used to load this class.
* <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.maximumSpares}
* - the maximum number of allowed extra threads to maintain target
* parallelism (default 256).
* </ul>
* If no thread factory is supplied via a system property, then the
* common pool uses a factory that uses the system class loader as the
* {@linkplain Thread#getContextClassLoader() thread context class loader}.
* In addition, if a {@link SecurityManager} is present, then
* the common pool uses a factory supplying threads that have no
* {@link Permissions} enabled, and are not guaranteed to preserve
* the values of {@link java.lang.ThreadLocal} variables across tasks.
*
* Upon any error in establishing these settings, default parameters
* are used. It is possible to disable or limit the use of threads in
* the common pool by setting the parallelism property to zero, and/or
* using a factory that may return {@code null}. However, doing so may
* cause unjoined tasks to never be executed.
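*
* <p>For example, a parallelism level can be established before the
* common pool is first used (the value shown is illustrative;
* equivalently, pass {@code -D} flags on the command line):
*
* <pre> {@code
* System.setProperty(
*     "java.util.concurrent.ForkJoinPool.common.parallelism", "4");}</pre>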
*
* @implNote This implementation restricts the maximum number of
* running threads to 32767. Attempts to create pools with greater
* than the maximum number result in {@code
* IllegalArgumentException}. Also, this implementation rejects
* submitted tasks (that is, by throwing {@link
* RejectedExecutionException}) only when the pool is shut down or
* internal resources have been exhausted.
*
* @since 1.7
* @author Doug Lea
*/
public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads. Because
* most internal methods and nested classes are interrelated,
* their main rationale and descriptions are presented here;
* individual methods and nested classes contain only brief
* comments about details. Broadly: submissions from non-FJ
* threads enter into submission queues. Workers take these tasks
* and typically split them into subtasks that may be stolen by
* other workers. Work-stealing based on randomized scans
* generally leads to better throughput than "work dealing" in
* which producers assign tasks to idle threads, in part because
* threads that have finished other tasks before the signalled
* thread wakes up (which can be a long time) can take the task
* instead. Preference rules give first priority to processing
* tasks from their own queues (LIFO or FIFO, depending on mode),
* then to randomized FIFO steals of tasks in other queues.
*
* This framework began as a vehicle for supporting structured
* parallelism using work-stealing, designed to work best when
* tasks are dag-structured (wrt completion dependencies), nested
* (generated using recursion or completions), of reasonable
* granularity, independent (wrt memory and resources) and where
* callers participate in task execution. These are properties
* that anyone aiming for efficient parallel multicore execution
* should design for. Over time, the scalability advantages of
* this framework led to extensions to better support more diverse
* usage contexts, amounting to weakenings or violations of each
* of these properties. Accommodating them may compromise
* performance, but mechanics discussed below include tradeoffs
* attempting to arrange that no single performance issue dominates.
*
* Here's a brief history of major revisions, each also with other
* minor features and changes.
*
* 1. Only handle recursively structured computational tasks
* 2. Async (FIFO) mode and striped submission queues
* 3. Completion-based tasks (mainly CountedCompleters)
* 4. CommonPool and parallelStream support
* 5. InterruptibleTasks for externally submitted tasks
*
* Most changes involve adaptations of base algorithms using
* combinations of static and dynamic bitwise mode settings (both
* here and in ForkJoinTask), and subclassing of ForkJoinTask.
* There are a fair number of odd code constructions and design
* decisions for components that reside at the edge of Java vs JVM
* functionality.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
* Multiprocessor programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* The main differences ultimately stem from GC requirements that
* we null out taken slots as soon as we can, to maintain as small
* a footprint as possible even in programs generating huge
* numbers of tasks. To accomplish this, we shift the CAS
* arbitrating pop vs poll (steal) from being on the indices
* ("base" and "top") to the slots themselves. These provide the
* primary required memory ordering -- see "Correct and Efficient
* Work-Stealing for Weak Memory Models" by Le, Pop, Cohen, and
* Nardelli, PPoPP 2013
* (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
* analysis of memory ordering requirements in work-stealing
* algorithms similar to the one used here. We use per-operation
* ordered writes of various kinds for updates, but usually use
* explicit load fences for reads, to cover access of several
* fields of possibly several objects without further constraining
* read-by-read ordering.
*
* We also support a user mode in which local task processing is
* in FIFO, not LIFO order, simply by using a local version of
* poll rather than pop. This can be useful in message-passing
* frameworks in which tasks are never joined, although with
* increased contention among task producers and consumers. Also,
* the same data structure (and class) is used for "submission
* queues" (described below) holding externally submitted tasks,
* that differ only in that a lock (using field "phase"; see below) is
* required by external callers to push and pop tasks.
*
* Adding tasks then takes the form of a classic array push(task)
* in a circular buffer:
* q.array[q.top++ % length] = task;
*
* The actual code must null-check and size-check the array, use
* masking, not mod, for indexing a power-of-two-sized array,
* enforce memory ordering, support resizing, and possibly
* signal waiting workers to start scanning (described below),
* which requires stronger forms of order accesses.
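*
* A sketch (using the names above; not the actual code) of such a
* masked push on a power-of-two-sized array:
*
*     ForkJoinTask<?>[] a; int s, cap;
*     if ((a = q.array) != null && (cap = a.length) > 0) { // null/size checks
*         a[(cap - 1) & (s = q.top)] = task; // mask, not mod
*         q.top = s + 1;                     // ordered write in actual code
*         if (s == q.base)                   // may have been empty,
*             signalWork();                  //  so wake or create a worker
*     }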
*
* The pop operation (always performed by owner) is of the form:
* if ((task = getAndSet(q.array, (q.top-1) % length, null)) != null)
* decrement top and return task;
* If this fails, the queue is empty. This operation is one part
* of the nextLocalTask method, which instead does a local poll
* in FIFO mode.
*
* The poll operation is, basically:
* if (CAS nonnull task t = q.array[k = q.base % length] to null)
* increment base and return task;
*
* However, there are several more cases that must be dealt with.
* Some of them are just due to asynchrony; others reflect
* contention and stealing policies. Stepping through them
* illustrates some of the implementation decisions in this class.
*
* * Slot k must be read with an acquiring read, which it must
* anyway to dereference and run the task if the (acquiring)
* CAS succeeds, but uses an explicit acquire fence to support
* the following rechecks even if the CAS is not attempted.
*
* * q.base may change between reading and using its value to
* index the slot. To avoid trying to use the wrong t, the
* index and slot must be reread (not necessarily immediately)
* until consistent, unless this is a local poll by owner, in
* which case this form of inconsistency can only appear as t
* being null, below.
*
* * Similarly, q.array may change (due to a resize), unless this
* is a local poll by owner. Otherwise, when t is present, this
* only needs consideration on CAS failure (since a CAS
* confirms the non-resized case.)
*
* * t may appear null because a previous poll operation has not
* yet incremented q.base, so the read is from an already-taken
* index. This form of stall reflects the non-lock-freedom of
* the poll operation. Stalls can be detected by observing that
* q.base doesn't change on repeated reads of null t and when
* no other alternatives apply, spin-wait for it to settle. To
* reduce producing these kinds of stalls by other stealers, we
* encourage timely writes to indices using otherwise
* unnecessarily strong writes.
*
* * The CAS may fail, in which case we may want to retry unless
* there is too much contention. One goal is to balance and
* spread out the many forms of contention that may be
* encountered across polling and other operations to avoid
* sustained performance degradations. Across all cases where
* alternatives exist, a bounded number of CAS misses or stalls
* are tolerated (for slots, ctl, and elsewhere described
* below) before taking alternative action. These may move
* contention or retries elsewhere, which is still preferable
* to single-point bottlenecks.
*
* * Even though the check "top == base" is quiescently accurate
* to determine whether a queue is empty, it is not of much use
* when deciding whether to try to poll or repoll after a
* failure. Both top and base may move independently, and both
* lag updates to the underlying array. To reduce memory
* contention, non-owners avoid reading the "top" when
* possible, by using one-ahead reads to check whether to
* repoll, relying on the fact that a non-empty queue does not
* have two null slots in a row, except in cases (resizes and
* shifts) that can be detected with a secondary recheck that
* is less likely to conflict with owner writes.
*
* The poll operations in q.poll(), runWorker(), helpJoin(), and
* elsewhere differ with respect to whether other queues are
* available to try, and the presence or nature of screening steps
* when only some kinds of tasks can be taken. When trying
* alternatives (or failing outright) is an option, they uniformly
* give up after
* bounded numbers of stalls and/or CAS failures, which reduces
* contention when too many workers are polling too few tasks.
* Overall, in the aggregate, we ensure probabilistic
* non-blockingness of work-stealing at least until checking
* quiescence (which is intrinsically blocking): If an attempted
* steal fails in these ways, a scanning thief chooses a different
* target to try next. In contexts where alternatives aren't
* available, and when progress conditions can be isolated to
* values of a single variable, simple spinloops (using
* Thread.onSpinWait) are used to reduce memory traffic.
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* by workers. Instead, we randomly associate submission queues
* with submitting threads, using a form of hashing. The
* ThreadLocalRandom probe value serves as a hash code for
* choosing existing queues, and may be randomly repositioned upon
* contention with other submitters. In essence, submitters act
* like workers except that they are restricted to executing local
* tasks that they submitted (or when known, subtasks thereof).
* Insertion of tasks in shared mode requires a lock. We use only
* a simple spinlock (as one role of field "phase") because
* submitters encountering a busy queue move to a different
* position to use or create other queues. They (spin) block when
* registering new queues, or indirectly elsewhere, by revisiting
* later.
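*
* A sketch of probe-based selection of an even-indexed submission
* queue (illustrative only; the actual code also handles queue
* creation, locking via "phase", and termination checks):
*
*     int r = ThreadLocalRandom.getProbe();   // per-thread hash code
*     WorkQueue q = queues[r & (n - 1) & ~1]; // mask to an even index
*     // if q is null or busy: ThreadLocalRandom.advanceProbe(r), retry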
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other, at rates that can exceed a billion
* per second. Most non-atomic control is performed by some form
* of scanning across or within queues. The pool itself creates,
* activates (enables scanning for and running tasks),
* deactivates, blocks, and terminates threads, all with minimal
* central information. There are only a few properties that we
* can globally track or maintain, so we pack them into a small
* number of variables, often maintaining atomicity without
* blocking or locking. Nearly all essentially atomic control
* state is held in a few variables that are by far most often
* read (not written) as status and consistency checks. We pack as
* much information into them as we can.
*
* Field "ctl" contains 64 bits holding information needed to
* atomically decide to add, enqueue (on an event queue), and
* dequeue and release workers. To enable this packing, we
* restrict maximum parallelism to (1<<15)-1 (which is far in
* excess of normal operating range) to allow ids, counts, and
* their negations (used for thresholding) to fit into 16bit
* subfields.
*
* Field "runState" and per-WorkQueue field "phase" play similar
* roles, as lockable, versioned counters. Field runState also
* includes monotonic event bits (SHUTDOWN, STOP, and TERMINATED).
* The version tags enable detection of state changes (by
* comparing two reads) modulo bit wraparound. The bit range in
* each case suffices for purposes of determining quiescence,
* termination, avoiding ABA-like errors, and signal control, most
* of which are ultimately based on at most 15bit ranges (due to
* 32767 max total workers). RunState updates do not need to be
* atomic with respect to ctl updates, but because they are not,
* some care is required to avoid stalls. The seqLock properties
* detect changes and conditionally upgrade to coordinate with
* updates. It is typically held for less than a dozen
* instructions unless the queue array is being resized, during
* which contention is rare. To be conservative, lockRunState is
* implemented as a spin/sleep loop. Here and elsewhere spin
* constants are short enough to apply even on systems with few
* available processors. In addition to checking pool status,
* reads of runState sometimes serve as acquire fences before
* reading other fields.
*
* Field "parallelism" holds the target parallelism (normally
* corresponding to pool size). Users can dynamically reset target
* parallelism, but it is only accessed when signalling or awaiting
* work, so changes only slowly take effect in creating threads or
* letting them time out and terminate when idle.
*
* Array "queues" holds references to WorkQueues. It is updated
* (only during worker creation and termination) under the
* runState lock. It is otherwise concurrently readable but reads
* for use in scans (see below) are always prefaced by a volatile
* read of runState (or equivalent constructions), ensuring that
* its state is current at the point it is used (which is all we
* require). To simplify index-based operations, the array size is
* always a power of two, and all readers must tolerate null
* slots. Worker queues are at odd indices. Worker phase ids
* masked with SMASK match their index. Shared (submission) queues
* are at even indices. Grouping them together in this way aids in
* task scanning: At top-level, both kinds of queues should be
* sampled with approximately the same probability, which is
* simpler if they are all in the same array. But we also need to
* identify what kind they are without looking at them, leading to
* this odd/even scheme. One disadvantage is that there are
* usually many fewer submission queues, so there can be many
* wasted probes (null slots). But this is still cheaper than
* alternatives. Other loops over the queues array vary in origin
* and stride depending on whether they cover only submission
* (even) or worker (odd) queues or both, and whether they require
* randomness (in which case cyclically exhaustive strides may be
* used).
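*
* A sketch of one such randomized, cyclically exhaustive scan
* (names illustrative): because the table length n is a power of
* two, any odd stride is relatively prime to it, so all slots are
* visited:
*
*     int n = queues.length, stride = (r >>> 16) | 1; // odd stride
*     for (int i = r, k = n; k > 0; --k, i += stride) {
*         WorkQueue q;
*         if ((q = queues[i & (n - 1)]) != null) {
*             // screen by parity and/or emptiness, then try to poll q
*         }
*     }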
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker or task references that would prevent
* GC, all accesses to workQueues in waiting, signalling, and
* control methods are via indices into the queues array (which is
* one source of some of the messy code constructions here). In
* essence, the queues array serves as a weak reference
* mechanism. In particular, the stack top subfield of ctl stores
* indices, not references. Operations on queues obtained from
* these indices remain valid (with at most some unnecessary extra
* work) even if an underlying worker failed and was replaced by
* another at the same index. During termination, worker queue
* array updates are disabled.
*
* Queuing Idle Workers. Unlike HPC work-stealing frameworks, we
* cannot let workers spin indefinitely scanning for tasks when
* none can be found immediately, and we cannot start/resume
* workers unless there appear to be tasks available. On the
* other hand, we must quickly prod them into action when new
* tasks are submitted or generated. These latencies are mainly a
* function of JVM park/unpark (and underlying OS) performance,
* which can be slow and variable (even though usages are
* streamlined as much as possible). In many usages, ramp-up time
* is the main limiting factor in overall performance, which is
* compounded at program start-up by JIT compilation and
* allocation. On the other hand, throughput degrades when too
* many threads poll for too few tasks. (See below.)
*
* The "ctl" field atomically maintains total and "released"
* worker counts, plus the head of the available worker queue
* (actually stack, represented by the lower 32bit subfield of
* ctl). Released workers are those known to be scanning for
* and/or running tasks (we cannot accurately determine
* which). Unreleased ("available") workers are recorded in the
* ctl stack. These workers are made eligible for signalling by
* enqueuing in ctl (see method deactivate). This "queue" is a
* form of Treiber stack. This is ideal for activating threads in
* most-recently used order, and improves performance and
* locality, outweighing the disadvantages of being prone to
* contention and inability to release a worker unless it is
* topmost on stack. The top stack state holds the value of the
* "phase" field of the worker: its index and status, plus a
* version counter that, in addition to the count subfields (also
* serving as version stamps), provides protection against Treiber
* stack ABA effects.
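*
* An illustrative (not verbatim) view of this packing, with the two
* 16bit counts in the upper half of ctl and the phase of the top
* enqueued waiter in the lower 32 bits:
*
*     // [ release count (16) | total count (16) | top waiter phase (32) ]
*     long ctl = ((long)releasedCount << 48) |
*                ((long)totalCount    << 32) |
*                (topWaiterPhase & 0xffffffffL);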
*
* Creating workers. To create a worker, we pre-increment counts
* (serving as a reservation), and attempt to construct a
* ForkJoinWorkerThread via its factory. On starting, the new
* thread first invokes registerWorker, where it is assigned an
* index in the queues array (expanding the array if necessary).
* Upon any exception across these steps, or null return from
* factory, deregisterWorker adjusts counts and records
* accordingly. If a null return, the pool continues running with
* fewer than the target number of workers. If exceptional, the
* exception is propagated, generally to some external caller.
*
* WorkQueue field "phase" encodes the queue array id in lower
* bits, and otherwise acts similarly to the pool runState field:
* The "IDLE" bit is clear while active (either a released worker
* or a locked external queue), with other bits serving as a
* version counter to distinguish changes across multiple reads.
* Note that phase field updates lag queue CAS releases; seeing a
* non-idle phase does not guarantee that the worker is available
* (and so is never checked in this way).
*
* The ctl field also serves as the basis for memory
* synchronization surrounding activation. This uses a more
* efficient version of a Dekker-like rule that task producers and
* consumers sync with each other by both writing/CASing ctl (even
* if to its current value). However, rather than CASing ctl to
* its current value in the common case where no action is
* required, we reduce write contention by ensuring that
* signalWork invocations are prefaced with a fully fenced memory
* access (which is usually needed anyway).
*
* Signalling. Signals (in signalWork) cause new or reactivated
* workers to scan for tasks. Method signalWork and its callers
* try to approximate the unattainable goal of having the right
* number of workers activated for the tasks at hand, but must err
* on the side of too many workers vs too few to avoid stalls:
*
* * If computations are purely tree structured, it suffices for
* every worker to activate another when it pushes a task into
* an empty queue, resulting in O(log(#threads)) steps to full
* activation. Emptiness must be conservatively approximated
* (by checking if there is apparently at most one existing
* task) which may result in unnecessary signals. Also, to
* reduce resource usages in some cases, at the expense of
* slower startup in others, activation of an idle thread is
* preferred over creating a new one, here and elsewhere.
*
* * At the other extreme, if "flat" tasks (those that do not in
* turn generate others) come in serially from only a single
* producer, each worker taking its first (since the last
* activation) task from a queue should propagate a signal if
* there are more tasks in that queue. This is equivalent to,
* but generally faster than, arranging for the stealer to take
* multiple tasks, re-pushing one or more on its own queue, and
* signalling (because its queue is empty), also resulting in
* logarithmic full activation time.
*
* * Because we don't know about usage patterns (or most commonly,
* mixtures), we use both approaches, which present even more
* opportunities to over-signal. (Failure to distinguish these
* cases in terms of submission methods was arguably an early
* design mistake.) Note that in either of these contexts,
* signals may be (and often are) unnecessary because active
* workers continue scanning after running tasks without the
* need to be signalled (which is one reason work stealing is
* often faster than alternatives), so additional workers
* aren't needed.
*
* * For rapidly branching tasks that require full pool resources,
* oversignalling is OK, because signalWork will soon have no
* more workers to create or reactivate. But for others (mainly
* externally submitted tasks), overprovisioning may cause very
* noticeable slowdowns due to contention and resource
* wastage. We reduce impact by deactivating workers when
* queues don't have accessible tasks, but reactivating and
* rescanning if other tasks remain.
*
* * Despite these, signal contention and overhead effects still
* occur during ramp-up and ramp-down of small computations.
*
* Scanning. Method runWorker performs top-level scanning for (and
* execution of) tasks by polling a pseudo-random permutation of
* the array (by starting at a given index, and using a constant
* cyclically exhaustive stride.) It uses the same basic polling
* method as WorkQueue.poll(), but restarts with a different
* permutation on each invocation. The pseudorandom generator
* need not have high-quality statistical properties in the long
* term. We use Marsaglia XorShifts, seeded with the Weyl sequence
* from ThreadLocalRandom probes, which are cheap and
* suffice. Each queue's polling attempts to avoid becoming stuck
* when other scanners/pollers stall. Scans do not otherwise
* explicitly take into account core affinities, loads, cache
* localities, etc. However, they do exploit temporal locality
* (which usually approximates these) by preferring to re-poll
* from the same queue after a successful poll before trying
* others, which also reduces bookkeeping, cache traffic, and
* scanning overhead. But it also reduces fairness, which is
* partially counteracted by giving up on detected interference
* (which also reduces contention when too many workers try to
* take small tasks from the same queue).
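*
* The per-scan pseudorandom step is a standard Marsaglia xorshift
* (a sketch; seeding from ThreadLocalRandom probes omitted):
*
*     r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift32 step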
*
* Deactivation. When no tasks are found by a worker in runWorker,
* it tries to deactivate(), giving up (and rescanning) on "ctl"
* contention. To avoid missed signals, the method rescans and
* reactivates if there may have been a signal missed during
* deactivation. Because idle workers are often not
* yet blocked (parked), we use a WorkQueue field to advertise
* that a waiter actually needs unparking upon signal.
*
* Quiescence. Workers scan looking for work, giving up when they
* don't find any, without being sure that none are available.
* However, some required functionality relies on consensus about
* quiescence (also termination, discussed below). The count
* fields in ctl allow accurate discovery of states in which all
* workers are idle. However, because external (asynchronous)
* submitters are not part of this vote, these mechanisms
* themselves do not guarantee that the pool is in a quiescent
* state with respect to methods isQuiescent, shutdown (which
* begins termination when quiescent), helpQuiesce, and indirectly
* others including tryCompensate. Method quiescent() is used in
* all of these contexts. It provides checks that all workers are
* idle and there are no submissions that they could poll if they
* were not idle, retrying on inconsistent reads of queues and
* using the runState seqLock to retry on queue array updates.
* (It also reports quiescence if the pool is terminating.) A true
* report means only that there was a moment at which quiescence
* held. False negatives are inevitable (for example when queue
* indices lag updates, as described above), which is accommodated
* when (tentatively) idle by scanning for work etc, and then
* re-invoking. This includes cases in which the final unparked
* thread (in deactivate()) uses quiescent() to check for tasks
* that could have been added during a race window that would not
* be accompanied by a signal, in which case re-activating itself
* (or any other worker) to rescan. Method helpQuiesce acts
* similarly but cannot rely on ctl counts to determine that all
* workers are inactive because the caller and any others
* executing helpQuiesce are not included in counts.
*
* Termination. A call to shutdownNow invokes tryTerminate to
* atomically set a runState mode bit. However, the process of
* termination is intrinsically non-atomic. The calling thread and
* other workers that terminate thereafter help cancel queued
* tasks and interrupt other workers. These actions race with
* unterminated workers. By default, workers check for
* termination only when accessing pool state. This may take a
* while but suffices for structured computational tasks. But not
* necessarily for others. Class InterruptibleTask (see below)
* further arranges runState checks before executing task bodies,
* and ensures interrupts while terminating. Even so, there are no
* guarantees after an abrupt shutdown that remaining tasks
* complete normally or exceptionally or are cancelled.
* Termination may fail to complete if running tasks ignore both
* task status and interrupts and/or produce more tasks after
* others that could cancel them have exited.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for
* the period given by field keepAlive (default 60sec), which applies
* to the first timeout of a quiescent pool. Subsequent cases use
* minimal delays such that, if still quiescent, all will be
* released soon thereafter. This is checked by setting the
* "source" field of signallee to an invalid value, that will
* remain invalid only if it did not process any tasks.
*
* Joining Tasks
* =============
*
* The "Join" part of ForkJoinPools consists of a set of
* mechanisms that sometimes or always (depending on the kind of
* task) avoid context switching or adding worker threads when one
* task would otherwise be blocked waiting for completion of
* another, basically, just by running that task or one of its
* subtasks if not already taken. These mechanics are disabled for
* InterruptibleTasks, which guarantee that callers do not execute
* submitted tasks.
*
* The basic structure of joining is an extended spin/block scheme
* in which workers check for task completion status between
* steps to find other work, until relevant pool state stabilizes
* enough to believe that no such tasks are available, at which
* point they block. This usually yields a good estimate of when to
* block that would otherwise be harder to approximate.
*
* These forms of helping may increase stack space usage, but that
* space is bounded in tree/dag structured procedurally parallel
* designs to be no more than that if a task were executed only by
* the joining thread. This is arranged by associated task
* subclasses that also help detect and control the ways in which
* this may occur.
*
* Normally, the first option when joining a task that is not done
* is to try to take it from the local queue and run it. Method
* tryRemoveAndExec tries to do so. For tasks with any form of
* subtasks that must be completed first, we try to locate these
* subtasks and run them as well. This is easy when local, but
* when stolen, steal-backs are restricted to the same rules as
* stealing (polling), which requires additional bookkeeping and
* scanning. This cost is still very much worthwhile because of
* its impact on task scheduling and resource control.
*
* The two methods for finding and executing subtasks vary in
* details. The algorithm in helpJoin entails a form of "linear
* helping". Each worker records (in field "source") the index of
* the internal queue from which it last stole a task. (Note:
* because chains cannot include even-numbered external queues,
* they are ignored, and 0 is an OK default. However, the source
* field is set anyway, or eventually to DROPPED, to ensure
* volatile memory synchronization effects.) The scan in method
* helpJoin uses these markers to try to find a worker to help
* (i.e., steal back a task from and execute it) that could make
* progress toward completion of the actively joined task. Thus,
* the joiner executes a task that would be on its own local deque
* if the to-be-joined task had not been stolen. This is a
* conservative variant of the approach described in Wagner &
* Calder "Leapfrogging: a portable technique for implementing
* efficient futures" SIGPLAN Notices, 1993
* (http://portal.acm.org/citation.cfm?id=155354). It differs
* mainly in that we only record queues, not full dependency
* links. This requires a linear scan of the queues to locate
* stealers, but isolates cost to when it is needed, rather than
* adding to per-task overhead. For CountedCompleters, the
* analogous method helpComplete doesn't need stealer-tracking,
* but requires a similar (but simpler) check of completion
* chains.
*
* In either case, searches can fail to locate stealers when
* stalls delay recording sources or issuing subtasks. We avoid
* some of these cases by using snapshotted values of ctl as a
* check that the numbers of workers are not changing, along with
* rescans to deal with contention and stalls. But even when
* accurately identified, stealers might not ever produce a task
* that the joiner can in turn help with.
*
* Related method helpAsyncBlocker does not directly rely on
* subtask structure, but instead avoids or postpones blocking of
* tagged tasks (CompletableFuture.AsynchronousCompletionTask) by
* executing other asyncs that can be processed in any order.
* This is currently invoked only in non-join-based blocking
* contexts from classes CompletableFuture and
* SubmissionPublisher, which could be further generalized.
*
* When any of the above fail to avoid blocking, we rely on
* "compensation" -- an indirect form of context switching that
* either activates an existing worker to take the place of the
* blocked one, or expands the number of workers.
*
* Compensation does not by default aim to keep exactly the target
* parallelism number of unblocked threads running at any given
* time. Some previous versions of this class employed immediate
* compensations for any blocked join. However, in practice, the
* vast majority of blockages are transient byproducts of GC and
* other JVM or OS activities that are made worse by replacement,
* which causes longer-term oversubscription. These are inevitable
* without (unobtainably) perfect information about whether worker
* creation is actually necessary. False alarms are common enough
* to negatively impact performance, so compensation is by default
* attempted only when it appears possible that the pool could
* stall due to lack of any unblocked workers. However, we allow
* users to override defaults using the long form of the
* ForkJoinPool constructor. The compensation mechanism may also
* be bounded. Bounds for the commonPool better enable JVMs to
* cope with programming errors and abuse before running out of
* resources to do so.
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker. This API was
* designed to highlight the uncertainty of compensation decisions
* by requiring implementation of method isReleasable to abort
* compensation during attempts to obtain a stable snapshot. But
* users now rely upon the fact that if isReleasable always
* returns false, the API can be used to obtain precautionary
* compensation, which is sometimes the only reasonable option
* when running unknown code in tasks; this is now supported more
* simply (see method beginCompensatedBlock).
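*
* A sketch of such precautionary use ("lock" is a hypothetical
* blocking resource; isReleasable unconditionally reports false):
*
*     ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() {
*         public boolean block() throws InterruptedException {
*             lock.lockInterruptibly(); // possibly blocking action
*             return true;              // done; no further blocking
*         }
*         public boolean isReleasable() { return false; }
*     });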
*
* Common Pool
* ===========
*
* The static common pool always exists after static
* initialization. Since it (or any other created pool) need
* never be used, we minimize initial construction overhead and
* footprint to the setup of about a dozen fields, although with
* some System property parsing and security processing that takes
* far longer than the actual construction when SecurityManagers
* are used or properties are set. The common pool is
* distinguished by having a null workerNamePrefix (which is an
* odd convention, but avoids the need to decode status in factory
* classes). It also has PRESET_SIZE config set if parallelism
* was configured by system property.
*
* When external threads use the common pool, they can perform
* subtask processing (see helpComplete and related methods) upon
* joins, unless they are submitted using ExecutorService
* submission methods, which implicitly disallow this. This
* caller-helps policy makes it sensible to set common pool
* parallelism level to one (or more) less than the total number
* of available cores, or even zero for pure caller-runs. External
* threads waiting for joins first check the common pool for their
* task, which fails quickly if the caller did not fork to common
* pool.
*
* Guarantees for common pool parallelism zero are limited to
* tasks that are joined by their callers in a tree-structured
* fashion or use CountedCompleters (as is true for jdk
* parallelStreams). Support infiltrates several methods,
* including those that retry helping steps until we are sure that
* none apply if there are no workers.
*
* As a more appropriate default in managed environments, unless
* overridden by system properties, we use workers of subclass
* InnocuousForkJoinWorkerThread when there is a SecurityManager
* present. These workers have no permissions set, do not belong
* to any user-defined ThreadGroup, and clear all ThreadLocals
* after executing any top-level task. The associated mechanics
* may be JVM-dependent and must access particular Thread class
* fields to achieve this effect.
*
* InterruptibleTasks
* ====================
*
* Regular ForkJoinTasks manage task cancellation (method cancel)
* independently from the interrupt status of threads running
* tasks. Interrupts are issued internally only while
* terminating, to wake up workers and cancel queued tasks. By
* default, interrupts are cleared only when necessary to ensure
* that calls to LockSupport.park do not loop indefinitely (park
* returns immediately if the current thread is interrupted).
*
* To comply with ExecutorService specs, we use subclasses of
* abstract class InterruptibleTask for tasks that require
* stronger interruption and cancellation guarantees. External
* submitters never run these tasks, even if in the common pool.
* InterruptibleTasks include a "runner" field (implemented
* similarly to FutureTask) to support cancel(true). Upon pool
* shutdown, runners are interrupted so they can cancel. Since
* external joining callers never run these tasks, they must await
* cancellation by others, which can occur along several different
* paths.
*
* Across these APIs, rules for reporting exceptions for tasks
* with results accessed via join() differ from those via get(),
* which differ from those invoked using pool submit methods by
* non-workers (which comply with Future.get() specs). Internal
* usages of ForkJoinTasks ignore interrupt status when executing
* or awaiting completion. Otherwise, reporting task results or
* exceptions is preferred to throwing InterruptedExceptions,
* which are in turn preferred to timeouts. Similarly, completion
* status is preferred to reporting cancellation. Cancellation is
* reported as an unchecked exception by join(), and by worker
* calls to get(), but is otherwise wrapped in a (checked)
* ExecutionException.
*
* Worker Threads cannot be VirtualThreads, as enforced by
* requiring ForkJoinWorkerThreads in factories. There are
* several constructions relying on this. However, as of this
* writing, virtual thread bodies are by default run as some form
* of InterruptibleTask.
*
* Memory placement
* ================
*
* Performance is very sensitive to placement of instances of
* ForkJoinPool and WorkQueues and their queue arrays, as well as
* the placement of their fields. Cache misses and contention due
* to false-sharing have been observed to slow down some programs
* by more than a factor of four. Effects may vary across initial
* memory configurations, applications, and different garbage
* collectors and GC settings, so there is no perfect solution.
* Too much isolation may generate more cache misses in common
* cases (because some fields and slots are usually read at the
* same time). The @Contended annotation provides only rough
* control (for good reason). Similarly for relying on fields
* being placed in size-sorted declaration order.
*
* We isolate the ForkJoinPool.ctl field that otherwise causes the
* most false-sharing misses with respect to other fields. Also,
* ForkJoinPool fields are ordered such that fields less prone to
* contention effects are first, offsetting those that otherwise
* would be, while also reducing total footprint vs using
* multiple @Contended regions, which tends to slow down
* less-contended applications. To help arrange this, some
* non-reference fields are declared as "long" even when ints or
* shorts would suffice. For class WorkQueue, an embedded padded
* region segregates fields (all declared as "int") most heavily
* updated by owners from those most commonly read by stealers or
* other management.
*
* Initial sizing and resizing of WorkQueue arrays is an even more
* delicate tradeoff because the best strategy systematically
* varies across garbage collectors. Small arrays are better for
* locality and reduce GC scan time, but large arrays reduce both
* direct false-sharing and indirect cases due to GC bookkeeping
* (cardmarks etc), and reduce the number of resizes, which are
* not especially fast because they require atomic transfers.
* Currently, arrays for workers are initialized to be just large
* enough to avoid resizing in most tree-structured tasks, but
* larger for external queues where both false-sharing problems
* and the need for resizing are more common. (Maintenance note:
* any changes in fields, queues, or their uses, or JVM layout
* policies, must be accompanied by re-evaluation of these
* placement and sizing decisions.)
*
* Style notes
* ===========
*
* Memory ordering relies mainly on atomic operations (CAS,
* getAndSet, getAndAdd) along with moded accesses. These use
* jdk-internal Unsafe for atomics and special memory modes,
* rather than VarHandles, to avoid initialization dependencies in
* other jdk components that require early parallelism. This can
* be awkward and ugly, but also reflects the need to control
* outcomes across the unusual cases that arise in very racy code
* with very few invariants. All atomic task slot updates use
* Unsafe operations requiring offset positions, not indices, as
* computed by method slotOffset. All fields are read into locals
* before use, and null-checked if they are references, even if
* they can never be null under current usages. Usually,
* computations (held in local variables) are defined as soon as
* logically enabled, sometimes to convince compilers that they
* may be performed despite memory ordering constraints. Array
* accesses using masked indices include checks (that are always
* true) that the array length is non-zero to avoid compilers
* inserting more expensive traps. Code is usually written in a
* "C"-like style of listing declarations at the heads of methods
* or blocks, and using inline assignments on first encounter.
* Nearly all explicit checks lead to bypass/return, not exception
* throws, because they may legitimately arise during shutdown. A
* few unusual loop constructions hint (with varying
* effectiveness) to JVMs where (not) to place safepoints.
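*
* A sketch of the offset computation mentioned above (with ABASE
* and ASHIFT as derived from Unsafe array base/scale queries):
*
*     static long slotOffset(int index) {
*         return ((long)index << ASHIFT) + ABASE;
*     }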
*
* There is a lot of representation-level coupling among classes
* ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
* fields of WorkQueue maintain data structures managed by
* ForkJoinPool, so are directly accessed. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. Several methods intrinsically sprawl because
* they must accumulate sets of consistent reads of fields held in
* local variables. Some others are artificially broken up to
* reduce producer/consumer imbalances due to dynamic compilation.
* There are also other coding oddities (including several
* unnecessary-looking hoisted null checks) that help some methods
* perform reasonably even when interpreted (not compiled).
*
* The order of declarations in this file is (with a few exceptions):
* (1) Static configuration constants
* (2) Static utility functions
* (3) Nested (static) classes
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*
*/
// static configuration constants
/**
* Default idle timeout value (in milliseconds) for idle threads
* to park waiting for new work before terminating.
*/
static final long DEFAULT_KEEPALIVE = 60_000L;
/**
* Undershoot tolerance for idle timeouts, also serving as the
* minimum allowed timeout value.
*/
static final long TIMEOUT_SLOP = 20L;
/**
* The default value for common pool maxSpares. Overridable using
* the "java.util.concurrent.ForkJoinPool.common.maximumSpares"
* system property. The default value is far in excess of normal
* requirements, but also far short of maximum capacity and typical OS
* thread limits, so allows JVMs to catch misuse/abuse before
* running out of resources needed to do so.
*/
static final int DEFAULT_COMMON_MAX_SPARES = 256;
/**
* Initial capacity of work-stealing queue array for workers.
* Must be a power of two, at least 2. See above.
*/