
Commit 0b9b1df

Fix format and tidy for code moved from rayon
1 parent 35c5144 commit 0b9b1df

File tree

25 files changed: 190 additions, 328 deletions

compiler/rustc_thread_pool/src/broadcast/mod.rs

Lines changed: 7 additions & 10 deletions

@@ -1,10 +1,11 @@
-use crate::job::{ArcJob, StackJob};
-use crate::latch::{CountLatch, LatchRef};
-use crate::registry::{Registry, WorkerThread};
 use std::fmt;
 use std::marker::PhantomData;
 use std::sync::Arc;
 
+use crate::job::{ArcJob, StackJob};
+use crate::latch::{CountLatch, LatchRef};
+use crate::registry::{Registry, WorkerThread};
+
 mod test;
 
 /// Executes `op` within every thread in the current threadpool. If this is
@@ -53,10 +54,7 @@ impl<'a> BroadcastContext<'a> {
     pub(super) fn with<R>(f: impl FnOnce(BroadcastContext<'_>) -> R) -> R {
         let worker_thread = WorkerThread::current();
         assert!(!worker_thread.is_null());
-        f(BroadcastContext {
-            worker: unsafe { &*worker_thread },
-            _marker: PhantomData,
-        })
+        f(BroadcastContext { worker: unsafe { &*worker_thread }, _marker: PhantomData })
     }
 
     /// Our index amongst the broadcast threads (ranges from `0..self.num_threads()`).
@@ -108,9 +106,8 @@ where
     let current_thread = WorkerThread::current().as_ref();
     let tlv = crate::tlv::get();
     let latch = CountLatch::with_count(n_threads, current_thread);
-    let jobs: Vec<_> = (0..n_threads)
-        .map(|_| StackJob::new(tlv, &f, LatchRef::new(&latch)))
-        .collect();
+    let jobs: Vec<_> =
+        (0..n_threads).map(|_| StackJob::new(tlv, &f, LatchRef::new(&latch))).collect();
     let job_refs = jobs.iter().map(|job| job.as_job_ref());
 
     registry.inject_broadcast(job_refs);
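
For readers unfamiliar with this module: `broadcast` runs one closure on every worker thread of the pool and collects the per-thread results, as the doc comment in the first hunk says. A minimal usage sketch through the public rayon crate, from which this code was moved; the `rayon = "1"` dependency and the printed output are illustrative, not part of this commit:

// Illustrative only: the same operation through the public rayon crate
// (add `rayon = "1"` to Cargo.toml to run this outside the compiler).
fn main() {
    // The closure runs once on every worker thread; each call receives a
    // BroadcastContext carrying that thread's index and the pool size.
    let ids: Vec<usize> = rayon::broadcast(|ctx| ctx.index());
    assert_eq!(ids.len(), rayon::current_num_threads());
    println!("ran on {} threads: {:?}", ids.len(), ids);
}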

compiler/rustc_thread_pool/src/broadcast/test.rs renamed to compiler/rustc_thread_pool/src/broadcast/tests.rs

Lines changed: 3 additions & 2 deletions

@@ -1,11 +1,12 @@
 #![cfg(test)]
 
-use crate::ThreadPoolBuilder;
+use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::mpsc::channel;
-use std::sync::Arc;
 use std::{thread, time};
 
+use crate::ThreadPoolBuilder;
+
 #[test]
 fn broadcast_global() {
     let v = crate::broadcast(|ctx| ctx.index());

compiler/rustc_thread_pool/src/job.rs

Lines changed: 11 additions & 15 deletions

@@ -1,13 +1,14 @@
-use crate::latch::Latch;
-use crate::tlv;
-use crate::tlv::Tlv;
-use crate::unwind;
-use crossbeam_deque::{Injector, Steal};
 use std::any::Any;
 use std::cell::UnsafeCell;
 use std::mem;
 use std::sync::Arc;
 
+use crossbeam_deque::{Injector, Steal};
+
+use crate::latch::Latch;
+use crate::tlv::Tlv;
+use crate::{tlv, unwind};
+
 pub(super) enum JobResult<T> {
     None,
     Ok(T),
@@ -29,7 +30,7 @@ pub(super) trait Job {
 /// Effectively a Job trait object. Each JobRef **must** be executed
 /// exactly once, or else data may leak.
 ///
-/// Internally, we store the job's data in a `*const ()` pointer.  The
+/// Internally, we store the job's data in a `*const ()` pointer. The
 /// true type is something like `*const StackJob<...>`, but we hide
 /// it. We also carry the "execute fn" from the `Job` trait.
 pub(super) struct JobRef {
@@ -48,10 +49,7 @@ impl JobRef {
         T: Job,
     {
         // erase types:
-        JobRef {
-            pointer: data as *const (),
-            execute_fn: <T as Job>::execute,
-        }
+        JobRef { pointer: data as *const (), execute_fn: <T as Job>::execute }
     }
 
     /// Returns an opaque handle that can be saved and compared,
@@ -69,7 +67,7 @@ impl JobRef {
 
 /// A job that will be owned by a stack slot. This means that when it
 /// executes it need not free any heap data, the cleanup occurs when
-/// the stack frame is later popped.  The function parameter indicates
+/// the stack frame is later popped. The function parameter indicates
 /// `true` if the job was stolen -- executed on a different thread.
 pub(super) struct StackJob<L, F, R>
 where
@@ -248,13 +246,11 @@ pub(super) struct JobFifo {
 
 impl JobFifo {
     pub(super) fn new() -> Self {
-        JobFifo {
-            inner: Injector::new(),
-        }
+        JobFifo { inner: Injector::new() }
     }
 
     pub(super) unsafe fn push(&self, job_ref: JobRef) -> JobRef {
-        // A little indirection ensures that spawns are always prioritized in FIFO order.  The
+        // A little indirection ensures that spawns are always prioritized in FIFO order. The
         // jobs in a thread's deque may be popped from the back (LIFO) or stolen from the front
        // (FIFO), but either way they will end up popping from the front of this queue.
         self.inner.push(job_ref);
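
The `JobRef` doc comment touched above describes the type-erasure scheme: store the job behind a `*const ()` together with a monomorphized "execute fn" that remembers the real type. A self-contained sketch of that idea with simplified types (this is not the crate's `Job` trait, just the pattern):

// Minimal illustration of the `*const ()` + "execute fn" pattern; the real
// JobRef stores `<T as Job>::execute` rather than this ad-hoc helper.
struct ErasedJob {
    pointer: *const (),
    execute_fn: unsafe fn(*const ()),
}

unsafe fn execute_for<F: Fn()>(pointer: *const ()) {
    // Recover the concrete closure type and run it.
    let job = unsafe { &*(pointer as *const F) };
    job();
}

fn erase<F: Fn()>(job: &F) -> ErasedJob {
    ErasedJob { pointer: job as *const F as *const (), execute_fn: execute_for::<F> }
}

fn main() {
    let job = || println!("executed through an erased pointer");
    let erased = erase(&job);
    // Safety: `job` is still alive, and we run the erased reference only once.
    unsafe { (erased.execute_fn)(erased.pointer) };
}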

compiler/rustc_thread_pool/src/join/mod.rs

Lines changed: 8 additions & 9 deletions

@@ -1,11 +1,10 @@
+use std::any::Any;
+
 use crate::job::StackJob;
 use crate::latch::SpinLatch;
 use crate::registry::{self, WorkerThread};
 use crate::tlv::{self, Tlv};
-use crate::unwind;
-use std::any::Any;
-
-use crate::FnContext;
+use crate::{FnContext, unwind};
 
 #[cfg(test)]
 mod test;
@@ -22,7 +21,7 @@ mod test;
 /// it.
 ///
 /// When `join` is called from outside the thread pool, the calling
-/// thread will block while the closures execute in the pool.  When
+/// thread will block while the closures execute in the pool. When
 /// `join` is called within the pool, the calling thread still actively
 /// participates in the thread pool. It will begin by executing closure
 /// A (on the current thread). While it is doing that, it will advertise
@@ -80,13 +79,13 @@ mod test;
 /// CPU-bound tasks that do not perform I/O or other blocking
 /// operations. If you do perform I/O, and that I/O should block
 /// (e.g., waiting for a network request), the overall performance may
-/// be poor.  Moreover, if you cause one closure to be blocked waiting
+/// be poor. Moreover, if you cause one closure to be blocked waiting
 /// on another (for example, using a channel), that could lead to a
 /// deadlock.
 ///
 /// # Panics
 ///
-/// No matter what happens, both closures will always be executed.  If
+/// No matter what happens, both closures will always be executed. If
 /// a single closure panics, whether it be the first or second
 /// closure, that panic will be propagated and hence `join()` will
 /// panic with the same panic value. If both closures panic, `join()`
@@ -109,7 +108,7 @@ where
 /// Identical to `join`, except that the closures have a parameter
 /// that provides context for the way the closure has been called,
 /// especially indicating whether they're executing on a different
-/// thread than where `join_context` was called.  This will occur if
+/// thread than where `join_context` was called. This will occur if
 /// the second job is stolen by a different thread, or if
 /// `join_context` was called from outside the thread pool to begin
 /// with.
@@ -148,7 +147,7 @@ where
         };
 
         // Now that task A has finished, try to pop job B from the
-        // local stack.  It may already have been popped by job A; it
+        // local stack. It may already have been popped by job A; it
         // may also have been stolen. There may also be some tasks
         // pushed on top of it in the stack, and we will have to pop
         // those off to get to it.
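
The doc comments reflowed in this file state two guarantees worth illustrating: `join` returns both closures' results, and a panic in either closure is propagated by `join` itself. A small sketch using the public rayon crate, which exposes the same `join`; the closures and counts here are made up for the example:

// Illustrative only; requires `rayon = "1"`.
fn main() {
    // Both closures always run, potentially in parallel, and both results
    // come back as a tuple.
    let (evens, odds) = rayon::join(
        || (0..1_000).filter(|n| n % 2 == 0).count(),
        || (0..1_000).filter(|n| n % 2 == 1).count(),
    );
    assert_eq!(evens + odds, 1_000);

    // A panic in either closure is resumed by join with the same payload.
    let caught = std::panic::catch_unwind(|| rayon::join(|| "a runs", || panic!("b panics")));
    assert!(caught.is_err());
}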

compiler/rustc_thread_pool/src/join/test.rs renamed to compiler/rustc_thread_pool/src/join/tests.rs

Lines changed: 3 additions & 2 deletions

@@ -1,11 +1,12 @@
 //! Tests for the join code.
 
-use super::*;
-use crate::ThreadPoolBuilder;
 use rand::distr::StandardUniform;
 use rand::{Rng, SeedableRng};
 use rand_xorshift::XorShiftRng;
 
+use super::*;
+use crate::ThreadPoolBuilder;
+
 fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
     if v.len() <= 1 {
         return;
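
The `quick_sort` test above is truncated by the diff context. For orientation, this is the usual shape of a join-based quicksort, written here against the public `rayon::join`; the partition helper and the data are illustrative and not the test file's actual code:

// Illustrative sketch, not the test file's code; requires `rayon = "1"`.
fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
    if v.len() <= 1 {
        return;
    }
    let mid = partition(v);
    let (lo, hi) = v.split_at_mut(mid);
    // The two halves are independent, so idle workers may steal one of them.
    rayon::join(|| quick_sort(lo), || quick_sort(hi));
}

// Lomuto partition around the last element; returns the pivot's final index.
fn partition<T: PartialOrd>(v: &mut [T]) -> usize {
    let pivot = v.len() - 1;
    let mut i = 0;
    for j in 0..pivot {
        if v[j] <= v[pivot] {
            v.swap(i, j);
            i += 1;
        }
    }
    v.swap(i, pivot);
    i
}

fn main() {
    let mut data = vec![5, 3, 8, 1, 9, 2, 7];
    quick_sort(&mut data);
    assert_eq!(data, vec![1, 2, 3, 5, 7, 8, 9]);
}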

compiler/rustc_thread_pool/src/latch.rs

Lines changed: 13 additions & 41 deletions

@@ -28,7 +28,7 @@ use crate::registry::{Registry, WorkerThread};
 /// - Once `probe()` returns true, all memory effects from the `set()`
 ///   are visible (in other words, the set should synchronize-with
 ///   the probe).
-/// - Once `set()` occurs, the next `probe()` *will* observe it.  This
+/// - Once `set()` occurs, the next `probe()` *will* observe it. This
 ///   typically requires a seq-cst ordering. See [the "tickle-then-get-sleepy" scenario in the sleep
 ///   README](/src/sleep/README.md#tickle-then-get-sleepy) for details.
 pub(super) trait Latch {
@@ -78,29 +78,23 @@ pub(super) struct CoreLatch {
 impl CoreLatch {
     #[inline]
     fn new() -> Self {
-        Self {
-            state: AtomicUsize::new(0),
-        }
+        Self { state: AtomicUsize::new(0) }
     }
 
     /// Invoked by owning thread as it prepares to sleep. Returns true
     /// if the owning thread may proceed to fall asleep, false if the
     /// latch was set in the meantime.
     #[inline]
     pub(super) fn get_sleepy(&self) -> bool {
-        self.state
-            .compare_exchange(UNSET, SLEEPY, Ordering::SeqCst, Ordering::Relaxed)
-            .is_ok()
+        self.state.compare_exchange(UNSET, SLEEPY, Ordering::SeqCst, Ordering::Relaxed).is_ok()
     }
 
     /// Invoked by owning thread as it falls asleep sleep. Returns
     /// true if the owning thread should block, or false if the latch
     /// was set in the meantime.
     #[inline]
     pub(super) fn fall_asleep(&self) -> bool {
-        self.state
-            .compare_exchange(SLEEPY, SLEEPING, Ordering::SeqCst, Ordering::Relaxed)
-            .is_ok()
+        self.state.compare_exchange(SLEEPY, SLEEPING, Ordering::SeqCst, Ordering::Relaxed).is_ok()
     }
 
     /// Invoked by owning thread as it falls asleep sleep. Returns
@@ -110,8 +104,7 @@ impl CoreLatch {
     pub(super) fn wake_up(&self) {
         if !self.probe() {
             let _ =
-                self.state
-                    .compare_exchange(SLEEPING, UNSET, Ordering::SeqCst, Ordering::Relaxed);
+                self.state.compare_exchange(SLEEPING, UNSET, Ordering::SeqCst, Ordering::Relaxed);
         }
     }
 
@@ -166,15 +159,12 @@ impl<'r> SpinLatch<'r> {
         }
     }
 
-    /// Creates a new spin latch for cross-threadpool blocking.  Notably, we
+    /// Creates a new spin latch for cross-threadpool blocking. Notably, we
     /// need to make sure the registry is kept alive after setting, so we can
     /// safely call the notification.
     #[inline]
     pub(super) fn cross(thread: &'r WorkerThread) -> SpinLatch<'r> {
-        SpinLatch {
-            cross: true,
-            ..SpinLatch::new(thread)
-        }
+        SpinLatch { cross: true, ..SpinLatch::new(thread) }
     }
 
     #[inline]
@@ -235,10 +225,7 @@ pub(super) struct LockLatch {
 impl LockLatch {
     #[inline]
     pub(super) fn new() -> LockLatch {
-        LockLatch {
-            m: Mutex::new(false),
-            v: Condvar::new(),
-        }
+        LockLatch { m: Mutex::new(false), v: Condvar::new() }
     }
 
     /// Block until latch is set, then resets this lock latch so it can be reused again.
@@ -288,9 +275,7 @@ pub(super) struct OnceLatch {
 impl OnceLatch {
     #[inline]
     pub(super) fn new() -> OnceLatch {
-        Self {
-            core_latch: CoreLatch::new(),
-        }
+        Self { core_latch: CoreLatch::new() }
     }
 
     /// Set the latch, then tickle the specific worker thread,
@@ -372,9 +357,7 @@ impl CountLatch {
                     registry: Arc::clone(owner.registry()),
                     worker_index: owner.index(),
                 },
-                None => CountLatchKind::Blocking {
-                    latch: LockLatch::new(),
-                },
+                None => CountLatchKind::Blocking { latch: LockLatch::new() },
             },
         }
     }
@@ -387,11 +370,7 @@ impl CountLatch {
 
     pub(super) fn wait(&self, owner: Option<&WorkerThread>) {
         match &self.kind {
-            CountLatchKind::Stealing {
-                latch,
-                registry,
-                worker_index,
-            } => unsafe {
+            CountLatchKind::Stealing { latch, registry, worker_index } => unsafe {
                 let owner = owner.expect("owner thread");
                 debug_assert_eq!(registry.id(), owner.registry().id());
                 debug_assert_eq!(*worker_index, owner.index());
@@ -409,11 +388,7 @@ impl Latch for CountLatch {
         // NOTE: Once we call `set` on the internal `latch`,
         // the target may proceed and invalidate `this`!
         match (*this).kind {
-            CountLatchKind::Stealing {
-                ref latch,
-                ref registry,
-                worker_index,
-            } => {
+            CountLatchKind::Stealing { ref latch, ref registry, worker_index } => {
                 let registry = Arc::clone(registry);
                 if CoreLatch::set(latch) {
                     registry.notify_worker_latch_is_set(worker_index);
@@ -433,10 +408,7 @@ pub(super) struct LatchRef<'a, L> {
 
 impl<L> LatchRef<'_, L> {
     pub(super) fn new(inner: &L) -> LatchRef<'_, L> {
-        LatchRef {
-            inner,
-            marker: PhantomData,
-        }
+        LatchRef { inner, marker: PhantomData }
     }
 }
 
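The `Latch` contract quoted at the top of this diff requires that once `set()` happens, a `probe()` that observes it also observes everything written before the set (the set synchronizes with the probe). A minimal self-contained illustration of that contract using one atomic flag; this is a toy, not the crate's `CoreLatch`, which additionally tracks the sleepy/sleeping states edited above:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;

// set() publishes `payload` and then flips the flag; a probe() that sees the
// flag therefore also sees the payload. SeqCst provides both the
// synchronizes-with edge and the "next probe will observe it" guarantee.
struct SimpleLatch {
    payload: AtomicUsize,
    set: AtomicBool,
}

fn main() {
    let latch = Arc::new(SimpleLatch {
        payload: AtomicUsize::new(0),
        set: AtomicBool::new(false),
    });

    let setter = {
        let latch = Arc::clone(&latch);
        thread::spawn(move || {
            latch.payload.store(42, Ordering::SeqCst);
            latch.set.store(true, Ordering::SeqCst);
        })
    };

    // Spin until the latch is set, then read the value it published.
    while !latch.set.load(Ordering::SeqCst) {
        std::hint::spin_loop();
    }
    assert_eq!(latch.payload.load(Ordering::SeqCst), 42);
    setter.join().unwrap();
}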
