@@ -5,6 +5,7 @@ use core::time::Duration;
 use std::collections::HashSet;
 use std::cell::RefCell;
 
+#[cfg(not(feature = "backtrace"))]
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 use std::sync::Mutex as StdMutex;
@@ -15,7 +16,12 @@ use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
 use std::sync::Condvar as StdCondvar;
 
 #[cfg(feature = "backtrace")]
-use backtrace::Backtrace;
+use {prelude::HashMap, backtrace::Backtrace, std::sync::Once};
+
+#[cfg(not(feature = "backtrace"))]
+struct Backtrace{}
+#[cfg(not(feature = "backtrace"))]
+impl Backtrace { fn new() -> Backtrace { Backtrace {} } }
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -46,14 +52,19 @@ thread_local! {
 	/// We track the set of locks currently held by a reference to their `LockMetadata`
 	static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
 }
+#[cfg(not(feature = "backtrace"))]
 static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
 
+#[cfg(feature = "backtrace")]
+static mut LOCKS: Option<StdMutex<HashMap<u64, Arc<LockMetadata>>>> = None;
+#[cfg(feature = "backtrace")]
+static LOCKS_INIT: Once = Once::new();
+
 /// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
 struct LockMetadata {
 	lock_idx: u64,
-	locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
-	#[cfg(feature = "backtrace")]
+	locked_before: StdMutex<HashSet<LockDep>>,
 	lock_construction_bt: Backtrace,
 }
 impl PartialEq for LockMetadata {
@@ -64,14 +75,61 @@ impl std::hash::Hash for LockMetadata {
 	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
 }
 
+struct LockDep {
+	lock: Arc<LockMetadata>,
+	lockdep_trace: Option<Backtrace>,
+}
+impl LockDep {
+	/// Note that `Backtrace::new()` is rather expensive so we rely on the caller to fill in the
+	/// `lockdep_backtrace` field after ensuring we need it.
+	fn new_without_bt(lock: &Arc<LockMetadata>) -> Self {
+		Self { lock: Arc::clone(lock), lockdep_trace: None }
+	}
+}
+impl PartialEq for LockDep {
+	fn eq(&self, o: &LockDep) -> bool { self.lock.lock_idx == o.lock.lock_idx }
+}
+impl Eq for LockDep {}
+impl std::hash::Hash for LockDep {
+	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock.lock_idx); }
+}
+
 impl LockMetadata {
-	fn new() -> LockMetadata {
-		LockMetadata {
-			locked_before: StdMutex::new(HashSet::new()),
-			lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-			#[cfg(feature = "backtrace")]
-			lock_construction_bt: Backtrace::new(),
+	fn new() -> Arc<LockMetadata> {
+		let lock_idx;
+		let backtrace = Backtrace::new();
+
+		#[cfg(not(feature = "backtrace"))]
+		{ lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64; }
+
+		#[cfg(feature = "backtrace")]
+		{
+			let mut ip = None;
+			// Find the first frame which was *not* in debug_sync (or which is in our tests) and
+			// use that as the mutex construction site. The second frame should always be in
+			// debug_sync (the first may be in Backtrace itself).
+			let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+			for (idx, frame) in backtrace.frames().iter().enumerate() {
+				let symbol_name = frame.symbols().last().unwrap().name().unwrap().as_str().unwrap();
+				assert!(idx != 1 || sync_mutex_constr_regex.is_match(symbol_name),
+					"{} should contain lightning...debug_sync...new", symbol_name);
+				if !sync_mutex_constr_regex.is_match(symbol_name) {
+					ip = Some(frame.ip() as usize as u64);
+					break;
+				}
+			}
+			lock_idx = ip.unwrap();
+			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+			if let Some(metadata) = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap().get(&lock_idx) {
+				return Arc::clone(&metadata);
+			}
 		}
+
+		Arc::new(LockMetadata {
+			locked_before: StdMutex::new(HashSet::new()),
+			lock_idx,
+			lock_construction_bt: backtrace,
+		})
 	}
 
 	// Returns whether we were a recursive lock (only relevant for read)
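With the backtrace feature enabled, LockMetadata::new above keys metadata by construction site rather than by instance: the instruction pointer of the first frame outside debug_sync indexes the global LOCKS map, so every Mutex built from the same line of code shares one LockMetadata. The following standalone sketch shows the call-site-keying idea in isolation; construction_site_key is a hypothetical helper, not part of the patch, and it uses plain substring checks where the patch uses a regex.

use backtrace::Backtrace;

// Sketch only: derive a stable key for "where was this lock constructed?"
// from the first backtrace frame outside this module.
fn construction_site_key() -> u64 {
	let bt = Backtrace::new();
	for frame in bt.frames() {
		let name = frame.symbols().last()
			.and_then(|sym| sym.name())
			.and_then(|n| n.as_str())
			.unwrap_or("");
		// Skip frames belonging to Backtrace itself and to debug_sync; the
		// first remaining frame is the caller that constructed the lock.
		if !name.contains("backtrace") && !name.contains("debug_sync") {
			return frame.ip() as usize as u64;
		}
	}
	0 // no usable symbol information
}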
@@ -89,18 +147,25 @@ impl LockMetadata {
 			}
 			for locked in held.borrow().iter() {
 				if !read && *locked == *this {
-					panic!("Tried to lock a lock while it was held!");
+					// With `feature = "backtrace"` set, we may be looking at different instances
+					// of the same lock.
+					debug_assert!(cfg!(feature = "backtrace"), "Tried to lock a lock while it was held!");
 				}
 				for locked_dep in locked.locked_before.lock().unwrap().iter() {
-					if *locked_dep == *this {
+					if locked_dep.lock == *this && locked_dep.lock != *locked {
 						#[cfg(feature = "backtrace")]
-						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock constructed at:\n{:?}\n\nLock dep created at:\n{:?}\n\n", locked.lock_construction_bt, locked_dep.lockdep_trace);
 						#[cfg(not(feature = "backtrace"))]
 						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
 					}
 				}
 				// Insert any already-held locks in our locked-before set.
-				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+				let mut locked_before = this.locked_before.lock().unwrap();
+				let mut lockdep = LockDep::new_without_bt(locked);
+				if !locked_before.contains(&lockdep) {
+					lockdep.lockdep_trace = Some(Backtrace::new());
+					locked_before.insert(lockdep);
+				}
 			}
 			held.borrow_mut().insert(Arc::clone(this));
 			inserted = true;
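Read together, the checks above say: when acquiring this lock, if any currently held lock already lists it in its own locked-before set then the two locks have been taken in both orders, which is a lockorder inversion; otherwise each held lock is recorded in this lock's locked-before set, capturing a backtrace only the first time a given pair is seen. A small illustration, not part of the patch, of the sequence that trips the panic (it mirrors the lockorder_fail test later in the diff):

// Illustration of the detection above; not part of the patch.
fn lockorder_inversion_demo() {
	let a = Mutex::new(());
	let b = Mutex::new(());
	{
		let _a = a.lock().unwrap();
		let _b = b.lock().unwrap(); // records `a` in b's locked-before set
	}
	{
		let _b = b.lock().unwrap();
		// Acquiring `a` while holding `b`: `a` is already in b's locked-before
		// set, so both orders a -> b and b -> a now exist and the lockorder
		// check panics here.
		let _a = a.lock().unwrap();
	}
}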
@@ -116,10 +181,15 @@ impl LockMetadata {
 			// Since a try-lock will simply fail if the lock is held already, we do not
 			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
 			// succeeds, we do consider it to have created lockorder dependencies.
+			held.borrow_mut().insert(Arc::clone(this));
+			let mut locked_before = this.locked_before.lock().unwrap();
 			for locked in held.borrow().iter() {
-				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+				let mut lockdep = LockDep::new_without_bt(locked);
+				if !locked_before.contains(&lockdep) {
+					lockdep.lockdep_trace = Some(Backtrace::new());
+					locked_before.insert(lockdep);
+				}
 			}
-			held.borrow_mut().insert(Arc::clone(this));
 		});
 	}
 }
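As the comment notes, a try-lock that fails simply returns an error instead of blocking, so it can never participate in a deadlock and is never treated as a lockorder inversion; only a try-lock that succeeds records dependencies, and it records them the same way a blocking lock does. A hedged usage sketch, assuming the try_lock wrapper that calls into this metadata hook:

// Sketch only: a successful try-lock records the a -> b ordering just like
// lock() would; a failed try-lock records nothing and cannot deadlock.
fn try_lock_demo() {
	let a = Mutex::new(());
	let b = Mutex::new(());
	let _a = a.lock().unwrap();
	if let Ok(_b) = b.try_lock() {
		// "b taken while a is held" is now in b's locked-before set.
	}
}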
@@ -170,7 +240,7 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 
 impl<T> Mutex<T> {
 	pub fn new(inner: T) -> Mutex<T> {
-		Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
+		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
@@ -249,7 +319,7 @@ impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
+		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
@@ -271,96 +341,101 @@ impl<T> RwLock<T> {
 	}
 }
 
-#[test]
-#[should_panic]
-fn recursive_lock_fail() {
-	let mutex = Mutex::new(());
-	let _a = mutex.lock().unwrap();
-	let _b = mutex.lock().unwrap();
-}
-
-#[test]
-fn recursive_read() {
-	let lock = RwLock::new(());
-	let _a = lock.read().unwrap();
-	let _b = lock.read().unwrap();
-}
+pub type FairRwLock<T> = RwLock<T>;
 
-#[test]
-#[should_panic]
-fn lockorder_fail() {
-	let a = Mutex::new(());
-	let b = Mutex::new(());
-	{
-		let _a = a.lock().unwrap();
-		let _b = b.lock().unwrap();
-	}
-	{
-		let _b = b.lock().unwrap();
-		let _a = a.lock().unwrap();
+mod tests {
+	use super::{RwLock, Mutex};
+
+	#[test]
+	#[should_panic]
+	#[cfg(not(feature = "backtrace"))]
+	fn recursive_lock_fail() {
+		let mutex = Mutex::new(());
+		let _a = mutex.lock().unwrap();
+		let _b = mutex.lock().unwrap();
+	}
+
+	#[test]
+	fn recursive_read() {
+		let lock = RwLock::new(());
+		let _a = lock.read().unwrap();
+		let _b = lock.read().unwrap();
+	}
+
+	#[test]
+	#[should_panic]
+	fn lockorder_fail() {
+		let a = Mutex::new(());
+		let b = Mutex::new(());
+		{
+			let _a = a.lock().unwrap();
+			let _b = b.lock().unwrap();
+		}
+		{
+			let _b = b.lock().unwrap();
+			let _a = a.lock().unwrap();
+		}
 	}
-}
 
-#[test]
-#[should_panic]
-fn write_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.write().unwrap();
-		let _b = b.write().unwrap();
-	}
-	{
-		let _b = b.write().unwrap();
-		let _a = a.write().unwrap();
+	#[test]
+	#[should_panic]
+	fn write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.write().unwrap();
+		}
+		{
+			let _b = b.write().unwrap();
+			let _a = a.write().unwrap();
+		}
 	}
-}
 
-#[test]
-#[should_panic]
-fn read_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.read().unwrap();
-		let _b = b.read().unwrap();
-	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.read().unwrap();
+	#[test]
+	#[should_panic]
+	fn read_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
 	}
-}
 
-#[test]
-fn read_recurisve_no_lockorder() {
-	// Like the above, but note that no lockorder is implied when we recursively read-lock a
-	// RwLock, causing this to pass just fine.
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	let _outer = a.read().unwrap();
-	{
-		let _a = a.read().unwrap();
-		let _b = b.read().unwrap();
-	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.read().unwrap();
+	#[test]
+	fn read_recurisve_no_lockorder() {
+		// Like the above, but note that no lockorder is implied when we recursively read-lock a
+		// RwLock, causing this to pass just fine.
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		let _outer = a.read().unwrap();
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
 	}
-}
 
-#[test]
-#[should_panic]
-fn read_write_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.write().unwrap();
-		let _b = b.read().unwrap();
-	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.write().unwrap();
+	#[test]
+	#[should_panic]
+	fn read_write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.write().unwrap();
+		}
 	}
 }
-
-pub type FairRwLock<T> = RwLock<T>;
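Two consequences of the call-site deduplication show up at the end of the diff. First, recursive_lock_fail is now gated on cfg(not(feature = "backtrace")): with the backtrace feature, two distinct Mutexes constructed on the same source line share a single LockMetadata, so the held-lock check can no longer hard-panic on metadata equality and becomes the debug_assert seen earlier. Second, FairRwLock simply aliases the checked RwLock, so fair-lock users get the same lockorder tracking wherever this module is in use. A hedged illustration of the first point:

// Illustration only: both Mutexes come from the same Mutex::new() call site,
// so under the backtrace feature they share one LockMetadata entry even though
// they are separate locks at runtime. Locking both is fine, but it is
// indistinguishable from a recursive lock when metadata is keyed by
// construction site, which is why the hard panic was relaxed.
fn same_call_site_locks() {
	let locks: Vec<Mutex<u32>> = (0..2).map(Mutex::new).collect();
	let _first = locks[0].lock().unwrap();
	let _second = locks[1].lock().unwrap();
}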