@@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
490
490
usage [i ] = '\0' ;
491
491
}
492
492
493
+ static int __print_lock_name (struct lock_class * class )
494
+ {
495
+ char str [KSYM_NAME_LEN ];
496
+ const char * name ;
497
+
498
+ name = class -> name ;
499
+ if (!name )
500
+ name = __get_key_name (class -> key , str );
501
+
502
+ return printk ("%s" , name );
503
+ }
504
+
493
505
static void print_lock_name (struct lock_class * class )
494
506
{
495
507
char str [KSYM_NAME_LEN ], usage [LOCK_USAGE_CHARS ];
@@ -1053,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth)
1053
1065
return 0 ;
1054
1066
}
1055
1067
1068
+ static void
1069
+ print_circular_lock_scenario (struct held_lock * src ,
1070
+ struct held_lock * tgt ,
1071
+ struct lock_list * prt )
1072
+ {
1073
+ struct lock_class * source = hlock_class (src );
1074
+ struct lock_class * target = hlock_class (tgt );
1075
+ struct lock_class * parent = prt -> class ;
1076
+
1077
+ /*
1078
+ * A direct locking problem where unsafe_class lock is taken
1079
+ * directly by safe_class lock, then all we need to show
1080
+ * is the deadlock scenario, as it is obvious that the
1081
+ * unsafe lock is taken under the safe lock.
1082
+ *
1083
+ * But if there is a chain instead, where the safe lock takes
1084
+ * an intermediate lock (middle_class) where this lock is
1085
+ * not the same as the safe lock, then the lock chain is
1086
+ * used to describe the problem. Otherwise we would need
1087
+ * to show a different CPU case for each link in the chain
1088
+ * from the safe_class lock to the unsafe_class lock.
1089
+ */
1090
+ if (parent != source ) {
1091
+ printk ("Chain exists of:\n " );
1092
+ __print_lock_name (source );
1093
+ printk (" --> " );
1094
+ __print_lock_name (parent );
1095
+ printk (" --> " );
1096
+ __print_lock_name (target );
1097
+ printk ("\n\n" );
1098
+ }
1099
+
1100
+ printk (" Possible unsafe locking scenario:\n\n" );
1101
+ printk (" CPU0 CPU1\n" );
1102
+ printk (" ---- ----\n" );
1103
+ printk (" lock(" );
1104
+ __print_lock_name (target );
1105
+ printk (");\n" );
1106
+ printk (" lock(" );
1107
+ __print_lock_name (parent );
1108
+ printk (");\n" );
1109
+ printk (" lock(" );
1110
+ __print_lock_name (target );
1111
+ printk (");\n" );
1112
+ printk (" lock(" );
1113
+ __print_lock_name (source );
1114
+ printk (");\n" );
1115
+ printk ("\n *** DEADLOCK ***\n\n" );
1116
+ }
1117
+
1056
1118
/*
1057
1119
* When a circular dependency is detected, print the
1058
1120
* header first:
@@ -1096,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this,
1096
1158
{
1097
1159
struct task_struct * curr = current ;
1098
1160
struct lock_list * parent ;
1161
+ struct lock_list * first_parent ;
1099
1162
int depth ;
1100
1163
1101
1164
if (!debug_locks_off_graph_unlock () || debug_locks_silent )
@@ -1109,13 +1172,17 @@ static noinline int print_circular_bug(struct lock_list *this,
1109
1172
print_circular_bug_header (target , depth , check_src , check_tgt );
1110
1173
1111
1174
parent = get_lock_parent (target );
1175
+ first_parent = parent ;
1112
1176
1113
1177
while (parent ) {
1114
1178
print_circular_bug_entry (parent , -- depth );
1115
1179
parent = get_lock_parent (parent );
1116
1180
}
1117
1181
1118
1182
printk ("\nother info that might help us debug this:\n\n" );
1183
+ print_circular_lock_scenario (check_src , check_tgt ,
1184
+ first_parent );
1185
+
1119
1186
lockdep_print_held_locks (curr );
1120
1187
1121
1188
printk ("\nstack backtrace:\n" );
@@ -1314,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
1314
1381
printk ("\n" );
1315
1382
1316
1383
if (depth == 0 && (entry != root )) {
1317
- printk ("lockdep:%s bad BFS generated tree \n" , __func__ );
1384
+ printk ("lockdep:%s bad path found in chain graph \n" , __func__ );
1318
1385
break ;
1319
1386
}
1320
1387
@@ -1325,6 +1392,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
1325
1392
return ;
1326
1393
}
1327
1394
1395
/*
 * Print the "possible interrupt unsafe locking scenario" for an
 * irq-safe -> irq-unsafe lock dependency.
 *
 * @safe_entry:   lock_list entry for the irq-safe lock class
 * @unsafe_entry: lock_list entry for the irq-unsafe lock class
 * @prev_class:   class of the previously held lock of the new dependency
 * @next_class:   class of the lock being acquired
 */
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	/* If prev is itself the safe lock, the middle link of the
	 * chain is the lock being acquired instead. */
	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(" --> ");
		__print_lock_name(middle_class);
		printk(" --> ");
		__print_lock_name(unsafe_class);
		printk("\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
1450
+
1328
1451
static int
1329
1452
print_bad_irq_dependency (struct task_struct * curr ,
1330
1453
struct lock_list * prev_root ,
@@ -1376,6 +1499,9 @@ print_bad_irq_dependency(struct task_struct *curr,
1376
1499
print_stack_trace (forwards_entry -> class -> usage_traces + bit2 , 1 );
1377
1500
1378
1501
printk ("\nother info that might help us debug this:\n\n" );
1502
+ print_irq_lock_scenario (backwards_entry , forwards_entry ,
1503
+ hlock_class (prev ), hlock_class (next ));
1504
+
1379
1505
lockdep_print_held_locks (curr );
1380
1506
1381
1507
printk ("\nthe dependencies between %s-irq-safe lock" , irqclass );
@@ -1539,6 +1665,26 @@ static inline void inc_chains(void)
1539
1665
1540
1666
#endif
1541
1667
1668
/*
 * Sketch the trivially self-deadlocking scenario for a recursive
 * acquisition: the same context already holds @prv and now takes
 * @nxt of the same class on the same CPU.
 */
static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *acquired = hlock_class(nxt);
	struct lock_class *held = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(held);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(acquired);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}
1687
+
1542
1688
static int
1543
1689
print_deadlock_bug (struct task_struct * curr , struct held_lock * prev ,
1544
1690
struct held_lock * next )
@@ -1557,6 +1703,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1557
1703
print_lock (prev );
1558
1704
1559
1705
printk ("\nother info that might help us debug this:\n" );
1706
+ print_deadlock_scenario (next , prev );
1560
1707
lockdep_print_held_locks (curr );
1561
1708
1562
1709
printk ("\nstack backtrace:\n" );
@@ -1826,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
1826
1973
struct list_head * hash_head = chainhashentry (chain_key );
1827
1974
struct lock_chain * chain ;
1828
1975
struct held_lock * hlock_curr , * hlock_next ;
1829
- int i , j , n , cn ;
1976
+ int i , j ;
1830
1977
1831
1978
if (DEBUG_LOCKS_WARN_ON (!irqs_disabled ()))
1832
1979
return 0 ;
@@ -1886,15 +2033,9 @@ static inline int lookup_chain_cache(struct task_struct *curr,
1886
2033
}
1887
2034
i ++ ;
1888
2035
chain -> depth = curr -> lockdep_depth + 1 - i ;
1889
- cn = nr_chain_hlocks ;
1890
- while (cn + chain -> depth <= MAX_LOCKDEP_CHAIN_HLOCKS ) {
1891
- n = cmpxchg (& nr_chain_hlocks , cn , cn + chain -> depth );
1892
- if (n == cn )
1893
- break ;
1894
- cn = n ;
1895
- }
1896
- if (likely (cn + chain -> depth <= MAX_LOCKDEP_CHAIN_HLOCKS )) {
1897
- chain -> base = cn ;
2036
+ if (likely (nr_chain_hlocks + chain -> depth <= MAX_LOCKDEP_CHAIN_HLOCKS )) {
2037
+ chain -> base = nr_chain_hlocks ;
2038
+ nr_chain_hlocks += chain -> depth ;
1898
2039
for (j = 0 ; j < chain -> depth - 1 ; j ++ , i ++ ) {
1899
2040
int lock_id = curr -> held_locks [i ].class_idx - 1 ;
1900
2041
chain_hlocks [chain -> base + j ] = lock_id ;
@@ -2011,6 +2152,24 @@ static void check_chain_key(struct task_struct *curr)
2011
2152
#endif
2012
2153
}
2013
2154
2155
/*
 * Sketch the single-CPU scenario behind a usage bug: the lock is
 * taken once, and then taken again from the interrupt that hits
 * while it is still held.
 */
static void
print_usage_bug_scenario(struct held_lock *lock)
{
	struct lock_class *cl = hlock_class(lock);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(cl);
	printk(");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(cl);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
2172
+
2014
2173
static int
2015
2174
print_usage_bug (struct task_struct * curr , struct held_lock * this ,
2016
2175
enum lock_usage_bit prev_bit , enum lock_usage_bit new_bit )
@@ -2039,6 +2198,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
2039
2198
2040
2199
print_irqtrace_events (curr );
2041
2200
printk ("\nother info that might help us debug this:\n" );
2201
+ print_usage_bug_scenario (this );
2202
+
2042
2203
lockdep_print_held_locks (curr );
2043
2204
2044
2205
printk ("\nstack backtrace:\n" );
@@ -2073,6 +2234,10 @@ print_irq_inversion_bug(struct task_struct *curr,
2073
2234
struct held_lock * this , int forwards ,
2074
2235
const char * irqclass )
2075
2236
{
2237
+ struct lock_list * entry = other ;
2238
+ struct lock_list * middle = NULL ;
2239
+ int depth ;
2240
+
2076
2241
if (!debug_locks_off_graph_unlock () || debug_locks_silent )
2077
2242
return 0 ;
2078
2243
@@ -2091,6 +2256,25 @@ print_irq_inversion_bug(struct task_struct *curr,
2091
2256
printk ("\n\nand interrupts could create inverse lock ordering between them.\n\n" );
2092
2257
2093
2258
printk ("\nother info that might help us debug this:\n" );
2259
+
2260
+ /* Find a middle lock (if one exists) */
2261
+ depth = get_lock_depth (other );
2262
+ do {
2263
+ if (depth == 0 && (entry != root )) {
2264
+ printk ("lockdep:%s bad path found in chain graph\n" , __func__ );
2265
+ break ;
2266
+ }
2267
+ middle = entry ;
2268
+ entry = get_lock_parent (entry );
2269
+ depth -- ;
2270
+ } while (entry && entry != root && (depth >= 0 ));
2271
+ if (forwards )
2272
+ print_irq_lock_scenario (root , other ,
2273
+ middle ? middle -> class : root -> class , other -> class );
2274
+ else
2275
+ print_irq_lock_scenario (other , root ,
2276
+ middle ? middle -> class : other -> class , root -> class );
2277
+
2094
2278
lockdep_print_held_locks (curr );
2095
2279
2096
2280
printk ("\nthe shortest dependencies between 2nd lock and 1st lock:\n" );
0 commit comments