
Commit 1f9f78b

oglitta authored and torvalds committed
mm/slub, kunit: add a KUnit test for SLUB debugging functionality
SLUB has a resiliency_test() function, but it is hidden behind an #ifdef SLUB_RESILIENCY_TEST that is not part of Kconfig, so nobody runs it. A KUnit test should be a proper replacement for it.

Try changing a byte in the redzone after allocation, and changing the pointer to the next free node, the first byte, the 50th byte and a redzone byte. Check whether validation finds the errors.

There are several differences from the original resiliency test: the tests create their own caches with known state instead of corrupting the shared kmalloc caches, and the corruption of the freepointer uses the correct offset (the original resiliency test got broken by the freepointer changes). The "change a random byte" test is dropped, because it has no meaning in this form, where deterministic results are needed.

Add a new option, CONFIG_SLUB_KUNIT_TEST, in Kconfig. The tests next_pointer, first_word and clobber_50th_byte do not run with KASAN enabled, because they deliberately modify non-allocated objects.

Use a kunit_resource to count errors in the cache and silence the bug reports. Count an error whenever slab_bug() or slab_fix() is called, or when the count of pages is wrong.

[glittao@gmail.com: remove unused function test_exit() from the SLUB KUnit test]
  Link: https://lkml.kernel.org/r/20210512140656.12083-1-glittao@gmail.com
[akpm@linux-foundation.org: export kasan_enable/disable_current to modules]
  Link: https://lkml.kernel.org/r/20210511150734.3492-2-glittao@gmail.com

Signed-off-by: Oliver Glitta <glittao@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Daniel Latypov <dlatypov@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Brendan Higgins <brendanhiggins@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 26c6cb7 commit 1f9f78b
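
To try the test out, CONFIG_SLUB_KUNIT_TEST (added in lib/Kconfig.debug below) has to be enabled together with the KUnit framework and SLUB debugging. As a rough sketch of one way to do that with the in-tree KUnit tooling — the config fragment below is illustrative and not part of this commit — a minimal kunitconfig fragment could look like:

# Hypothetical .kunitconfig fragment, not part of this commit:
# pulls in the KUnit framework, the SLUB allocator with debugging,
# and the new SLUB KUnit test.
CONFIG_KUNIT=y
CONFIG_SLUB=y
CONFIG_SLUB_DEBUG=y
CONFIG_SLUB_KUNIT_TEST=y

With such a fragment in place, running the suite through ./tools/testing/kunit/kunit.py run (or building the test as a module, in which case loading it prints the results to the kernel log) should report the cases of the "slub_test" suite defined in lib/slub_kunit.c; the exact invocation depends on the KUnit tooling in the tree.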

6 files changed: 212 additions, 3 deletions

lib/Kconfig.debug

Lines changed: 12 additions & 0 deletions

@@ -2429,6 +2429,18 @@ config BITS_TEST
 
 	  If unsure, say N.
 
+config SLUB_KUNIT_TEST
+	tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+	depends on SLUB_DEBUG && KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds SLUB allocator unit test.
+	  Tests SLUB cache debugging functionality.
+	  For more information on KUnit and unit tests in general please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
 config TEST_UDELAY
 	tristate "udelay test driver"
 	help

lib/Makefile

Lines changed: 1 addition & 0 deletions

@@ -354,5 +354,6 @@ obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
 obj-$(CONFIG_BITS_TEST) += test_bits.o
 obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
 
 obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o

lib/slub_kunit.c

Lines changed: 152 additions & 0 deletions (new file)

// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "../mm/slab.h"

static struct kunit_resource resource;
static int slab_errors;

static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
				SLAB_RED_ZONE, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
				SLAB_POISON, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
	unsigned long tmp;
	unsigned long *ptr_addr;

	kmem_cache_free(s, p);

	ptr_addr = (unsigned long *)(p + s->offset);
	tmp = *ptr_addr;
	p[s->offset] = 0x12;

	/*
	 * Expecting three errors.
	 * One for the corrupted freechain and the other one for the wrong
	 * count of objects in use. The third error is fixing broken cache.
	 */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 3, slab_errors);

	/*
	 * Try to repair corrupted freepointer.
	 * Still expecting two errors. The first for the wrong count
	 * of objects in use.
	 * The second error is for fixing broken cache.
	 */
	*ptr_addr = tmp;
	slab_errors = 0;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	/*
	 * Previous validation repaired the count of objects in use.
	 * Now expecting no error.
	 */
	slab_errors = 0;
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	kmem_cache_destroy(s);
}

static void test_first_word(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
				SLAB_POISON, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	*p = 0x78;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}

static void test_clobber_50th_byte(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
				SLAB_POISON, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	p[50] = 0x9a;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}
#endif

static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
				SLAB_RED_ZONE, NULL);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}

static int test_init(struct kunit *test)
{
	slab_errors = 0;

	kunit_add_named_resource(test, NULL, NULL, &resource,
					"slab_errors", &slab_errors);
	return 0;
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);

MODULE_LICENSE("GPL");

mm/kasan/common.c

Lines changed: 3 additions & 0 deletions

@@ -51,11 +51,14 @@ void kasan_enable_current(void)
 {
 	current->kasan_depth++;
 }
+EXPORT_SYMBOL(kasan_enable_current);
 
 void kasan_disable_current(void)
 {
 	current->kasan_depth--;
 }
+EXPORT_SYMBOL(kasan_disable_current);
+
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
 void __kasan_unpoison_range(const void *address, size_t size)

mm/slab.h

Lines changed: 1 addition & 0 deletions

@@ -215,6 +215,7 @@ DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
 DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 extern void print_tracking(struct kmem_cache *s, void *object);
+long validate_slab_cache(struct kmem_cache *s);
 #else
 static inline void print_tracking(struct kmem_cache *s, void *object)
 {

mm/slub.c

Lines changed: 43 additions & 3 deletions

@@ -36,6 +36,7 @@
 #include <linux/prefetch.h>
 #include <linux/memcontrol.h>
 #include <linux/random.h>
+#include <kunit/test.h>
 
 #include <trace/events/kmem.h>
 
@@ -449,6 +450,26 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+static bool slab_add_kunit_errors(void)
+{
+	struct kunit_resource *resource;
+
+	if (likely(!current->kunit_test))
+		return false;
+
+	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
+	if (!resource)
+		return false;
+
+	(*(int *)resource->data)++;
+	kunit_put_resource(resource);
+	return true;
+}
+#else
+static inline bool slab_add_kunit_errors(void) { return false; }
+#endif
+
 /*
  * Determine a map of object in use on a page.
  *
@@ -679,6 +700,9 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 	struct va_format vaf;
 	va_list args;
 
+	if (slab_add_kunit_errors())
+		return;
+
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -742,6 +766,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
+	if (slab_add_kunit_errors())
+		return;
+
 	slab_bug(s, "%s", reason);
 	print_trailer(s, page, object);
 }
@@ -752,6 +779,9 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 	va_list args;
 	char buf[100];
 
+	if (slab_add_kunit_errors())
+		return;
+
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
@@ -801,12 +831,16 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 	while (end > fault && end[-1] == value)
 		end--;
 
+	if (slab_add_kunit_errors())
+		goto skip_bug_print;
+
 	slab_bug(s, "%s overwritten", what);
 	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
 					fault, end - 1, fault - addr,
 					fault[0], value);
 	print_trailer(s, page, object);
 
+skip_bug_print:
 	restore_bytes(s, what, value, fault, end);
 	return 0;
 }
@@ -4649,9 +4683,11 @@ static int validate_slab_node(struct kmem_cache *s,
 		validate_slab(s, page);
 		count++;
 	}
-	if (count != n->nr_partial)
+	if (count != n->nr_partial) {
 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
 		       s->name, count, n->nr_partial);
+		slab_add_kunit_errors();
+	}
 
 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
@@ -4660,16 +4696,18 @@ static int validate_slab_node(struct kmem_cache *s,
 		validate_slab(s, page);
 		count++;
 	}
-	if (count != atomic_long_read(&n->nr_slabs))
+	if (count != atomic_long_read(&n->nr_slabs)) {
 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
 		       s->name, count, atomic_long_read(&n->nr_slabs));
+		slab_add_kunit_errors();
+	}
 
 out:
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
-static long validate_slab_cache(struct kmem_cache *s)
+long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
@@ -4681,6 +4719,8 @@ static long validate_slab_cache(struct kmem_cache *s)
 
 	return count;
 }
+EXPORT_SYMBOL(validate_slab_cache);
+
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
