
Commit 946fa0d

ftang1 authored and tehcaster committed
mm/slub: extend redzone check to extra allocated kmalloc space than requested
kmalloc rounds the request size up to a fixed size (mostly a power of 2), so there can be extra space beyond what was requested, whose size is the actual buffer size minus the original request size. To better detect out-of-bounds access or abuse of this space, add a redzone sanity check for it.

In the current kernel, some kmalloc users already know this space exists and utilize it after calling ksize() to learn the real size of the allocated buffer. So skip the sanity check for objects on which ksize() has been called, treating them as legitimate users. Kees Cook is working on sanitizing all these use cases by switching them to kmalloc_size_roundup() to avoid ambiguous usage, and once that is done this special handling for ksize() can be removed.

In some cases the free pointer can be saved inside the latter part of the object's data area, where it may overlap the redzone (for small kmalloc object sizes). As suggested by Hyeonggon Yoo, force the free pointer into the metadata area when kmalloc redzone debugging is enabled, so that all kmalloc objects are covered by the redzone check.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
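To make the round-up concrete, here is a minimal module-style sketch (hypothetical, not part of this commit; the demo function name is invented) using only the kmalloc(), ksize() and kmalloc_size_roundup() APIs named in the message:

#include <linux/module.h>
#include <linux/slab.h>

static int __init kmalloc_slack_demo_init(void)
{
	/*
	 * A 13-byte request is typically backed by the kmalloc-16 cache,
	 * so the usable size exceeds the request; with this patch the
	 * bytes between the two sizes are redzoned under slub_debug.
	 */
	char *buf = kmalloc(13, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Note: this ksize() call itself exempts 'buf' from the new check. */
	pr_info("requested 13, rounded up to %zu, usable %zu\n",
		kmalloc_size_roundup(13), ksize(buf));

	kfree(buf);
	return 0;
}
module_init(kmalloc_slack_demo_init);

MODULE_LICENSE("GPL");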
1 parent 5d1ba31 commit 946fa0d

File tree

3 files changed: +53 -5 lines changed


mm/slab.h

+4
@@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif
 
+#ifdef CONFIG_SLUB_DEBUG
+void skip_orig_size_check(struct kmem_cache *s, const void *object);
+#endif
+
 #endif /* MM_SLAB_H */

mm/slab_common.c

+4
@@ -1037,6 +1037,10 @@ size_t __ksize(const void *object)
 		return folio_size(folio);
 	}
 
+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 
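This __ksize() hook is what exempts today's legitimate users of the slack: once a caller has asked ksize() for the real buffer size, skip_orig_size_check() records the full object_size as the requested size, so later use of the extra space is not reported. A hedged sketch of such a caller (hypothetical, not code from this commit; the function name is invented):

#include <linux/errno.h>
#include <linux/slab.h>

static int use_ksize_slack(void)
{
	char *buf = kmalloc(13, GFP_KERNEL);
	size_t usable;

	if (!buf)
		return -ENOMEM;

	/*
	 * ksize() reaches skip_orig_size_check(), so the slack between the
	 * 13-byte request and the real object size stops being redzoned.
	 */
	usable = ksize(buf);
	if (usable > 13)
		buf[usable - 1] = '\0';	/* not flagged as redzone corruption */

	kfree(buf);
	return 0;
}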

mm/slub.c

+45 -5
@@ -829,6 +829,17 @@ static inline void set_orig_size(struct kmem_cache *s,
 	if (!slub_debug_orig_size(s))
 		return;
 
+#ifdef CONFIG_KASAN_GENERIC
+	/*
+	 * KASAN could save its free meta data in object's data area at
+	 * offset 0, if the size is larger than 'orig_size', it will
+	 * overlap the data redzone in [orig_size+1, object_size], and
+	 * the check should be skipped.
+	 */
+	if (kasan_metadata_size(s, true) > orig_size)
+		orig_size = s->object_size;
+#endif
+
 	p += get_info_end(s);
 	p += sizeof(struct track) * 2;
 
@@ -848,6 +859,11 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned int *)p;
 }
 
+void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+	set_orig_size(s, (void *)object, s->object_size);
+}
+
 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 {
 	struct va_format vaf;
@@ -966,17 +982,28 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
+	unsigned int poison_size = s->object_size;
 
-	if (s->flags & SLAB_RED_ZONE)
+	if (s->flags & SLAB_RED_ZONE) {
 		memset(p - s->red_left_pad, val, s->red_left_pad);
 
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			/*
+			 * Redzone the extra allocated space by kmalloc than
+			 * requested, and the poison size will be limited to
+			 * the original request size accordingly.
+			 */
+			poison_size = get_orig_size(s, object);
+		}
+	}
+
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->object_size - 1);
-		p[s->object_size - 1] = POISON_END;
+		memset(p, POISON_FREE, poison_size - 1);
+		p[poison_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->object_size, val, s->inuse - s->object_size);
+		memset(p + poison_size, val, s->inuse - poison_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -1120,6 +1147,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 {
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
+	unsigned int orig_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1129,6 +1157,17 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			orig_size = get_orig_size(s, object);
+
+			if (s->object_size > orig_size &&
+				!check_bytes_and_report(s, slab, object,
+					"kmalloc Redzone", p + orig_size,
+					val, s->object_size - orig_size)) {
+				return 0;
+			}
+		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, slab, p, "Alignment padding",
@@ -4206,7 +4245,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+	if (slub_debug_orig_size(s) ||
+	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
 	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
 	    s->ctor) {
 		/*
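The calculate_sizes() hunk above keeps the smallest kmalloc caches covered: with orig_size debugging active, the free pointer is placed in the metadata area rather than inside the object, so it cannot overlap the newly redzoned slack. The net effect is that a store past the requested size but still inside the rounded-up allocation now lands in the "kmalloc Redzone" and is reported when the object is next validated, e.g. on free with slub_debug enabled. A hypothetical reproducer sketch (not a kernel selftest; the function name is invented):

#include <linux/slab.h>
#include <linux/types.h>

static void oob_within_roundup(void)
{
	u8 *p = kmalloc(13, GFP_KERNEL);	/* typically backed by kmalloc-16 */

	if (!p)
		return;

	/*
	 * Beyond the 13-byte request but inside the 16-byte object: before
	 * this patch the write went unnoticed; now it falls in the kmalloc
	 * redzone and is reported by check_object() on the debug free path.
	 */
	p[14] = 0xee;

	kfree(p);
}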
