mm/slub: extend redzone check to extra allocated kmalloc space than requested

kmalloc rounds up the requested size to a fixed size (mostly a power
of 2), so there can be extra space beyond what was requested, whose
size is the actual buffer size minus the original request size.

To better detect out-of-bounds accesses or abuse of this space, add a
redzone sanity check for it.
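
As an illustration (a sketch, not part of this patch), assume a
100-byte request served from the kmalloc-128 cache with redzone
debugging enabled (e.g. booting with slub_debug=Z):

	void *p = kmalloc(100, GFP_KERNEL);	/* object_size == 128 */

	/*
	 * p[0..99]    : the requested buffer (orig_size == 100)
	 * p[100..127] : extra space, now filled with redzone bytes and
	 *               verified by the new "kmalloc Redzone" check
	 */
	kfree(p);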

In the current kernel, some kmalloc users already know about the
existence of this space and utilize it after calling 'ksize()' to
learn the real size of the allocated buffer. So skip the sanity check
for objects on which ksize() has been called, treating them as
legitimate users. Kees Cook is working on sanitizing all these use
cases by switching them to kmalloc_size_roundup(), which avoids the
ambiguous usage; once that is done, this special handling for ksize()
can be removed.
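
Roughly, the two patterns look like the sketch below ('buf', 'len'
and 'cap' are hypothetical names, not taken from any in-tree user):

	/* Legacy pattern: allocate, then discover the real capacity. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	cap = ksize(buf);	/* may be > len; the whole 'cap' is then
				 * used, so the redzone check is skipped */

	/* Converted pattern: round up first, so there is no ambiguity. */
	cap = kmalloc_size_roundup(len);
	buf = kmalloc(cap, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;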

In some cases, the free pointer can be stored in the latter part of
the object's data area, where it may overlap the redzone (for small
kmalloc object sizes). As suggested by Hyeonggon Yoo, force the free
pointer into the metadata area when kmalloc redzone debugging is
enabled, so that all kmalloc objects are covered by the redzone
check.
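
For reference, a simplified sketch of the resulting debug layout of
such a kmalloc object (field order follows the SLUB debug layout,
sizes are illustrative):

	/*
	 * [ left redzone                                 ]
	 * [ object data     : 0 .. orig_size-1           ]
	 * [ kmalloc redzone : orig_size .. object_size-1 ]
	 * [ right redzone   : object_size .. inuse-1     ]
	 * [ free pointer    : moved out of the data area ]
	 * [ alloc/free track, orig_size field, padding   ]
	 */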

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
ftang1 authored and tehcaster committed Nov 11, 2022
1 parent 5d1ba31 commit 946fa0d
Showing 3 changed files with 53 additions and 5 deletions.
4 changes: 4 additions & 0 deletions mm/slab.h
@@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif

+#ifdef CONFIG_SLUB_DEBUG
+void skip_orig_size_check(struct kmem_cache *s, const void *object);
+#endif
+
 #endif /* MM_SLAB_H */
4 changes: 4 additions & 0 deletions mm/slab_common.c
@@ -1037,6 +1037,10 @@ size_t __ksize(const void *object)
 		return folio_size(folio);
 	}

+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }

50 changes: 45 additions & 5 deletions mm/slub.c
@@ -829,6 +829,17 @@ static inline void set_orig_size(struct kmem_cache *s,
 	if (!slub_debug_orig_size(s))
 		return;

+#ifdef CONFIG_KASAN_GENERIC
+	/*
+	 * KASAN could save its free meta data in the object's data area
+	 * at offset 0. If its size is larger than 'orig_size', the meta
+	 * data will overlap the data redzone in [orig_size+1, object_size],
+	 * so the check should be skipped.
+	 */
+	if (kasan_metadata_size(s, true) > orig_size)
+		orig_size = s->object_size;
+#endif
+
 	p += get_info_end(s);
 	p += sizeof(struct track) * 2;

@@ -848,6 +859,11 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned int *)p;
 }

+void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+	set_orig_size(s, (void *)object, s->object_size);
+}
+
 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 {
 	struct va_format vaf;
@@ -966,17 +982,28 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
+	unsigned int poison_size = s->object_size;

-	if (s->flags & SLAB_RED_ZONE)
+	if (s->flags & SLAB_RED_ZONE) {
 		memset(p - s->red_left_pad, val, s->red_left_pad);
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			/*
+			 * Redzone the extra space that kmalloc allocated
+			 * beyond the request, and limit the poison size
+			 * to the original request size accordingly.
+			 */
+			poison_size = get_orig_size(s, object);
+		}
+	}

 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->object_size - 1);
-		p[s->object_size - 1] = POISON_END;
+		memset(p, POISON_FREE, poison_size - 1);
+		p[poison_size - 1] = POISON_END;
 	}

 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->object_size, val, s->inuse - s->object_size);
+		memset(p + poison_size, val, s->inuse - poison_size);
 }

 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -1120,6 +1147,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 {
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
+	unsigned int orig_size;

 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1129,6 +1157,17 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			orig_size = get_orig_size(s, object);
+
+			if (s->object_size > orig_size &&
+				!check_bytes_and_report(s, slab, object,
+					"kmalloc Redzone", p + orig_size,
+					val, s->object_size - orig_size)) {
+				return 0;
+			}
+		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, slab, p, "Alignment padding",
@@ -4206,7 +4245,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;

-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+	if (slub_debug_orig_size(s) ||
+	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
 	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
 	    s->ctor) {
 		/*
