mm: kmsan: call KMSAN hooks from SLUB code
In order to report uninitialized memory coming from heap allocations, KMSAN
has to poison them unless they're created with __GFP_ZERO.

Conveniently, the KMSAN hooks are needed in exactly the places where
init_on_alloc/init_on_free initialization is performed.

In addition, we apply __no_kmsan_checks to get_freepointer_safe() to
suppress reports when accessing freelist pointers that reside in freed
objects.
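
For illustration, here is a minimal, deliberately buggy sketch (not part of
this commit; kmsan_demo() and struct foo are hypothetical) of the bug class
these hooks make detectable: branching on a heap value that was never
written. With __GFP_ZERO the object is unpoisoned instead, so no report is
produced.

#include <linux/printk.h>
#include <linux/slab.h>

struct foo {
        int a;
        int b;
};

static void kmsan_demo(void)
{
        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return;
        f->a = 1;       /* f->b is intentionally left uninitialized */
        if (f->b)       /* KMSAN reports a use of an uninit value here */
                pr_info("b is set\n");
        kfree(f);
}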

Link: https://lkml.kernel.org/r/20220915150417.722975-16-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
ramosian-glider authored and akpm00 committed Oct 3, 2022
1 parent b073d7f commit 68ef169
Showing 4 changed files with 151 additions and 0 deletions.
57 changes: 57 additions & 0 deletions include/linux/kmsan.h
@@ -14,6 +14,7 @@
#include <linux/types.h>

struct page;
struct kmem_cache;

#ifdef CONFIG_KMSAN

@@ -48,6 +49,44 @@ void kmsan_free_page(struct page *page, unsigned int order);
 */
void kmsan_copy_page_meta(struct page *dst, struct page *src);

/**
 * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
 * @s: slab cache the object belongs to.
 * @object: object pointer.
 * @flags: GFP flags passed to the allocator.
 *
 * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
 * newly created object, marking it as initialized or uninitialized.
 */
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);

/**
 * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
 * @s: slab cache the object belongs to.
 * @object: object pointer.
 *
 * KMSAN marks the freed object as uninitialized.
 */
void kmsan_slab_free(struct kmem_cache *s, void *object);

/**
 * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
 * @ptr: object pointer.
 * @size: object size.
 * @flags: GFP flags passed to the allocator.
 *
 * Similar to kmsan_slab_alloc(), but for large allocations.
 */
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);

/**
 * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
 * @ptr: object pointer.
 *
 * Similar to kmsan_slab_free(), but for large allocations.
 */
void kmsan_kfree_large(const void *ptr);

/**
 * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
 * @start: start of vmapped range.
@@ -114,6 +153,24 @@ static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
}

static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
                                    gfp_t flags)
{
}

static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
{
}

static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
                                       gfp_t flags)
{
}

static inline void kmsan_kfree_large(const void *ptr)
{
}

static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
                                                  unsigned long end,
                                                  pgprot_t prot,
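Taken together, the four slab hooks declared in this header bracket an
object's lifetime. A condensed sketch of how an allocator would wire them up
(hypothetical code, not from this commit; take_from_freelist() and
put_on_freelist() are stand-in helpers — the real call sites are in the
mm/slab.h and mm/slub.c hunks below):

#include <linux/kmsan.h>
#include <linux/slab.h>

extern void *take_from_freelist(struct kmem_cache *s);           /* hypothetical */
extern void put_on_freelist(struct kmem_cache *s, void *object); /* hypothetical */

static void *toy_cache_alloc(struct kmem_cache *s, gfp_t flags)
{
        void *object = take_from_freelist(s);

        /* Poison the new object, or unpoison it if __GFP_ZERO was passed. */
        kmsan_slab_alloc(s, object, flags);
        return object;
}

static void toy_cache_free(struct kmem_cache *s, void *object)
{
        /* Mark freed memory as uninitialized again. */
        kmsan_slab_free(s, object);
        put_on_freelist(s, object);
}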
76 changes: 76 additions & 0 deletions mm/kmsan/hooks.c
@@ -27,6 +27,82 @@
 * skipping effects of functions like memset() inside instrumented code.
 */

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
        if (unlikely(object == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        /*
         * There's a ctor or this is an RCU cache - do nothing. The memory
         * status hasn't changed since last use.
         */
        if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
                return;

        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory(object, s->object_size,
                                               KMSAN_POISON_CHECK);
        else
                kmsan_internal_poison_memory(object, s->object_size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
        if (!kmsan_enabled || kmsan_in_runtime())
                return;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
                return;
        /*
         * If there's a constructor, freed memory must remain in the same state
         * until the next allocation. We cannot save its state to detect
         * use-after-free bugs, instead we just keep it unpoisoned.
         */
        if (s->ctor)
                return;
        kmsan_enter_runtime();
        kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        if (unlikely(ptr == NULL))
                return;
        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        kmsan_enter_runtime();
        if (flags & __GFP_ZERO)
                kmsan_internal_unpoison_memory((void *)ptr, size,
                                               /*checked*/ true);
        else
                kmsan_internal_poison_memory((void *)ptr, size, flags,
                                             KMSAN_POISON_CHECK);
        kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
        struct page *page;

        if (!kmsan_enabled || kmsan_in_runtime())
                return;
        kmsan_enter_runtime();
        page = virt_to_head_page((void *)ptr);
        KMSAN_WARN_ON(ptr != page_address(page));
        kmsan_internal_poison_memory((void *)ptr,
                                     PAGE_SIZE << compound_order(page),
                                     GFP_KERNEL,
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
}

static unsigned long vmalloc_shadow(unsigned long addr)
{
        return (unsigned long)kmsan_get_metadata((void *)addr,
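The re-poisoning in kmsan_slab_free() also helps with dangling pointers:
values read through them count as uninitialized again. A hypothetical,
deliberately buggy sketch (uaf_value_demo() is not from this commit; it
assumes the object has not been reallocated, and that the cache has no
constructor and is not SLAB_TYPESAFE_BY_RCU, since such caches are skipped
by the hook above):

#include <linux/printk.h>
#include <linux/slab.h>

static void uaf_value_demo(void)
{
        int *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return;
        *p = 42;
        kfree(p);        /* kmsan_slab_free() poisons the object again */
        if (*p == 42)    /* KMSAN treats the loaded value as uninitialized */
                pr_info("stale value\n");
}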
1 change: 1 addition & 0 deletions mm/slab.h
@@ -729,6 +729,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
17 changes: 17 additions & 0 deletions mm/slub.c
@@ -22,6 +22,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
@@ -359,6 +360,17 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
        prefetchw(object + s->offset);
}

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
        unsigned long freepointer_addr;
@@ -1709,19 +1721,22 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
        ptr = kasan_kmalloc_large(ptr, size, flags);
        /* As ptr might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ptr, size, 1, flags);
        kmsan_kmalloc_large(ptr, size, flags);
        return ptr;
}

static __always_inline void kfree_hook(void *x)
{
        kmemleak_free(x);
        kasan_kfree_large(x);
        kmsan_kfree_large(x);
}

static __always_inline bool slab_free_hook(struct kmem_cache *s,
                                           void *x, bool init)
{
        kmemleak_free_recursive(x, s->flags);
        kmsan_slab_free(s, x);

        debug_check_no_locks_freed(x, s->object_size);

@@ -5941,6 +5956,7 @@ static char *create_unique_id(struct kmem_cache *s)
        p += sprintf(p, "%07u", s->size);

        BUG_ON(p > name + ID_STR_LENGTH - 1);
        kmsan_unpoison_memory(name, p - name);
        return name;
}

@@ -6042,6 +6058,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
        al->name = name;
        al->next = alias_list;
        alias_list = al;
        kmsan_unpoison_memory(al, sizeof(*al));
        return 0;
}

