Skip to content

Commit 2482dde

Browse files
keescook
authored and torvalds committed
mm: add SLUB free list pointer obfuscation
This SLUB free list pointer obfuscation code is modified from Brad Spengler/PaX Team's code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. This adds a per-cache random value to SLUB caches that is XORed with their freelist pointer address and value. This adds nearly zero overhead and frustrates the very common heap overflow exploitation method of overwriting freelist pointers. A recent example of the attack is written up here: http://cyseclabs.com/blog/cve-2016-6187-heap-off-by-one-exploit and there is a section dedicated to the technique the book "A Guide to Kernel Exploitation: Attacking the Core". This is based on patches by Daniel Micay, and refactored to minimize the use of #ifdef. With 200-count cycles of "hackbench -g 20 -l 1000" I saw the following run times: before: mean 10.11882499999999999995 variance .03320378329145728642 stdev .18221905304181911048 after: mean 10.12654000000000000014 variance .04700556623115577889 stdev .21680767106160192064 The difference gets lost in the noise, but if the above is to be taken literally, using CONFIG_FREELIST_HARDENED is 0.07% slower. Link: http://lkml.kernel.org/r/20170802180609.GA66807@beast Signed-off-by: Kees Cook <keescook@chromium.org> Suggested-by: Daniel Micay <danielmicay@gmail.com> Cc: Rik van Riel <riel@redhat.com> Cc: Tycho Andersen <tycho@docker.com> Cc: Alexander Popov <alex.popov@linux.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent ea37df5 commit 2482dde

File tree

3 files changed

+50
-5
lines changed

3 files changed

+50
-5
lines changed

include/linux/slub_def.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,10 @@ struct kmem_cache {
115115
#endif
116116
#endif
117117

118+
#ifdef CONFIG_SLAB_FREELIST_HARDENED
119+
unsigned long random;
120+
#endif
121+
118122
#ifdef CONFIG_NUMA
119123
/*
120124
* Defragmentation by allocating from a remote node.

init/Kconfig

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1576,6 +1576,15 @@ config SLAB_FREELIST_RANDOM
15761576
security feature reduces the predictability of the kernel slab
15771577
allocator against heap overflows.
15781578

1579+
config SLAB_FREELIST_HARDENED
1580+
bool "Harden slab freelist metadata"
1581+
depends on SLUB
1582+
help
1583+
Many kernel heap attacks try to target slab cache metadata and
1584+
other infrastructure. This option makes minor performance
1585+
sacrifices to harden the kernel slab allocator against common
1586+
freelist exploit methods.
1587+
15791588
config SLUB_CPU_PARTIAL
15801589
default y
15811590
depends on SLUB && SMP

mm/slub.c

Lines changed: 37 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <linux/stacktrace.h>
3535
#include <linux/prefetch.h>
3636
#include <linux/memcontrol.h>
37+
#include <linux/random.h>
3738

3839
#include <trace/events/kmem.h>
3940

@@ -238,30 +239,58 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
238239
* Core slab cache functions
239240
*******************************************************************/
240241

242+
/*
243+
* Returns freelist pointer (ptr). With hardening, this is obfuscated
244+
* with an XOR of the address where the pointer is held and a per-cache
245+
* random number.
246+
*/
247+
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
248+
unsigned long ptr_addr)
249+
{
250+
#ifdef CONFIG_SLAB_FREELIST_HARDENED
251+
return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
252+
#else
253+
return ptr;
254+
#endif
255+
}
256+
257+
/* Returns the freelist pointer recorded at location ptr_addr. */
258+
static inline void *freelist_dereference(const struct kmem_cache *s,
259+
void *ptr_addr)
260+
{
261+
return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
262+
(unsigned long)ptr_addr);
263+
}
264+
241265
static inline void *get_freepointer(struct kmem_cache *s, void *object)
242266
{
243-
return *(void **)(object + s->offset);
267+
return freelist_dereference(s, object + s->offset);
244268
}
245269

246270
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
247271
{
248-
prefetch(object + s->offset);
272+
if (object)
273+
prefetch(freelist_dereference(s, object + s->offset));
249274
}
250275

251276
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
252277
{
278+
unsigned long freepointer_addr;
253279
void *p;
254280

255281
if (!debug_pagealloc_enabled())
256282
return get_freepointer(s, object);
257283

258-
probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
259-
return p;
284+
freepointer_addr = (unsigned long)object + s->offset;
285+
probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
286+
return freelist_ptr(s, p, freepointer_addr);
260287
}
261288

262289
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
263290
{
264-
*(void **)(object + s->offset) = fp;
291+
unsigned long freeptr_addr = (unsigned long)object + s->offset;
292+
293+
*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
265294
}
266295

267296
/* Loop over all objects in a slab */
@@ -3563,6 +3592,9 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
35633592
{
35643593
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
35653594
s->reserved = 0;
3595+
#ifdef CONFIG_SLAB_FREELIST_HARDENED
3596+
s->random = get_random_long();
3597+
#endif
35663598

35673599
if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
35683600
s->reserved = sizeof(struct rcu_head);

0 commit comments

Comments
 (0)