Skip to content

Commit 3c9e3aa

Browse files
xairy authored and torvalds committed
kasan: add tag related helper functions
This commit adds a few helper functions that are meant to be used to work with tags embedded in the top byte of kernel pointers: to set, to get or to reset the top byte.

Link: http://lkml.kernel.org/r/f6c6437bb8e143bc44f42c3c259c62e734be7935.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 9c23f84 commit 3c9e3aa

File tree

6 files changed

+101
-2
lines changed

6 files changed

+101
-2
lines changed

arch/arm64/include/asm/kasan.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,16 @@
44

55
#ifndef __ASSEMBLY__
66

7-
#ifdef CONFIG_KASAN
8-
97
#include <linux/linkage.h>
108
#include <asm/memory.h>
119
#include <asm/pgtable-types.h>
1210

11+
#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
12+
#define arch_kasan_reset_tag(addr) __tag_reset(addr)
13+
#define arch_kasan_get_tag(addr) __tag_get(addr)
14+
15+
#ifdef CONFIG_KASAN
16+
1317
/*
1418
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
1519
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,

arch/arm64/include/asm/memory.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -226,6 +226,18 @@ extern u64 vabits_user;
226226
#define untagged_addr(addr) \
227227
((__typeof__(addr))sign_extend64((u64)(addr), 55))
228228

229+
#ifdef CONFIG_KASAN_SW_TAGS
230+
#define __tag_shifted(tag) ((u64)(tag) << 56)
231+
#define __tag_set(addr, tag) (__typeof__(addr))( \
232+
((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
233+
#define __tag_reset(addr) untagged_addr(addr)
234+
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
235+
#else
236+
#define __tag_set(addr, tag) (addr)
237+
#define __tag_reset(addr) (addr)
238+
#define __tag_get(addr) 0
239+
#endif
240+
229241
/*
230242
* Physical vs virtual RAM address space conversion. These are
231243
* private definitions which should NOT be used outside memory.h

arch/arm64/mm/kasan_init.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,8 @@ void __init kasan_init(void)
252252
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
253253
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
254254

255+
kasan_init_tags();
256+
255257
/* At this point kasan is fully initialized. Enable error messages */
256258
init_task.kasan_depth = 0;
257259
pr_info("KernelAddressSanitizer initialized\n");

include/linux/kasan.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,19 @@ static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
169169

170170
#define KASAN_SHADOW_INIT 0xFF
171171

172+
void kasan_init_tags(void);
173+
174+
void *kasan_reset_tag(const void *addr);
175+
176+
#else /* CONFIG_KASAN_SW_TAGS */
177+
178+
static inline void kasan_init_tags(void) { }
179+
180+
/*
 * Without CONFIG_KASAN_SW_TAGS pointers carry no tag in the top byte,
 * so there is nothing to strip: hand the address back unchanged
 * (dropping const, matching the tag-enabled variant's signature).
 */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}
184+
172185
#endif /* CONFIG_KASAN_SW_TAGS */
173186

174187
#endif /* LINUX_KASAN_H */

mm/kasan/kasan.h

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@
88
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
99
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
1010

11+
#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
12+
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
13+
#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
14+
1115
#define KASAN_FREE_PAGE 0xFF /* page was freed */
1216
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
1317
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
@@ -126,6 +130,33 @@ static inline void quarantine_reduce(void) { }
126130
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
127131
#endif
128132

133+
#ifdef CONFIG_KASAN_SW_TAGS
134+
135+
u8 random_tag(void);
136+
137+
#else
138+
139+
/*
 * Stub for builds without software tag-based KASAN: no tag
 * randomization is performed, every "tag" is simply zero.
 */
static inline u8 random_tag(void)
{
	return 0;
}
143+
144+
#endif
145+
146+
#ifndef arch_kasan_set_tag
147+
#define arch_kasan_set_tag(addr, tag) ((void *)(addr))
148+
#endif
149+
#ifndef arch_kasan_reset_tag
150+
#define arch_kasan_reset_tag(addr) ((void *)(addr))
151+
#endif
152+
#ifndef arch_kasan_get_tag
153+
#define arch_kasan_get_tag(addr) 0
154+
#endif
155+
156+
#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
157+
#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
158+
#define get_tag(addr) arch_kasan_get_tag(addr)
159+
129160
/*
130161
* Exported functions for interfaces called from assembly or from generated
131162
* code. Declarations here to avoid warning about missing declarations.

mm/kasan/tags.c

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,43 @@
3838
#include "kasan.h"
3939
#include "../slab.h"
4040

41+
static DEFINE_PER_CPU(u32, prng_state);
42+
43+
void kasan_init_tags(void)
44+
{
45+
int cpu;
46+
47+
for_each_possible_cpu(cpu)
48+
per_cpu(prng_state, cpu) = get_random_u32();
49+
}
50+
51+
/*
52+
* If a preemption happens between this_cpu_read and this_cpu_write, the only
53+
* side effect is that we'll give a few allocated in different contexts objects
54+
* the same tag. Since tag-based KASAN is meant to be used a probabilistic
55+
* bug-detection debug feature, this doesn't have significant negative impact.
56+
*
57+
* Ideally the tags use strong randomness to prevent any attempts to predict
58+
* them during explicit exploit attempts. But strong randomness is expensive,
59+
* and we did an intentional trade-off to use a PRNG. This non-atomic RMW
60+
* sequence has in fact positive effect, since interrupts that randomly skew
61+
* PRNG at unpredictable points do only good.
62+
*/
63+
u8 random_tag(void)
64+
{
65+
u32 state = this_cpu_read(prng_state);
66+
67+
state = 1664525 * state + 1013904223;
68+
this_cpu_write(prng_state, state);
69+
70+
return (u8)(state % (KASAN_TAG_MAX + 1));
71+
}
72+
73+
/*
 * Strip the tag from a (possibly tagged) kernel pointer,
 * exposing the arch-provided reset_tag() to generic code.
 */
void *kasan_reset_tag(const void *addr)
{
	return reset_tag(addr);
}
77+
4178
void check_memory_region(unsigned long addr, size_t size, bool write,
4279
unsigned long ret_ip)
4380
{

0 commit comments

Comments
 (0)