Skip to content

Commit 8ad8b72

Browse files
Nick Hu authored and Palmer Dabbelt committed
riscv: Add KASAN support
This patch ports the Kernel Address SANitizer (KASAN) feature. Note: the start address of shadow memory is at the beginning of kernel space, which is 2^64 - (2^39 / 2) in SV39. The size of the kernel space is 2^38 bytes, so the size of shadow memory should be 2^38 / 8. Thus, the shadow memory does not overlap with the fixmap area. There are currently two limitations in this port: 1. RV64 only: KASAN needs a large address space for the extra shadow memory region. 2. KASAN can't debug modules, since modules are allocated in the VMALLOC area. We map the shadow memory corresponding to the VMALLOC area to kasan_early_shadow_page, because we don't have enough physical space for all of the shadow memory corresponding to the VMALLOC area. Signed-off-by: Nick Hu <nickhu@andestech.com> Reported-by: Greentime Hu <green.hu@gmail.com> Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
1 parent 57ee58e commit 8ad8b72

File tree

12 files changed

+169
-4
lines changed

12 files changed

+169
-4
lines changed

arch/riscv/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ config RISCV
6666
select HAVE_ARCH_MMAP_RND_BITS if MMU
6767
select ARCH_HAS_GCOV_PROFILE_ALL
6868
select HAVE_COPY_THREAD_TLS
69+
select HAVE_ARCH_KASAN if MMU && 64BIT
6970

7071
config ARCH_MMAP_RND_BITS_MIN
7172
default 18 if 64BIT

arch/riscv/include/asm/kasan.h

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 Andes Technology Corporation */

#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN

#include <asm/pgtable.h>

/* Each shadow byte covers 2^3 = 8 bytes of kernel memory. */
#define KASAN_SHADOW_SCALE_SHIFT	3

/*
 * Shadow for the 2^38-byte SV39 kernel space is 2^38 / 8 = 2^35 bytes,
 * placed at the very start of kernel space so it does not overlap the
 * fixmap area (see the commit message for the layout rationale).
 */
#define KASAN_SHADOW_SIZE	(UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_START	0xffffffc000000000 /* 2^64 - 2^38 */
#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)

/*
 * Offset the compiler adds to (addr >> KASAN_SHADOW_SCALE_SHIFT) so that
 * the top of the address space maps to KASAN_SHADOW_END.
 */
#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
					(64 - KASAN_SHADOW_SCALE_SHIFT)))

/* Late init: populate real shadow memory (called from setup_arch()). */
void kasan_init(void);
/* Early init: map the whole shadow region to a zero page (called from head.S). */
asmlinkage void kasan_early_init(void);

#endif
#endif
#endif /* __ASM_KASAN_H */

arch/riscv/include/asm/pgtable-64.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
5858
return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
5959
}
6060

61+
/* Return the struct page backing the PFN stored in @pud. */
static inline struct page *pud_page(pud_t pud)
{
	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
65+
6166
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
6267

6368
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)

arch/riscv/include/asm/string.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,17 @@
1111

1212
#define __HAVE_ARCH_MEMSET
1313
extern asmlinkage void *memset(void *, int, size_t);
14+
extern asmlinkage void *__memset(void *, int, size_t);
1415

1516
#define __HAVE_ARCH_MEMCPY
1617
extern asmlinkage void *memcpy(void *, const void *, size_t);
18+
extern asmlinkage void *__memcpy(void *, const void *, size_t);
1719

20+
/* For those files which don't want to check by kasan. */
21+
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
22+
23+
#define memcpy(dst, src, len) __memcpy(dst, src, len)
24+
#define memset(s, c, n) __memset(s, c, n)
25+
26+
#endif
1827
#endif /* _ASM_RISCV_STRING_H */

arch/riscv/kernel/head.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,9 @@ clear_bss_done:
121121
sw zero, TASK_TI_CPU(tp)
122122
la sp, init_thread_union + THREAD_SIZE
123123

124+
#ifdef CONFIG_KASAN
125+
call kasan_early_init
126+
#endif
124127
/* Start the kernel */
125128
call parse_dtb
126129
tail start_kernel

arch/riscv/kernel/riscv_ksyms.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,5 @@
1111
*/
1212
EXPORT_SYMBOL(memset);
1313
EXPORT_SYMBOL(memcpy);
14+
EXPORT_SYMBOL(__memset);
15+
EXPORT_SYMBOL(__memcpy);

arch/riscv/kernel/setup.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include <asm/smp.h>
2525
#include <asm/tlbflush.h>
2626
#include <asm/thread_info.h>
27+
#include <asm/kasan.h>
2728

2829
#include "head.h"
2930

@@ -74,6 +75,10 @@ void __init setup_arch(char **cmdline_p)
7475
swiotlb_init(1);
7576
#endif
7677

78+
#ifdef CONFIG_KASAN
79+
kasan_init();
80+
#endif
81+
7782
#ifdef CONFIG_SMP
7883
setup_smp();
7984
#endif

arch/riscv/kernel/vmlinux.lds.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ SECTIONS
4646
KPROBES_TEXT
4747
ENTRY_TEXT
4848
IRQENTRY_TEXT
49+
SOFTIRQENTRY_TEXT
4950
*(.fixup)
5051
_etext = .;
5152
}

arch/riscv/lib/memcpy.S

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,8 @@
77
#include <asm/asm.h>
88

99
/* void *memcpy(void *, const void *, size_t) */
10-
ENTRY(memcpy)
10+
ENTRY(__memcpy)
11+
WEAK(memcpy)
1112
move t6, a0 /* Preserve return value */
1213

1314
/* Defer to byte-oriented copy for small sizes */
@@ -104,4 +105,4 @@ ENTRY(memcpy)
104105
bltu a1, a3, 5b
105106
6:
106107
ret
107-
END(memcpy)
108+
END(__memcpy)

arch/riscv/lib/memset.S

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,8 @@
88
#include <asm/asm.h>
99

1010
/* void *memset(void *, int, size_t) */
11-
ENTRY(memset)
11+
ENTRY(__memset)
12+
WEAK(memset)
1213
move t0, a0 /* Preserve return value */
1314

1415
/* Defer to byte-oriented fill for small sizes */
@@ -109,4 +110,4 @@ ENTRY(memset)
109110
bltu t0, a3, 5b
110111
6:
111112
ret
112-
END(memset)
113+
END(__memset)

arch/riscv/mm/Makefile

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,3 +15,9 @@ ifeq ($(CONFIG_MMU),y)
1515
obj-$(CONFIG_SMP) += tlbflush.o
1616
endif
1717
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
18+
obj-$(CONFIG_KASAN) += kasan_init.o
19+
20+
ifdef CONFIG_KASAN
21+
KASAN_SANITIZE_kasan_init.o := n
22+
KASAN_SANITIZE_init.o := n
23+
endif

arch/riscv/mm/kasan_init.c

Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
// Copyright (C) 2019 Andes Technology Corporation
3+
4+
#include <linux/pfn.h>
5+
#include <linux/init_task.h>
6+
#include <linux/kasan.h>
7+
#include <linux/kernel.h>
8+
#include <linux/memblock.h>
9+
#include <asm/tlbflush.h>
10+
#include <asm/pgtable.h>
11+
#include <asm/fixmap.h>
12+
13+
/* Boot-time page directory set up in head.S, active before swapper_pg_dir. */
extern pgd_t early_pg_dir[PTRS_PER_PGD];

/*
 * Map the entire KASAN shadow region to the single shared
 * kasan_early_shadow_page so instrumented code can run before the real
 * shadow memory is populated by kasan_init().  Called from head.S very
 * early in boot, hence asmlinkage.
 */
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	/* Every early shadow PTE points at the same shared zero page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			PAGE_KERNEL));

	/* Every early shadow PMD points at that single PTE table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	/*
	 * Point every PGD entry covering [KASAN_SHADOW_START,
	 * KASAN_SHADOW_END) in early_pg_dir at the shared PMD table.
	 */
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/*
	 * init for swapper_pg_dir: mirror the same mapping so the shadow
	 * remains valid after the switch away from early_pg_dir.
	 */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
}
46+
47+
/*
 * Back the shadow range [start, end) with freshly allocated physical
 * pages: build PTE and PMD tables from memblock, wire them into the
 * kernel page tables, then zero the new shadow (0 = "addressable").
 */
static void __init populate(void *start, void *end)
{
	unsigned long i;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	/* Round up: one PMD entry covers PTRS_PER_PTE pages. */
	unsigned long n_pmds =
		(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
						n_pages / PTRS_PER_PTE;
	pgd_t *pgd = pgd_offset_k(vaddr);
	/* NOTE(review): memblock_alloc()/memblock_phys_alloc() returns are
	 * not checked here; early boot policy may be "panic on OOM" inside
	 * memblock — confirm, otherwise a NULL deref follows. */
	pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	/* One real physical page of shadow per shadow page in the range. */
	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

		set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}

	/* Each PGD entry takes the next PTRS_PER_PMD-sized slice of pmd[]. */
	for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
				__pgprot(_PAGE_TABLE)));

	/* Each PMD entry takes the next PTRS_PER_PTE-sized slice of pte[]. */
	for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
	/* Zero shadow = whole range marked accessible to KASAN. */
	memset(start, 0, end - start);
}
77+
78+
/*
 * Replace the boot-time zero-page shadow with real shadow memory for all
 * physical RAM.  The shadow for kernel space below VMALLOC_END (including
 * the VMALLOC area, where modules live) keeps pointing at the shared
 * early shadow page — hence KASAN cannot check modules on this port.
 */
void __init kasan_init(void)
{
	struct memblock_region *reg;
	unsigned long i;

	/* Shadow up to VMALLOC_END's shadow stays on the early zero page. */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Allocate real shadow for every region of physical memory. */
	for_each_memblock(memory, reg) {
		void *start = (void *)__va(reg->base);
		void *end = (void *)__va(reg->base + reg->size);

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start),
			kasan_mem_to_shadow(end));
	};

	/*
	 * Remap the shared early shadow page read-only (no _PAGE_WRITE) so
	 * any stray write through it faults instead of corrupting it.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	/* depth 0 => KASAN reporting enabled for the init task. */
	init_task.kasan_depth = 0;
}

0 commit comments

Comments
 (0)