Skip to content

Commit 90cbac0

Browse files
chleroy authored and mpe committed
powerpc: Enable KFENCE for PPC32
Add architecture specific implementation details for KFENCE and enable KFENCE for the ppc32 architecture. In particular, this implements the required interface in <asm/kfence.h>. KFENCE requires that attributes for pages from its memory pool can individually be set. Therefore, force the Read/Write linear map to be mapped at page granularity. Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> Acked-by: Marco Elver <elver@google.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/8dfe1bd2abde26337c1d8c1ad0acfcc82185e0d5.1614868445.git.christophe.leroy@csgroup.eu
1 parent 0b71b37 commit 90cbac0

File tree

7 files changed

+57
-10
lines changed

7 files changed

+57
-10
lines changed

arch/powerpc/Kconfig

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,7 @@ config PPC
185185
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
186186
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
187187
select HAVE_ARCH_KGDB
188+
select HAVE_ARCH_KFENCE if PPC32
188189
select HAVE_ARCH_MMAP_RND_BITS
189190
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
190191
select HAVE_ARCH_NVRAM_OPS
@@ -786,7 +787,7 @@ config THREAD_SHIFT
786787
config DATA_SHIFT_BOOL
787788
bool "Set custom data alignment"
788789
depends on ADVANCED_OPTIONS
789-
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
790+
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
790791
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
791792
help
792793
This option allows you to set the kernel data alignment. When
@@ -798,13 +799,13 @@ config DATA_SHIFT_BOOL
798799
config DATA_SHIFT
799800
int "Data shift" if DATA_SHIFT_BOOL
800801
default 24 if STRICT_KERNEL_RWX && PPC64
801-
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
802-
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
802+
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
803+
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
803804
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
804-
default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
805+
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
805806
default 23 if STRICT_KERNEL_RWX && PPC_8xx
806-
default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
807-
default 19 if DEBUG_PAGEALLOC && PPC_8xx
807+
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
808+
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
808809
default PPC_PAGE_SHIFT
809810
help
810811
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.

arch/powerpc/include/asm/kfence.h

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * The KFENCE pool needs no extra arch-specific initialisation on powerpc:
 * the linear map is already mapped at page granularity when KFENCE is
 * enabled, so page protections can be flipped individually.
 */
static inline bool arch_kfence_init_pool(void)
{
	return true;
}

/*
 * Toggle the _PAGE_PRESENT bit of the linear-map PTE covering @addr.
 * Protecting a page makes any access fault; the TLB is flushed so the
 * stale translation cannot linger. Un-protecting restores the mapping
 * lazily (the fault path repopulates the TLB on next access).
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *ptep = virt_to_kpte(addr);

	if (!protect) {
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
		return true;
	}

	pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return true;
}

#endif /* __ASM_POWERPC_KFENCE_H */

arch/powerpc/mm/book3s32/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
162162
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
163163

164164

165-
if (debug_pagealloc_enabled() || __map_without_bats) {
165+
if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
166166
pr_debug_once("Read-Write memory mapped without BATs\n");
167167
if (base >= border)
168168
return base;

arch/powerpc/mm/fault.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
#include <linux/context_tracking.h>
3333
#include <linux/hugetlb.h>
3434
#include <linux/uaccess.h>
35+
#include <linux/kfence.h>
3536

3637
#include <asm/firmware.h>
3738
#include <asm/interrupt.h>
@@ -418,8 +419,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
418419
* take a page fault to a kernel address or a page fault to a user
419420
* address outside of dedicated places
420421
*/
421-
if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
422+
if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
423+
if (kfence_handle_page_fault(address, is_write, regs))
424+
return 0;
425+
422426
return SIGSEGV;
427+
}
423428

424429
/*
425430
* If we're in an interrupt, have no user context or are running

arch/powerpc/mm/init_32.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,9 @@ static void __init MMU_setup(void)
9797
if (IS_ENABLED(CONFIG_PPC_8xx))
9898
return;
9999

100+
if (IS_ENABLED(CONFIG_KFENCE))
101+
__map_without_ltlbs = 1;
102+
100103
if (debug_pagealloc_enabled())
101104
__map_without_ltlbs = 1;
102105

arch/powerpc/mm/mmu_decl.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -185,3 +185,8 @@ void ptdump_check_wx(void);
185185
#else
186186
static inline void ptdump_check_wx(void) { }
187187
#endif
188+
189+
static inline bool debug_pagealloc_enabled_or_kfence(void)
190+
{
191+
return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
192+
}

arch/powerpc/mm/nohash/8xx.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
149149
{
150150
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
151151
unsigned long sinittext = __pa(_sinittext);
152-
bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
152+
bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
153153
unsigned long boundary = strict_boundary ? sinittext : etext8;
154154
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
155155

@@ -161,7 +161,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
161161
return 0;
162162

163163
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
164-
if (debug_pagealloc_enabled()) {
164+
if (debug_pagealloc_enabled_or_kfence()) {
165165
top = boundary;
166166
} else {
167167
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);

0 commit comments

Comments (0)