Skip to content

Commit 582077c

Browse files
Author: Peter Zijlstra (also committer)
x86/cfi: Clean up linkage
With the introduction of kCFI the addition of ENDBR to SYM_FUNC_START* no longer suffices to make the function indirectly callable. This now requires the use of SYM_TYPED_FUNC_START.

As such, remove the implicit ENDBR from SYM_FUNC_START* and add some explicit annotations to fix things up again.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Link: https://lore.kernel.org/r/20250207122546.409116003@infradead.org
Commit 582077c (1 parent: 2981557)

29 files changed: +103 additions, -23 deletions

arch/x86/crypto/aesni-intel_asm.S

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/frame.h>
 
 #define STATE1 %xmm0
@@ -1071,6 +1072,7 @@ SYM_FUNC_END(_aesni_inc)
  *		      size_t len, u8 *iv)
  */
 SYM_FUNC_START(aesni_ctr_enc)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	cmp $16, LEN
 	jb .Lctr_enc_just_ret

arch/x86/entry/calling.h

Lines changed: 1 addition & 0 deletions
@@ -431,6 +431,7 @@ For 32-bit we have the following conventions - kernel is built with
 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 .macro THUNK name, func
 SYM_FUNC_START(\name)
+	ANNOTATE_NOENDBR
 	pushq %rbp
 	movq %rsp, %rbp

arch/x86/entry/entry.S

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,7 @@
 
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
@@ -17,6 +18,7 @@
 .pushsection .noinstr.text, "ax"
 
 SYM_FUNC_START(entry_ibpb)
+	ANNOTATE_NOENDBR
 	movl $MSR_IA32_PRED_CMD, %ecx
 	movl $PRED_CMD_IBPB, %eax
 	xorl %edx, %edx

arch/x86/entry/entry_64.S

Lines changed: 3 additions & 0 deletions
@@ -175,6 +175,7 @@ SYM_CODE_END(entry_SYSCALL_64)
  */
 .pushsection .text, "ax"
 SYM_FUNC_START(__switch_to_asm)
+	ANNOTATE_NOENDBR
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in inactive_task_frame
@@ -742,6 +743,7 @@ _ASM_NOKPROBE(common_interrupt_return)
  * Is in entry.text as it shouldn't be instrumented.
  */
 SYM_FUNC_START(asm_load_gs_index)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:
@@ -1526,6 +1528,7 @@ SYM_CODE_END(rewind_stack_and_make_dead)
  * refactored in the future if needed.
  */
 SYM_FUNC_START(clear_bhb_loop)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp
 	movl $5, %ecx

arch/x86/entry/entry_64_fred.S

Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ SYM_CODE_END(asm_fred_entrypoint_kernel)
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
 SYM_FUNC_START(asm_fred_entry_from_kvm)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp

arch/x86/entry/vdso/Makefile

Lines changed: 1 addition & 0 deletions
@@ -133,6 +133,7 @@ KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -DBUILD_VDSO
 
 ifdef CONFIG_MITIGATION_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)

arch/x86/include/asm/linkage.h

Lines changed: 6 additions & 12 deletions
@@ -119,33 +119,27 @@
 
 /* SYM_FUNC_START -- use for global functions */
 #define SYM_FUNC_START(name)				\
-	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
 #define SYM_FUNC_START_NOALIGN(name)			\
-	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
 
 /* SYM_FUNC_START_LOCAL -- use for local functions */
 #define SYM_FUNC_START_LOCAL(name)			\
-	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
 #define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
-	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
 
 /* SYM_FUNC_START_WEAK -- use for weak functions */
 #define SYM_FUNC_START_WEAK(name)			\
-	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
 #define SYM_FUNC_START_WEAK_NOALIGN(name)		\
-	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
 
 #endif /* _ASM_X86_LINKAGE_H */

arch/x86/include/asm/page_64.h

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ static inline void clear_page(void *page)
 }
 
 void copy_page(void *to, void *from);
+KCFI_REFERENCE(copy_page);
 
 #ifdef CONFIG_X86_5LEVEL
 /*

arch/x86/include/asm/paravirt_types.h

Lines changed: 11 additions & 1 deletion
@@ -244,7 +244,17 @@ extern struct paravirt_patch_template pv_ops;
 
 int paravirt_disable_iospace(void);
 
-/* This generates an indirect call based on the operation type number. */
+/*
+ * This generates an indirect call based on the operation type number.
+ *
+ * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
+ * capabilities and the former requiring all capabilities being finalized --
+ * these indirect calls are subject to IBT and the paravirt stubs should have
+ * ENDBR on.
+ *
+ * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
+ * don't need to bother with CFI prefixes.
+ */
 #define PARAVIRT_CALL					\
 	ANNOTATE_RETPOLINE_SAFE				\
 	"call *%[paravirt_opptr];"

arch/x86/include/asm/special_insns.h

Lines changed: 2 additions & 2 deletions
@@ -42,14 +42,14 @@ static __always_inline void native_write_cr2(unsigned long val)
 	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
 }
 
-static inline unsigned long __native_read_cr3(void)
+static __always_inline unsigned long __native_read_cr3(void)
 {
 	unsigned long val;
 	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }
 
-static inline void native_write_cr3(unsigned long val)
+static __always_inline void native_write_cr3(unsigned long val)
 {
 	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
 }

0 commit comments

Comments
 (0)