Skip to content

Commit 72e213a

Browse files
author
Peter Zijlstra
committed
x86/ibt: Clean up is_endbr()
Pretty much every caller of is_endbr() actually wants to test something at an address and ends up doing get_kernel_nofault(). Fold the lot into a more convenient helper.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Link: https://lore.kernel.org/r/20250207122546.181367417@infradead.org
1 parent 92d2da3 commit 72e213a

File tree

7 files changed

+27
-52
lines changed

7 files changed

+27
-52
lines changed

arch/x86/events/core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2844,7 +2844,7 @@ static bool is_uprobe_at_func_entry(struct pt_regs *regs)
28442844
return true;
28452845

28462846
/* endbr64 (64-bit only) */
2847-
if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn))
2847+
if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
28482848
return true;
28492849

28502850
return false;

arch/x86/include/asm/ftrace.h

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -36,21 +36,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
3636

3737
static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
3838
{
39-
#ifdef CONFIG_X86_KERNEL_IBT
40-
u32 instr;
41-
42-
/* We want to be extra safe in case entry ip is on the page edge,
43-
* but otherwise we need to avoid get_kernel_nofault()'s overhead.
44-
*/
45-
if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
46-
if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
47-
return fentry_ip;
48-
} else {
49-
instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
50-
}
51-
if (is_endbr(instr))
39+
if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))
5240
fentry_ip -= ENDBR_INSN_SIZE;
53-
#endif
41+
5442
return fentry_ip;
5543
}
5644
#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)

arch/x86/include/asm/ibt.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ static inline __attribute_const__ u32 gen_endbr_poison(void)
6565
return 0x001f0f66; /* osp nopl (%rax) */
6666
}
6767

68-
static inline bool is_endbr(u32 val)
68+
static inline bool __is_endbr(u32 val)
6969
{
7070
if (val == gen_endbr_poison())
7171
return true;
@@ -74,6 +74,7 @@ static inline bool is_endbr(u32 val)
7474
return val == gen_endbr();
7575
}
7676

77+
extern __noendbr bool is_endbr(u32 *val);
7778
extern __noendbr u64 ibt_save(bool disable);
7879
extern __noendbr void ibt_restore(u64 save);
7980

@@ -98,7 +99,7 @@ extern __noendbr void ibt_restore(u64 save);
9899

99100
#define __noendbr
100101

101-
static inline bool is_endbr(u32 val) { return false; }
102+
static inline bool is_endbr(u32 *val) { return false; }
102103

103104
static inline u64 ibt_save(bool disable) { return 0; }
104105
static inline void ibt_restore(u64 save) { }

arch/x86/kernel/alternative.c

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -852,16 +852,24 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
852852

853853
#ifdef CONFIG_X86_KERNEL_IBT
854854

855+
__noendbr bool is_endbr(u32 *val)
856+
{
857+
u32 endbr;
858+
859+
__get_kernel_nofault(&endbr, val, u32, Efault);
860+
return __is_endbr(endbr);
861+
862+
Efault:
863+
return false;
864+
}
865+
855866
static void poison_cfi(void *addr);
856867

857868
static void __init_or_module poison_endbr(void *addr, bool warn)
858869
{
859-
u32 endbr, poison = gen_endbr_poison();
860-
861-
if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
862-
return;
870+
u32 poison = gen_endbr_poison();
863871

864-
if (!is_endbr(endbr)) {
872+
if (!is_endbr(addr)) {
865873
WARN_ON_ONCE(warn);
866874
return;
867875
}
@@ -988,7 +996,7 @@ static u32 cfi_seed __ro_after_init;
988996
static u32 cfi_rehash(u32 hash)
989997
{
990998
hash ^= cfi_seed;
991-
while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
999+
while (unlikely(__is_endbr(hash) || __is_endbr(-hash))) {
9921000
bool lsb = hash & 1;
9931001
hash >>= 1;
9941002
if (lsb)

arch/x86/kernel/kprobes/core.c

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -373,16 +373,7 @@ static bool can_probe(unsigned long paddr)
373373
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
374374
bool *on_func_entry)
375375
{
376-
u32 insn;
377-
378-
/*
379-
* Since 'addr' is not guaranteed to be safe to access, use
380-
* copy_from_kernel_nofault() to read the instruction:
381-
*/
382-
if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
383-
return NULL;
384-
385-
if (is_endbr(insn)) {
376+
if (is_endbr((u32 *)addr)) {
386377
*on_func_entry = !offset || offset == 4;
387378
if (*on_func_entry)
388379
offset = 4;

arch/x86/net/bpf_jit_comp.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -641,7 +641,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
641641
* See emit_prologue(), for IBT builds the trampoline hook is preceded
642642
* with an ENDBR instruction.
643643
*/
644-
if (is_endbr(*(u32 *)ip))
644+
if (is_endbr(ip))
645645
ip += ENDBR_INSN_SIZE;
646646

647647
return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
@@ -3036,7 +3036,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
30363036
/* skip patched call instruction and point orig_call to actual
30373037
* body of the kernel function.
30383038
*/
3039-
if (is_endbr(*(u32 *)orig_call))
3039+
if (is_endbr(orig_call))
30403040
orig_call += ENDBR_INSN_SIZE;
30413041
orig_call += X86_PATCH_SIZE;
30423042
}

kernel/trace/bpf_trace.c

Lines changed: 4 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1038,27 +1038,14 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
10381038
.arg1_type = ARG_PTR_TO_CTX,
10391039
};
10401040

1041-
#ifdef CONFIG_X86_KERNEL_IBT
1042-
static unsigned long get_entry_ip(unsigned long fentry_ip)
1041+
static inline unsigned long get_entry_ip(unsigned long fentry_ip)
10431042
{
1044-
u32 instr;
1045-
1046-
/* We want to be extra safe in case entry ip is on the page edge,
1047-
* but otherwise we need to avoid get_kernel_nofault()'s overhead.
1048-
*/
1049-
if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1050-
if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1051-
return fentry_ip;
1052-
} else {
1053-
instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1054-
}
1055-
if (is_endbr(instr))
1043+
#ifdef CONFIG_X86_KERNEL_IBT
1044+
if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))
10561045
fentry_ip -= ENDBR_INSN_SIZE;
1046+
#endif
10571047
return fentry_ip;
10581048
}
1059-
#else
1060-
#define get_entry_ip(fentry_ip) fentry_ip
1061-
#endif
10621049

10631050
BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
10641051
{

0 commit comments

Comments
 (0)