Skip to content

Commit 23fb5e9

Browse files
Xu Kuohai authored and Nobody committed
bpf, arm64: adjust the offset of str/ldr(immediate) to positive number
The BPF STX/LDX instructions use an offset relative to the FP to address stack space. Since BPF_FP is located at the top of the frame, the offset is usually a negative number. However, the arm64 str/ldr immediate instruction requires the offset to be a positive number. Therefore, this patch tries to convert the offsets. The method is to first find the negative offset furthest from the FP. Then add it to the FP to calculate a bottom position, called FPB, and then adjust the offsets in the other STR/LDX instructions relative to FPB. FPB is saved in the callee-saved register x27 of arm64, which is not used yet. Before adjusting the offsets, the patch checks every instruction to ensure that the FP does not change at run-time. If the FP may change, no offset is adjusted. For example, for the following bpftrace command: bpftrace -e 'kprobe:do_sys_open { printf("opening: %s\n", str(arg1)); }' Without this patch, jited code (fragment): 0: bti c 4: stp x29, x30, [sp, #-16]! 8: mov x29, sp c: stp x19, x20, [sp, #-16]! 10: stp x21, x22, [sp, #-16]! 14: stp x25, x26, [sp, #-16]!
18: mov x25, sp 1c: mov x26, #0x0 // #0 20: bti j 24: sub sp, sp, #0x90 28: add x19, x0, #0x0 2c: mov x0, #0x0 // #0 30: mov x10, #0xffffffffffffff78 // #-136 34: str x0, [x25, x10] 38: mov x10, #0xffffffffffffff80 // #-128 3c: str x0, [x25, x10] 40: mov x10, #0xffffffffffffff88 // #-120 44: str x0, [x25, x10] 48: mov x10, #0xffffffffffffff90 // #-112 4c: str x0, [x25, x10] 50: mov x10, #0xffffffffffffff98 // #-104 54: str x0, [x25, x10] 58: mov x10, #0xffffffffffffffa0 // #-96 5c: str x0, [x25, x10] 60: mov x10, #0xffffffffffffffa8 // #-88 64: str x0, [x25, x10] 68: mov x10, #0xffffffffffffffb0 // #-80 6c: str x0, [x25, x10] 70: mov x10, #0xffffffffffffffb8 // #-72 74: str x0, [x25, x10] 78: mov x10, #0xffffffffffffffc0 // #-64 7c: str x0, [x25, x10] 80: mov x10, #0xffffffffffffffc8 // #-56 84: str x0, [x25, x10] 88: mov x10, #0xffffffffffffffd0 // #-48 8c: str x0, [x25, x10] 90: mov x10, #0xffffffffffffffd8 // #-40 94: str x0, [x25, x10] 98: mov x10, #0xffffffffffffffe0 // #-32 9c: str x0, [x25, x10] a0: mov x10, #0xffffffffffffffe8 // #-24 a4: str x0, [x25, x10] a8: mov x10, #0xfffffffffffffff0 // #-16 ac: str x0, [x25, x10] b0: mov x10, #0xfffffffffffffff8 // #-8 b4: str x0, [x25, x10] b8: mov x10, #0x8 // #8 bc: ldr x2, [x19, x10] [...] With this patch, jited code(fragment): 0: bti c 4: stp x29, x30, [sp, #-16]! 8: mov x29, sp c: stp x19, x20, [sp, #-16]! 10: stp x21, x22, [sp, #-16]! 14: stp x25, x26, [sp, #-16]! 18: stp x27, x28, [sp, #-16]! 
1c: mov x25, sp 20: sub x27, x25, #0x88 24: mov x26, #0x0 // #0 28: bti j 2c: sub sp, sp, #0x90 30: add x19, x0, #0x0 34: mov x0, #0x0 // #0 38: str x0, [x27] 3c: str x0, [x27, #8] 40: str x0, [x27, #16] 44: str x0, [x27, #24] 48: str x0, [x27, #32] 4c: str x0, [x27, #40] 50: str x0, [x27, #48] 54: str x0, [x27, #56] 58: str x0, [x27, #64] 5c: str x0, [x27, #72] 60: str x0, [x27, #80] 64: str x0, [x27, #88] 68: str x0, [x27, #96] 6c: str x0, [x27, #104] 70: str x0, [x27, #112] 74: str x0, [x27, #120] 78: str x0, [x27, #128] 7c: ldr x2, [x19, #8] [...] Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
1 parent d8848b3 commit 23fb5e9

File tree

1 file changed

+128
-27
lines changed

1 file changed

+128
-27
lines changed

arch/arm64/net/bpf_jit_comp.c

+128-27
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
2727
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
2828
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
29+
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
2930

3031
#define check_imm(bits, imm) do { \
3132
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -63,6 +64,7 @@ static const int bpf2a64[] = {
6364
[TCALL_CNT] = A64_R(26),
6465
/* temporary register for blinding constants */
6566
[BPF_REG_AX] = A64_R(9),
67+
[FP_BOTTOM] = A64_R(27),
6668
};
6769

6870
struct jit_ctx {
@@ -73,6 +75,7 @@ struct jit_ctx {
7375
int exentry_idx;
7476
__le32 *image;
7577
u32 stack_size;
78+
int fpb_offset;
7679
};
7780

7881
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -218,7 +221,7 @@ static bool is_addsub_imm(u32 imm)
218221
*
219222
* offset = (u64)imm12 << scale
220223
*/
221-
static bool is_lsi_offset(s16 offset, int scale)
224+
static bool is_lsi_offset(int offset, int scale)
222225
{
223226
if (offset < 0)
224227
return false;
@@ -234,9 +237,9 @@ static bool is_lsi_offset(s16 offset, int scale)
234237

235238
/* Tail call offset to jump into */
236239
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
237-
#define PROLOGUE_OFFSET 8
240+
#define PROLOGUE_OFFSET 10
238241
#else
239-
#define PROLOGUE_OFFSET 7
242+
#define PROLOGUE_OFFSET 9
240243
#endif
241244

242245
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
@@ -248,6 +251,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
248251
const u8 r9 = bpf2a64[BPF_REG_9];
249252
const u8 fp = bpf2a64[BPF_REG_FP];
250253
const u8 tcc = bpf2a64[TCALL_CNT];
254+
const u8 fpb = bpf2a64[FP_BOTTOM];
251255
const int idx0 = ctx->idx;
252256
int cur_offset;
253257

@@ -286,9 +290,11 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
286290
emit(A64_PUSH(r6, r7, A64_SP), ctx);
287291
emit(A64_PUSH(r8, r9, A64_SP), ctx);
288292
emit(A64_PUSH(fp, tcc, A64_SP), ctx);
293+
emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
289294

290295
/* Set up BPF prog stack base register */
291296
emit(A64_MOV(1, fp, A64_SP), ctx);
297+
emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
292298

293299
if (!ebpf_from_cbpf) {
294300
/* Initialize tail_call_cnt */
@@ -553,10 +559,13 @@ static void build_epilogue(struct jit_ctx *ctx)
553559
const u8 r8 = bpf2a64[BPF_REG_8];
554560
const u8 r9 = bpf2a64[BPF_REG_9];
555561
const u8 fp = bpf2a64[BPF_REG_FP];
562+
const u8 fpb = bpf2a64[FP_BOTTOM];
556563

557564
/* We're done with BPF stack */
558565
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
559566

567+
/* Restore x27 and x28 */
568+
emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
560569
/* Restore fs (x25) and x26 */
561570
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
562571

@@ -650,6 +659,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
650659
const u8 src = bpf2a64[insn->src_reg];
651660
const u8 tmp = bpf2a64[TMP_REG_1];
652661
const u8 tmp2 = bpf2a64[TMP_REG_2];
662+
const u8 fp = bpf2a64[BPF_REG_FP];
663+
const u8 fpb = bpf2a64[FP_BOTTOM];
653664
const s16 off = insn->off;
654665
const s32 imm = insn->imm;
655666
const int i = insn - ctx->prog->insnsi;
@@ -658,6 +669,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
658669
u8 jmp_cond;
659670
s32 jmp_offset;
660671
u32 a64_insn;
672+
u8 src_adj;
673+
u8 dst_adj;
674+
int off_adj;
661675
int ret;
662676

663677
switch (code) {
@@ -1012,34 +1026,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
10121026
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
10131027
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
10141028
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1029+
if (ctx->fpb_offset > 0 && src == fp) {
1030+
src_adj = fpb;
1031+
off_adj = off + ctx->fpb_offset;
1032+
} else {
1033+
src_adj = src;
1034+
off_adj = off;
1035+
}
10151036
switch (BPF_SIZE(code)) {
10161037
case BPF_W:
1017-
if (is_lsi_offset(off, 2)) {
1018-
emit(A64_LDR32I(dst, src, off), ctx);
1038+
if (is_lsi_offset(off_adj, 2)) {
1039+
emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
10191040
} else {
10201041
emit_a64_mov_i(1, tmp, off, ctx);
10211042
emit(A64_LDR32(dst, src, tmp), ctx);
10221043
}
10231044
break;
10241045
case BPF_H:
1025-
if (is_lsi_offset(off, 1)) {
1026-
emit(A64_LDRHI(dst, src, off), ctx);
1046+
if (is_lsi_offset(off_adj, 1)) {
1047+
emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
10271048
} else {
10281049
emit_a64_mov_i(1, tmp, off, ctx);
10291050
emit(A64_LDRH(dst, src, tmp), ctx);
10301051
}
10311052
break;
10321053
case BPF_B:
1033-
if (is_lsi_offset(off, 0)) {
1034-
emit(A64_LDRBI(dst, src, off), ctx);
1054+
if (is_lsi_offset(off_adj, 0)) {
1055+
emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
10351056
} else {
10361057
emit_a64_mov_i(1, tmp, off, ctx);
10371058
emit(A64_LDRB(dst, src, tmp), ctx);
10381059
}
10391060
break;
10401061
case BPF_DW:
1041-
if (is_lsi_offset(off, 3)) {
1042-
emit(A64_LDR64I(dst, src, off), ctx);
1062+
if (is_lsi_offset(off_adj, 3)) {
1063+
emit(A64_LDR64I(dst, src_adj, off_adj), ctx);
10431064
} else {
10441065
emit_a64_mov_i(1, tmp, off, ctx);
10451066
emit(A64_LDR64(dst, src, tmp), ctx);
@@ -1070,36 +1091,43 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
10701091
case BPF_ST | BPF_MEM | BPF_H:
10711092
case BPF_ST | BPF_MEM | BPF_B:
10721093
case BPF_ST | BPF_MEM | BPF_DW:
1094+
if (ctx->fpb_offset > 0 && dst == fp) {
1095+
dst_adj = fpb;
1096+
off_adj = off + ctx->fpb_offset;
1097+
} else {
1098+
dst_adj = dst;
1099+
off_adj = off;
1100+
}
10731101
/* Load imm to a register then store it */
10741102
emit_a64_mov_i(1, tmp, imm, ctx);
10751103
switch (BPF_SIZE(code)) {
10761104
case BPF_W:
1077-
if (is_lsi_offset(off, 2)) {
1078-
emit(A64_STR32I(tmp, dst, off), ctx);
1105+
if (is_lsi_offset(off_adj, 2)) {
1106+
emit(A64_STR32I(tmp, dst_adj, off_adj), ctx);
10791107
} else {
10801108
emit_a64_mov_i(1, tmp2, off, ctx);
10811109
emit(A64_STR32(tmp, dst, tmp2), ctx);
10821110
}
10831111
break;
10841112
case BPF_H:
1085-
if (is_lsi_offset(off, 1)) {
1086-
emit(A64_STRHI(tmp, dst, off), ctx);
1113+
if (is_lsi_offset(off_adj, 1)) {
1114+
emit(A64_STRHI(tmp, dst_adj, off_adj), ctx);
10871115
} else {
10881116
emit_a64_mov_i(1, tmp2, off, ctx);
10891117
emit(A64_STRH(tmp, dst, tmp2), ctx);
10901118
}
10911119
break;
10921120
case BPF_B:
1093-
if (is_lsi_offset(off, 0)) {
1094-
emit(A64_STRBI(tmp, dst, off), ctx);
1121+
if (is_lsi_offset(off_adj, 0)) {
1122+
emit(A64_STRBI(tmp, dst_adj, off_adj), ctx);
10951123
} else {
10961124
emit_a64_mov_i(1, tmp2, off, ctx);
10971125
emit(A64_STRB(tmp, dst, tmp2), ctx);
10981126
}
10991127
break;
11001128
case BPF_DW:
1101-
if (is_lsi_offset(off, 3)) {
1102-
emit(A64_STR64I(tmp, dst, off), ctx);
1129+
if (is_lsi_offset(off_adj, 3)) {
1130+
emit(A64_STR64I(tmp, dst_adj, off_adj), ctx);
11031131
} else {
11041132
emit_a64_mov_i(1, tmp2, off, ctx);
11051133
emit(A64_STR64(tmp, dst, tmp2), ctx);
@@ -1113,34 +1141,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
11131141
case BPF_STX | BPF_MEM | BPF_H:
11141142
case BPF_STX | BPF_MEM | BPF_B:
11151143
case BPF_STX | BPF_MEM | BPF_DW:
1144+
if (ctx->fpb_offset > 0 && dst == fp) {
1145+
dst_adj = fpb;
1146+
off_adj = off + ctx->fpb_offset;
1147+
} else {
1148+
dst_adj = dst;
1149+
off_adj = off;
1150+
}
11161151
switch (BPF_SIZE(code)) {
11171152
case BPF_W:
1118-
if (is_lsi_offset(off, 2)) {
1119-
emit(A64_STR32I(src, dst, off), ctx);
1153+
if (is_lsi_offset(off_adj, 2)) {
1154+
emit(A64_STR32I(src, dst_adj, off_adj), ctx);
11201155
} else {
11211156
emit_a64_mov_i(1, tmp, off, ctx);
11221157
emit(A64_STR32(src, dst, tmp), ctx);
11231158
}
11241159
break;
11251160
case BPF_H:
1126-
if (is_lsi_offset(off, 1)) {
1127-
emit(A64_STRHI(src, dst, off), ctx);
1161+
if (is_lsi_offset(off_adj, 1)) {
1162+
emit(A64_STRHI(src, dst_adj, off_adj), ctx);
11281163
} else {
11291164
emit_a64_mov_i(1, tmp, off, ctx);
11301165
emit(A64_STRH(src, dst, tmp), ctx);
11311166
}
11321167
break;
11331168
case BPF_B:
1134-
if (is_lsi_offset(off, 0)) {
1135-
emit(A64_STRBI(src, dst, off), ctx);
1169+
if (is_lsi_offset(off_adj, 0)) {
1170+
emit(A64_STRBI(src, dst_adj, off_adj), ctx);
11361171
} else {
11371172
emit_a64_mov_i(1, tmp, off, ctx);
11381173
emit(A64_STRB(src, dst, tmp), ctx);
11391174
}
11401175
break;
11411176
case BPF_DW:
1142-
if (is_lsi_offset(off, 3)) {
1143-
emit(A64_STR64I(src, dst, off), ctx);
1177+
if (is_lsi_offset(off_adj, 3)) {
1178+
emit(A64_STR64I(src, dst_adj, off_adj), ctx);
11441179
} else {
11451180
emit_a64_mov_i(1, tmp, off, ctx);
11461181
emit(A64_STR64(src, dst, tmp), ctx);
@@ -1167,6 +1202,70 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
11671202
return 0;
11681203
}
11691204

1205+
/*
1206+
* Return 0 if FP may change at runtime, otherwise find the minimum negative
1207+
* offset to FP and converts it to positive number.
1208+
*/
1209+
static int find_fpb_offset(struct bpf_prog *prog)
1210+
{
1211+
int i;
1212+
int offset = 0;
1213+
1214+
for (i = 0; i < prog->len; i++) {
1215+
const struct bpf_insn *insn = &prog->insnsi[i];
1216+
const u8 class = BPF_CLASS(insn->code);
1217+
const u8 mode = BPF_MODE(insn->code);
1218+
const u8 src = insn->src_reg;
1219+
const u8 dst = insn->dst_reg;
1220+
const s32 imm = insn->imm;
1221+
const s16 off = insn->off;
1222+
1223+
switch (class) {
1224+
case BPF_STX:
1225+
case BPF_ST:
1226+
/* fp holds atomic operation result */
1227+
if (class == BPF_STX && mode == BPF_ATOMIC &&
1228+
((imm == BPF_XCHG ||
1229+
imm == (BPF_FETCH | BPF_ADD) ||
1230+
imm == (BPF_FETCH | BPF_AND) ||
1231+
imm == (BPF_FETCH | BPF_XOR) ||
1232+
imm == (BPF_FETCH | BPF_OR)) &&
1233+
src == BPF_REG_FP))
1234+
return 0;
1235+
1236+
if (mode == BPF_MEM && dst == BPF_REG_FP &&
1237+
off < offset)
1238+
offset = insn->off;
1239+
break;
1240+
1241+
case BPF_JMP32:
1242+
case BPF_JMP:
1243+
break;
1244+
1245+
case BPF_LDX:
1246+
case BPF_LD:
1247+
/* fp holds load result */
1248+
if (dst == BPF_REG_FP)
1249+
return 0;
1250+
1251+
if (class == BPF_LDX && mode == BPF_MEM &&
1252+
src == BPF_REG_FP && off < offset)
1253+
offset = off;
1254+
break;
1255+
1256+
case BPF_ALU:
1257+
case BPF_ALU64:
1258+
default:
1259+
/* fp holds ALU result */
1260+
if (dst == BPF_REG_FP)
1261+
return 0;
1262+
}
1263+
}
1264+
1265+
/* safely be converted to a positive 'int', since insn->off is 's16' */
1266+
return -offset;
1267+
}
1268+
11701269
static int build_body(struct jit_ctx *ctx, bool extra_pass)
11711270
{
11721271
const struct bpf_prog *prog = ctx->prog;
@@ -1288,6 +1387,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
12881387
goto out_off;
12891388
}
12901389

1390+
ctx.fpb_offset = find_fpb_offset(prog);
1391+
12911392
/*
12921393
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
12931394
*

0 commit comments

Comments
 (0)