26
26
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
27
27
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
28
28
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
29
+ #define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
29
30
30
31
#define check_imm (bits , imm ) do { \
31
32
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -63,6 +64,7 @@ static const int bpf2a64[] = {
63
64
[TCALL_CNT ] = A64_R (26 ),
64
65
/* temporary register for blinding constants */
65
66
[BPF_REG_AX ] = A64_R (9 ),
67
+ [FP_BOTTOM ] = A64_R (27 ),
66
68
};
67
69
68
70
struct jit_ctx {
@@ -73,6 +75,7 @@ struct jit_ctx {
73
75
int exentry_idx ;
74
76
__le32 * image ;
75
77
u32 stack_size ;
78
+ int fpb_offset ;
76
79
};
77
80
78
81
static inline void emit (const u32 insn , struct jit_ctx * ctx )
@@ -234,9 +237,9 @@ static noinline bool is_lsi_offset(s16 offset, int scale)
234
237
235
238
/* Tail call offset to jump into */
236
239
#if IS_ENABLED (CONFIG_ARM64_BTI_KERNEL )
237
- #define PROLOGUE_OFFSET 8
240
+ #define PROLOGUE_OFFSET 10
238
241
#else
239
- #define PROLOGUE_OFFSET 7
242
+ #define PROLOGUE_OFFSET 9
240
243
#endif
241
244
242
245
static int build_prologue (struct jit_ctx * ctx , bool ebpf_from_cbpf )
@@ -248,6 +251,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
248
251
const u8 r9 = bpf2a64 [BPF_REG_9 ];
249
252
const u8 fp = bpf2a64 [BPF_REG_FP ];
250
253
const u8 tcc = bpf2a64 [TCALL_CNT ];
254
+ const u8 fpb = bpf2a64 [FP_BOTTOM ];
251
255
const int idx0 = ctx -> idx ;
252
256
int cur_offset ;
253
257
@@ -286,9 +290,11 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
286
290
emit (A64_PUSH (r6 , r7 , A64_SP ), ctx );
287
291
emit (A64_PUSH (r8 , r9 , A64_SP ), ctx );
288
292
emit (A64_PUSH (fp , tcc , A64_SP ), ctx );
293
+ emit (A64_PUSH (fpb , A64_R (28 ), A64_SP ), ctx );
289
294
290
295
/* Set up BPF prog stack base register */
291
296
emit (A64_MOV (1 , fp , A64_SP ), ctx );
297
+ emit (A64_SUB_I (1 , fpb , fp , ctx -> fpb_offset ), ctx );
292
298
293
299
if (!ebpf_from_cbpf ) {
294
300
/* Initialize tail_call_cnt */
@@ -553,11 +559,13 @@ static void build_epilogue(struct jit_ctx *ctx)
553
559
const u8 r8 = bpf2a64 [BPF_REG_8 ];
554
560
const u8 r9 = bpf2a64 [BPF_REG_9 ];
555
561
const u8 fp = bpf2a64 [BPF_REG_FP ];
562
+ const u8 fpb = bpf2a64 [FP_BOTTOM ];
556
563
557
564
/* We're done with BPF stack */
558
565
emit (A64_ADD_I (1 , A64_SP , A64_SP , ctx -> stack_size ), ctx );
559
566
560
567
/* Restore fp (x25) and x26 */
568
+ emit (A64_POP (fpb , A64_R (28 ), A64_SP ), ctx );
561
569
emit (A64_POP (fp , A64_R (26 ), A64_SP ), ctx );
562
570
563
571
/* Restore callee-saved register */
@@ -645,12 +653,14 @@ static int add_exception_handler(const struct bpf_insn *insn,
645
653
static int build_insn (const struct bpf_insn * insn , struct jit_ctx * ctx ,
646
654
bool extra_pass )
647
655
{
656
+ u8 dst = bpf2a64 [insn -> dst_reg ];
657
+ u8 src = bpf2a64 [insn -> src_reg ];
658
+ s16 off = insn -> off ;
659
+ const u8 fp = bpf2a64 [BPF_REG_FP ];
648
660
const u8 code = insn -> code ;
649
- const u8 dst = bpf2a64 [insn -> dst_reg ];
650
- const u8 src = bpf2a64 [insn -> src_reg ];
651
661
const u8 tmp = bpf2a64 [TMP_REG_1 ];
652
662
const u8 tmp2 = bpf2a64 [TMP_REG_2 ];
653
- const s16 off = insn -> off ;
663
+ const u8 fpb = bpf2a64 [ FP_BOTTOM ] ;
654
664
const s32 imm = insn -> imm ;
655
665
const int i = insn - ctx -> prog -> insnsi ;
656
666
const bool is64 = BPF_CLASS (code ) == BPF_ALU64 ||
@@ -1012,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1012
1022
case BPF_LDX | BPF_PROBE_MEM | BPF_W :
1013
1023
case BPF_LDX | BPF_PROBE_MEM | BPF_H :
1014
1024
case BPF_LDX | BPF_PROBE_MEM | BPF_B :
1025
+ if (ctx -> fpb_offset > 0 && src == fp ) {
1026
+ src = fpb ;
1027
+ off += ctx -> fpb_offset ;
1028
+ }
1015
1029
switch (BPF_SIZE (code )) {
1016
1030
case BPF_W :
1017
1031
if (is_lsi_offset (off , 2 )) {
@@ -1070,6 +1084,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1070
1084
case BPF_ST | BPF_MEM | BPF_H :
1071
1085
case BPF_ST | BPF_MEM | BPF_B :
1072
1086
case BPF_ST | BPF_MEM | BPF_DW :
1087
+ if (ctx -> fpb_offset > 0 && dst == fp ) {
1088
+ dst = fpb ;
1089
+ off += ctx -> fpb_offset ;
1090
+ }
1073
1091
/* Load imm to a register then store it */
1074
1092
emit_a64_mov_i (1 , tmp , imm , ctx );
1075
1093
switch (BPF_SIZE (code )) {
@@ -1113,6 +1131,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1113
1131
case BPF_STX | BPF_MEM | BPF_H :
1114
1132
case BPF_STX | BPF_MEM | BPF_B :
1115
1133
case BPF_STX | BPF_MEM | BPF_DW :
1134
+ if (ctx -> fpb_offset > 0 && dst == fp ) {
1135
+ dst = fpb ;
1136
+ off += ctx -> fpb_offset ;
1137
+ }
1116
1138
switch (BPF_SIZE (code )) {
1117
1139
case BPF_W :
1118
1140
if (is_lsi_offset (off , 2 )) {
@@ -1167,6 +1189,56 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
1167
1189
return 0 ;
1168
1190
}
1169
1191
1192
+ /*
1193
+ * Return 0 if FP may change at runtime, otherwise find the minimum negative
1194
+ * offset to FP and converts it to positive number.
1195
+ */
1196
+ static int find_fpb_offset (struct bpf_prog * prog )
1197
+ {
1198
+ int i ;
1199
+ int offset = 0 ;
1200
+
1201
+ for (i = 0 ; i < prog -> len ; i ++ ) {
1202
+ const struct bpf_insn * insn = & prog -> insnsi [i ];
1203
+
1204
+ switch (BPF_CLASS (insn -> code )) {
1205
+ case BPF_STX :
1206
+ case BPF_ST :
1207
+ if (BPF_MODE (insn -> code ) == BPF_ATOMIC ) {
1208
+ if ((insn -> imm == BPF_XCHG ||
1209
+ insn -> imm == (BPF_ADD | BPF_FETCH ) ||
1210
+ insn -> imm == (BPF_AND | BPF_FETCH ) ||
1211
+ insn -> imm == (BPF_OR | BPF_FETCH ) ||
1212
+ insn -> imm == (BPF_XOR | BPF_FETCH )) &&
1213
+ insn -> src_reg == BPF_REG_FP ) {
1214
+ return 0 ;
1215
+ }
1216
+ }
1217
+ if (BPF_MODE (insn -> code ) == BPF_MEM &&
1218
+ insn -> dst_reg == BPF_REG_FP ) {
1219
+ if (insn -> off < offset )
1220
+ offset = insn -> off ;
1221
+ }
1222
+ break ;
1223
+
1224
+ case BPF_JMP32 :
1225
+ case BPF_JMP :
1226
+ break ;
1227
+
1228
+ case BPF_ALU :
1229
+ case BPF_ALU64 :
1230
+ case BPF_LDX :
1231
+ case BPF_LD :
1232
+ default :
1233
+ if (insn -> dst_reg == BPF_REG_FP )
1234
+ return 0 ;
1235
+ }
1236
+ }
1237
+
1238
+ /* safely be converted to a positive 'int', since insn->off is 's16' */
1239
+ return - offset ;
1240
+ }
1241
+
1170
1242
static int build_body (struct jit_ctx * ctx , bool extra_pass )
1171
1243
{
1172
1244
const struct bpf_prog * prog = ctx -> prog ;
@@ -1288,6 +1360,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1288
1360
goto out_off ;
1289
1361
}
1290
1362
1363
+ ctx .fpb_offset = find_fpb_offset (prog );
1364
+
1291
1365
/*
1292
1366
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
1293
1367
*
0 commit comments