Commit 874be05

Johan Almbladh authored and Daniel Borkmann committed
bpf, tests: Add tail call test suite
While BPF_CALL instructions were tested implicitly by the cBPF-to-eBPF
translation, there have not been any tests for BPF_TAIL_CALL instructions.
The new test suite includes tests for tail call chaining, tail call count
tracking and error paths. It is mainly intended for JIT development and
testing.

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210809091829.810076-15-johan.almbladh@anyfinetworks.com
1 parent 6a3b24c commit 874be05
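Background note (not part of the commit): at the BPF C level, the instruction sequence exercised by this suite corresponds to the bpf_tail_call() helper, which jumps to a program stored in a BPF_MAP_TYPE_PROG_ARRAY and simply falls through on failure. Below is a minimal sketch assuming libbpf-style map definitions; the map name jmp_table, the program name caller, and the section names are illustrative only.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative prog array; the kernel test suite in the diff builds the
 * equivalent struct bpf_array directly in prepare_tail_call_tests().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("socket")
int caller(struct __sk_buff *skb)
{
	/* On success this never returns; on a NULL slot, an out-of-range
	 * index, or when the tail call limit is reached, execution falls
	 * through. These are the error paths the tests below cover.
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	return -1;
}

char _license[] SEC("license") = "GPL";

The test suite itself does not go through this helper; it emits raw BPF_TAIL_CALL instructions and wires up the program array by hand, which is what makes it useful for exercising JIT code generation directly.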

File tree: 1 file changed (+248, -0)

lib/test_bpf.c

Lines changed: 248 additions & 0 deletions
@@ -8989,8 +8989,248 @@ static __init int test_bpf(void)
 	return err_cnt ? -EINVAL : 0;
 }
 
+struct tail_call_test {
+	const char *descr;
+	struct bpf_insn insns[MAX_INSNS];
+	int result;
+	int stack_depth;
+};
+
+/*
+ * Magic marker used in test snippets for tail calls below.
+ * BPF_LD/MOV to R2 and R3 with this immediate value is replaced
+ * with the proper values by the test runner.
+ */
+#define TAIL_CALL_MARKER 0x7a11ca11
+
+/* Special offset to indicate a NULL call target */
+#define TAIL_CALL_NULL 0x7fff
+
+/* Special offset to indicate an out-of-range index */
+#define TAIL_CALL_INVALID 0x7ffe
+
+#define TAIL_CALL(offset)			       \
+	BPF_LD_IMM64(R2, TAIL_CALL_MARKER),	       \
+	BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
+		     offset, TAIL_CALL_MARKER),	       \
+	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
+
+/*
+ * Tail call tests. Each test case may call any other test in the table,
+ * including itself, specified as a relative index offset from the calling
+ * test. The index TAIL_CALL_NULL can be used to specify a NULL target
+ * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
+ * results in a target index that is out of range.
+ */
+static struct tail_call_test tail_call_tests[] = {
+	{
+		"Tail call leaf",
+		.insns = {
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+	{
+		"Tail call 2",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 3,
+	},
+	{
+		"Tail call 3",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 3),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 6,
+	},
+	{
+		"Tail call 4",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 4),
+			TAIL_CALL(-1),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 10,
+	},
+	{
+		"Tail call error path, max count reached",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			TAIL_CALL(0),
+			BPF_EXIT_INSN(),
+		},
+		.result = MAX_TAIL_CALL_CNT + 1,
+	},
+	{
+		"Tail call error path, NULL target",
+		.insns = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			TAIL_CALL(TAIL_CALL_NULL),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+	{
+		"Tail call error path, index out of range",
+		.insns = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			TAIL_CALL(TAIL_CALL_INVALID),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = 1,
+	},
+};
+
+static void __init destroy_tail_call_tests(struct bpf_array *progs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
+		if (progs->ptrs[i])
+			bpf_prog_free(progs->ptrs[i]);
+	kfree(progs);
+}
+
+static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
+{
+	int ntests = ARRAY_SIZE(tail_call_tests);
+	struct bpf_array *progs;
+	int which, err;
+
+	/* Allocate the table of programs to be used for tail calls */
+	progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
+			GFP_KERNEL);
+	if (!progs)
+		goto out_nomem;
+
+	/* Create all eBPF programs and populate the table */
+	for (which = 0; which < ntests; which++) {
+		struct tail_call_test *test = &tail_call_tests[which];
+		struct bpf_prog *fp;
+		int len, i;
+
+		/* Compute the number of program instructions */
+		for (len = 0; len < MAX_INSNS; len++) {
+			struct bpf_insn *insn = &test->insns[len];
+
+			if (len < MAX_INSNS - 1 &&
+			    insn->code == (BPF_LD | BPF_DW | BPF_IMM))
+				len++;
+			if (insn->code == 0)
+				break;
+		}
+
+		/* Allocate and initialize the program */
+		fp = bpf_prog_alloc(bpf_prog_size(len), 0);
+		if (!fp)
+			goto out_nomem;
+
+		fp->len = len;
+		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+		fp->aux->stack_depth = test->stack_depth;
+		memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
+
+		/* Relocate runtime tail call offsets and addresses */
+		for (i = 0; i < len; i++) {
+			struct bpf_insn *insn = &fp->insnsi[i];
+
+			if (insn->imm != TAIL_CALL_MARKER)
+				continue;
+
+			switch (insn->code) {
+			case BPF_LD | BPF_DW | BPF_IMM:
+				insn[0].imm = (u32)(long)progs;
+				insn[1].imm = ((u64)(long)progs) >> 32;
+				break;
+
+			case BPF_ALU | BPF_MOV | BPF_K:
+				if (insn->off == TAIL_CALL_NULL)
+					insn->imm = ntests;
+				else if (insn->off == TAIL_CALL_INVALID)
+					insn->imm = ntests + 1;
+				else
+					insn->imm = which + insn->off;
+				insn->off = 0;
+			}
+		}
+
+		fp = bpf_prog_select_runtime(fp, &err);
+		if (err)
+			goto out_err;
+
+		progs->ptrs[which] = fp;
+	}
+
+	/* The last entry contains a NULL program pointer */
+	progs->map.max_entries = ntests + 1;
+	*pprogs = progs;
+	return 0;
+
+out_nomem:
+	err = -ENOMEM;
+
+out_err:
+	if (progs)
+		destroy_tail_call_tests(progs);
+	return err;
+}
+
+static __init int test_tail_calls(struct bpf_array *progs)
+{
+	int i, err_cnt = 0, pass_cnt = 0;
+	int jit_cnt = 0, run_cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+		struct tail_call_test *test = &tail_call_tests[i];
+		struct bpf_prog *fp = progs->ptrs[i];
+		u64 duration;
+		int ret;
+
+		cond_resched();
+
+		pr_info("#%d %s ", i, test->descr);
+		if (!fp) {
+			err_cnt++;
+			continue;
+		}
+		pr_cont("jited:%u ", fp->jited);
+
+		run_cnt++;
+		if (fp->jited)
+			jit_cnt++;
+
+		ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration);
+		if (ret == test->result) {
+			pr_cont("%lld PASS", duration);
+			pass_cnt++;
+		} else {
+			pr_cont("ret %d != %d FAIL", ret, test->result);
+			err_cnt++;
+		}
+	}
+
+	pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+		__func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+	return err_cnt ? -EINVAL : 0;
+}
+
 static int __init test_bpf_init(void)
 {
+	struct bpf_array *progs = NULL;
 	int ret;
 
 	ret = prepare_bpf_tests();
@@ -9002,6 +9242,14 @@ static int __init test_bpf_init(void)
 	if (ret)
 		return ret;
 
+	ret = prepare_tail_call_tests(&progs);
+	if (ret)
+		return ret;
+	ret = test_tail_calls(progs);
+	destroy_tail_call_tests(progs);
+	if (ret)
+		return ret;
+
 	return test_skb_segment();
 }

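For JIT development, new cases follow the same table pattern. The entry below is a hypothetical sketch and not part of this commit: the description string, stack depth and expected result are made up, and the relative offset only works if the entry is appended as table index 7, because prepare_tail_call_tests() resolves the target as which + insn->off. Since __run_one() passes a NULL context, R1 starts at zero.

	/* Hypothetical extra entry, appended after "Tail call error path,
	 * index out of range" so that it sits at table index 7.
	 * TAIL_CALL(-7) then resolves to index 0, i.e. the "Tail call leaf"
	 * program, which returns R1 + 1.
	 */
	{
		"Tail call chain with stack depth (example)",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, R1, 5),	/* R1 = 0 + 5 */
			TAIL_CALL(-7),			/* jump to "Tail call leaf" */
			BPF_ALU64_IMM(BPF_MOV, R0, -1),	/* only reached on failure */
			BPF_EXIT_INSN(),
		},
		.stack_depth = 16,			/* exercise JIT stack setup */
		.result = 6,				/* leaf returns R1 + 1 = 6 */
	},

The nonzero stack_depth is copied into fp->aux->stack_depth by prepare_tail_call_tests(), so a case like this would additionally exercise the JIT's prologue and stack handling around a tail call.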