
Commit c337f23

Alexei Starovoitov authored and committed
Merge branch 'bpf-support-to-track-bpf_jne'
Menglong Dong says:

====================
bpf: support to track BPF_JNE

For now, the reg bounds are not handled for the BPF_JNE case, which can
cause the failure of the following case:

  /* The type of "a" is u32 */
  if (a > 0 && a < 100) {
          /* the range of the register for a is [0, 99], not [1, 99],
           * and will cause the following error:
           *
           *   invalid zero-sized read
           *
           * as a can be 0.
           */
          bpf_skb_store_bytes(skb, xx, xx, a, 0);
  }

In the code above, "a > 0" will be compiled to "if a == 0 goto xxx". In the
TRUE branch, the dst_reg will be marked as known to 0. However, in the
fallthrough (FALSE) branch, the dst_reg is not handled, which leaves the
[min, max] for a at [0, 99] instead of [1, 99].

In the 1st patch, for BPF_JNE we reduce the range of the dst reg if the src
reg is a const and is exactly the edge of the dst reg.

In the 2nd patch, we remove the duplicated s32 casting in "crafted_cases".

In the 3rd patch, we just activate the test case for this logic in
range_cond(), which was committed by Andrii in commit 8863238
("selftests/bpf: BPF register range bounds tester").

In the 4th patch, we convert the case above into a test case and add it to
verifier_bounds.c.

Changes since v4:
- add the 2nd patch
- add "{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}}" that we missed in the
  3rd patch
- add some comments to the function that we add in the 4th patch
- add reg_not_equal_const() in the 4th patch

Changes since v3:
- do some adjustment to the crafted cases that we added in the 2nd patch
- add the 3rd patch

Changes since v2:
- fix a typo in the subject of the 1st patch
- add some comments to the 1st patch, as Eduard advised
- add some cases to the "crafted_cases"

Changes since v1:
- simplify the code in the 1st patch
- introduce the 2nd patch for the testing
====================

Link: https://lore.kernel.org/r/20231219134800.1550388-1-menglong8.dong@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2 parents 1728df7 + 463ea64 commit c337f23
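
The rule the series introduces is easy to see outside the verifier: when a register is known to lie in [min, max] and a BPF_JNE comparison rules out a constant that sits exactly on one of those edges, that edge can move inward by one; a constant anywhere else in the range would only punch a hole in the middle and cannot tighten anything. Below is a minimal user-space sketch of that idea, using an illustrative bounds struct and refine_ne() helper rather than the verifier's real bpf_reg_state:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-in for one of the verifier's per-register ranges. */
  struct bounds {
          uint64_t umin;
          uint64_t umax;
  };

  /* "reg != val" only helps when val sits exactly on an edge of the range. */
  static void refine_ne(struct bounds *b, uint64_t val)
  {
          if (b->umin == val)
                  b->umin++;
          if (b->umax == val)
                  b->umax--;
  }

  int main(void)
  {
          /* "a < 100" gives [0, 99]; learning "a != 0" tightens the range to
           * [1, 99], which is what allows the bpf_skb_store_bytes() call in
           * the example above to pass verification.
           */
          struct bounds a = { .umin = 0, .umax = 99 };

          refine_ne(&a, 0);
          printf("[%llu, %llu]\n",
                 (unsigned long long)a.umin, (unsigned long long)a.umax);
          return 0;
  }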

File tree (3 files changed: +116 -11 lines)

kernel/bpf/verifier.c
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
tools/testing/selftests/bpf/progs/verifier_bounds.c

kernel/bpf/verifier.c

Lines changed: 37 additions & 1 deletion

@@ -14336,7 +14336,43 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
         }
         break;
     case BPF_JNE:
-        /* we don't derive any new information for inequality yet */
+        if (!is_reg_const(reg2, is_jmp32))
+            swap(reg1, reg2);
+        if (!is_reg_const(reg2, is_jmp32))
+            break;
+
+        /* try to recompute the bound of reg1 if reg2 is a const and
+         * is exactly the edge of reg1.
+         */
+        val = reg_const_value(reg2, is_jmp32);
+        if (is_jmp32) {
+            /* u32_min_value is not equal to 0xffffffff at this point,
+             * because otherwise u32_max_value is 0xffffffff as well,
+             * in such a case both reg1 and reg2 would be constants,
+             * jump would be predicted and reg_set_min_max() won't
+             * be called.
+             *
+             * Same reasoning works for all {u,s}{min,max}{32,64} cases
+             * below.
+             */
+            if (reg1->u32_min_value == (u32)val)
+                reg1->u32_min_value++;
+            if (reg1->u32_max_value == (u32)val)
+                reg1->u32_max_value--;
+            if (reg1->s32_min_value == (s32)val)
+                reg1->s32_min_value++;
+            if (reg1->s32_max_value == (s32)val)
+                reg1->s32_max_value--;
+        } else {
+            if (reg1->umin_value == (u64)val)
+                reg1->umin_value++;
+            if (reg1->umax_value == (u64)val)
+                reg1->umax_value--;
+            if (reg1->smin_value == (s64)val)
+                reg1->smin_value++;
+            if (reg1->smax_value == (s64)val)
+                reg1->smax_value--;
+        }
         break;
     case BPF_JSET:
         if (!is_reg_const(reg2, is_jmp32))

tools/testing/selftests/bpf/prog_tests/reg_bounds.c

Lines changed: 17 additions & 10 deletions

@@ -590,12 +590,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
         *newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
         break;
     case OP_NE:
-        /* generic case, can't derive more information */
-        *newx = range(t, x.a, x.b);
-        *newy = range(t, y.a, y.b);
-        break;
-
-        /* below extended logic is not supported by verifier just yet */
+        /* below logic is supported by the verifier now */
         if (x.a == x.b && x.a == y.a) {
             /* X is a constant matching left side of Y */
             *newx = range(t, x.a, x.b);
@@ -2097,10 +2092,22 @@ static struct subtest_case crafted_cases[] = {

     {U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},

-    {S32, U64, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)-255, 0}},
-    {S32, S64, {(u32)(s32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
-    {S32, S64, {0, 1}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
-    {S32, U32, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
+    {S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
+    {S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
+    {S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
+    {S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},
+
+    /* edge overlap testings for BPF_NE */
+    {U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
+    {U64, U64, {0, U64_MAX}, {0, 0}},
+    {S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
+    {S64, U64, {S64_MIN, 0}, {0, 0}},
+    {S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
+    {U32, U32, {0, U32_MAX}, {0, 0}},
+    {U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
+    {S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
+    {S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
+    {S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
 };

 /* Go over crafted hard-coded cases. This is fast, so we do it as part of
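
The crafted cases added above all place a constant exactly on one edge of the other operand's range, since that is the only shape where "!=" yields new information. A rough user-space model of the narrowing that range_cond() now expects, using an illustrative u64_range type and narrow_ne() helper rather than the selftest's own struct range and helpers:

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-in for the selftest's inclusive [a, b] u64 interval. */
  struct u64_range {
          uint64_t a;
          uint64_t b;
  };

  /* When one side is the constant [c, c] and c matches an edge of the other
   * range, that edge is shaved off; otherwise nothing new is learned.
   */
  static struct u64_range narrow_ne(struct u64_range c, struct u64_range r)
  {
          if (c.a == c.b && c.a == r.a)
                  r.a++;                  /* constant on the left edge */
          else if (c.a == c.b && c.a == r.b)
                  r.b--;                  /* constant on the right edge */
          return r;
  }

  int main(void)
  {
          /* Mirrors the crafted case {U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}}:
           * the constant U64_MAX sits on the right edge of x, so after
           * "x != y" the range of x becomes [0, U64_MAX - 1].
           */
          struct u64_range x = { 0, UINT64_MAX };
          struct u64_range y = { UINT64_MAX, UINT64_MAX };
          struct u64_range nx = narrow_ne(y, x);

          printf("[%llu, %llu]\n",
                 (unsigned long long)nx.a, (unsigned long long)nx.b);
          return 0;
  }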

tools/testing/selftests/bpf/progs/verifier_bounds.c

Lines changed: 62 additions & 0 deletions

@@ -1075,4 +1075,66 @@ l0_%=: r0 = 0; \
     : __clobber_all);
 }

+SEC("tc")
+__description("bounds check with JMP_NE for reg edge")
+__success __retval(0)
+__naked void reg_not_equal_const(void)
+{
+    asm volatile ("                                     \
+    r6 = r1;                                            \
+    r1 = 0;                                             \
+    *(u64*)(r10 - 8) = r1;                              \
+    call %[bpf_get_prandom_u32];                        \
+    r4 = r0;                                            \
+    r4 &= 7;                                            \
+    if r4 != 0 goto l0_%=;                              \
+    r0 = 0;                                             \
+    exit;                                               \
+l0_%=:  r1 = r6;                                        \
+    r2 = 0;                                             \
+    r3 = r10;                                           \
+    r3 += -8;                                           \
+    r5 = 0;                                             \
+    /* The 4th argument of bpf_skb_store_bytes is defined as \
+     * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \
+     * is providing us this exclusion of zero from initial \
+     * [0, 7] range.                                    \
+     */                                                 \
+    call %[bpf_skb_store_bytes];                        \
+    r0 = 0;                                             \
+    exit;                                               \
+"   :
+    : __imm(bpf_get_prandom_u32),
+      __imm(bpf_skb_store_bytes)
+    : __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check with JMP_EQ for reg edge")
+__success __retval(0)
+__naked void reg_equal_const(void)
+{
+    asm volatile ("                                     \
+    r6 = r1;                                            \
+    r1 = 0;                                             \
+    *(u64*)(r10 - 8) = r1;                              \
+    call %[bpf_get_prandom_u32];                        \
+    r4 = r0;                                            \
+    r4 &= 7;                                            \
+    if r4 == 0 goto l0_%=;                              \
+    r1 = r6;                                            \
+    r2 = 0;                                             \
+    r3 = r10;                                           \
+    r3 += -8;                                           \
+    r5 = 0;                                             \
+    /* Just the same as what we do in reg_not_equal_const() */ \
+    call %[bpf_skb_store_bytes];                        \
+l0_%=:  r0 = 0;                                         \
+    exit;                                               \
+"   :
+    : __imm(bpf_get_prandom_u32),
+      __imm(bpf_skb_store_bytes)
+    : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
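
Assuming a kernel tree with the BPF selftests built, both parts of the change can be exercised from tools/testing/selftests/bpf through the usual runner, e.g. ./test_progs -t reg_bounds for the crafted range_cond() cases and ./test_progs -t verifier_bounds for the two new JMP_NE/JMP_EQ programs; the -t filters here are assumptions based on the file names rather than anything stated in the commit.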
