Skip to content

Commit 81f1d7a

Browse files
Benjamin Tissoires authored and Alexei Starovoitov committed
bpf: wq: add bpf_wq_set_callback_impl
To support sleepable async callbacks, we need to tell push_async_cb() whether the callback is sleepable or not.

The verifier now detects that we are in bpf_wq_set_callback_impl and can allow a sleepable callback to happen.

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent e3d9eac commit 81f1d7a

File tree

3 files changed

+70
-6
lines changed

3 files changed

+70
-6
lines changed

include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -426,6 +426,7 @@ struct bpf_verifier_state {
426426
* while they are still in use.
427427
*/
428428
bool used_as_loop_entry;
429+
bool in_sleepable;
429430

430431
/* first and last insn idx of this verifier state */
431432
u32 first_insn_idx;

kernel/bpf/helpers.c

Lines changed: 15 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2711,6 +2711,20 @@ __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
27112711
return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
27122712
}
27132713

2714+
__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
2715+
int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
2716+
unsigned int flags,
2717+
void *aux__ign)
2718+
{
2719+
struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
2720+
struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2721+
2722+
if (flags)
2723+
return -EINVAL;
2724+
2725+
return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
2726+
}
2727+
27142728
__bpf_kfunc_end_defs();
27152729

27162730
BTF_KFUNCS_START(generic_btf_ids)
@@ -2789,6 +2803,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_size)
27892803
BTF_ID_FLAGS(func, bpf_dynptr_clone)
27902804
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
27912805
BTF_ID_FLAGS(func, bpf_wq_init)
2806+
BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
27922807
BTF_KFUNCS_END(common_btf_ids)
27932808

27942809
static const struct btf_kfunc_id_set common_kfunc_set = {

kernel/bpf/verifier.c

Lines changed: 54 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -501,8 +501,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
501501
}
502502

503503
static bool is_sync_callback_calling_kfunc(u32 btf_id);
504+
static bool is_async_callback_calling_kfunc(u32 btf_id);
505+
static bool is_callback_calling_kfunc(u32 btf_id);
504506
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
505507

508+
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
509+
506510
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
507511
{
508512
return func_id == BPF_FUNC_for_each_map_elem ||
@@ -530,7 +534,8 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
530534

531535
static bool is_async_callback_calling_insn(struct bpf_insn *insn)
532536
{
533-
return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
537+
return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
538+
(bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
534539
}
535540

536541
static bool is_may_goto_insn(struct bpf_insn *insn)
@@ -1429,6 +1434,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
14291434
}
14301435
dst_state->speculative = src->speculative;
14311436
dst_state->active_rcu_lock = src->active_rcu_lock;
1437+
dst_state->in_sleepable = src->in_sleepable;
14321438
dst_state->curframe = src->curframe;
14331439
dst_state->active_lock.ptr = src->active_lock.ptr;
14341440
dst_state->active_lock.id = src->active_lock.id;
@@ -2404,7 +2410,7 @@ static void init_func_state(struct bpf_verifier_env *env,
24042410
/* Similar to push_stack(), but for async callbacks */
24052411
static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
24062412
int insn_idx, int prev_insn_idx,
2407-
int subprog)
2413+
int subprog, bool is_sleepable)
24082414
{
24092415
struct bpf_verifier_stack_elem *elem;
24102416
struct bpf_func_state *frame;
@@ -2431,6 +2437,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
24312437
* Initialize it similar to do_check_common().
24322438
*/
24332439
elem->st.branches = 1;
2440+
elem->st.in_sleepable = is_sleepable;
24342441
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
24352442
if (!frame)
24362443
goto err;
@@ -5278,7 +5285,8 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
52785285

52795286
static bool in_sleepable(struct bpf_verifier_env *env)
52805287
{
5281-
return env->prog->sleepable;
5288+
return env->prog->sleepable ||
5289+
(env->cur_state && env->cur_state->in_sleepable);
52825290
}
52835291

52845292
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -9513,7 +9521,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
95139521
*/
95149522
env->subprog_info[subprog].is_cb = true;
95159523
if (bpf_pseudo_kfunc_call(insn) &&
9516-
!is_sync_callback_calling_kfunc(insn->imm)) {
9524+
!is_callback_calling_kfunc(insn->imm)) {
95179525
verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
95189526
func_id_name(insn->imm), insn->imm);
95199527
return -EFAULT;
@@ -9527,10 +9535,11 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
95279535
if (is_async_callback_calling_insn(insn)) {
95289536
struct bpf_verifier_state *async_cb;
95299537

9530-
/* there is no real recursion here. timer callbacks are async */
9538+
/* there is no real recursion here. timer and workqueue callbacks are async */
95319539
env->subprog_info[subprog].is_async_cb = true;
95329540
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
9533-
insn_idx, subprog);
9541+
insn_idx, subprog,
9542+
is_bpf_wq_set_callback_impl_kfunc(insn->imm));
95349543
if (!async_cb)
95359544
return -EFAULT;
95369545
callee = async_cb->frame[0];
@@ -11017,6 +11026,7 @@ enum special_kfunc_type {
1101711026
KF_bpf_percpu_obj_new_impl,
1101811027
KF_bpf_percpu_obj_drop_impl,
1101911028
KF_bpf_throw,
11029+
KF_bpf_wq_set_callback_impl,
1102011030
KF_bpf_iter_css_task_new,
1102111031
};
1102211032

@@ -11041,6 +11051,7 @@ BTF_ID(func, bpf_dynptr_clone)
1104111051
BTF_ID(func, bpf_percpu_obj_new_impl)
1104211052
BTF_ID(func, bpf_percpu_obj_drop_impl)
1104311053
BTF_ID(func, bpf_throw)
11054+
BTF_ID(func, bpf_wq_set_callback_impl)
1104411055
#ifdef CONFIG_CGROUPS
1104511056
BTF_ID(func, bpf_iter_css_task_new)
1104611057
#endif
@@ -11069,6 +11080,7 @@ BTF_ID(func, bpf_dynptr_clone)
1106911080
BTF_ID(func, bpf_percpu_obj_new_impl)
1107011081
BTF_ID(func, bpf_percpu_obj_drop_impl)
1107111082
BTF_ID(func, bpf_throw)
11083+
BTF_ID(func, bpf_wq_set_callback_impl)
1107211084
#ifdef CONFIG_CGROUPS
1107311085
BTF_ID(func, bpf_iter_css_task_new)
1107411086
#else
@@ -11402,12 +11414,28 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
1140211414
return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
1140311415
}
1140411416

11417+
static bool is_async_callback_calling_kfunc(u32 btf_id)
11418+
{
11419+
return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
11420+
}
11421+
1140511422
static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
1140611423
{
1140711424
return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
1140811425
insn->imm == special_kfunc_list[KF_bpf_throw];
1140911426
}
1141011427

11428+
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
11429+
{
11430+
return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
11431+
}
11432+
11433+
static bool is_callback_calling_kfunc(u32 btf_id)
11434+
{
11435+
return is_sync_callback_calling_kfunc(btf_id) ||
11436+
is_async_callback_calling_kfunc(btf_id);
11437+
}
11438+
1141111439
static bool is_rbtree_lock_required_kfunc(u32 btf_id)
1141211440
{
1141311441
return is_bpf_rbtree_api_kfunc(btf_id);
@@ -12219,6 +12247,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1221912247
}
1222012248
}
1222112249

12250+
if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
12251+
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
12252+
set_timer_callback_state);
12253+
if (err) {
12254+
verbose(env, "kfunc %s#%d failed callback verification\n",
12255+
func_name, meta.func_id);
12256+
return err;
12257+
}
12258+
}
12259+
1222212260
rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
1222312261
rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
1222412262

@@ -16968,6 +17006,9 @@ static bool states_equal(struct bpf_verifier_env *env,
1696817006
if (old->active_rcu_lock != cur->active_rcu_lock)
1696917007
return false;
1697017008

17009+
if (old->in_sleepable != cur->in_sleepable)
17010+
return false;
17011+
1697117012
/* for states to be equal callsites have to be the same
1697217013
* and all frame states need to be equivalent
1697317014
*/
@@ -19639,6 +19680,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1963919680
desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
1964019681
insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
1964119682
*cnt = 1;
19683+
} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
19684+
struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };
19685+
19686+
insn_buf[0] = ld_addrs[0];
19687+
insn_buf[1] = ld_addrs[1];
19688+
insn_buf[2] = *insn;
19689+
*cnt = 3;
1964219690
}
1964319691
return 0;
1964419692
}

0 commit comments

Comments
 (0)