@@ -501,8 +501,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
501501}
502502
503503static bool is_sync_callback_calling_kfunc(u32 btf_id);
504+ static bool is_async_callback_calling_kfunc(u32 btf_id);
505+ static bool is_callback_calling_kfunc(u32 btf_id);
504506static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
505507
508+ static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
509+
506510static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
507511{
508512 return func_id == BPF_FUNC_for_each_map_elem ||
@@ -530,7 +534,8 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
530534
531535static bool is_async_callback_calling_insn(struct bpf_insn *insn)
532536{
533- return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
537+ return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
538+ (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
534539}
535540
536541static bool is_may_goto_insn(struct bpf_insn *insn)
@@ -1429,6 +1434,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
14291434 }
14301435 dst_state->speculative = src->speculative;
14311436 dst_state->active_rcu_lock = src->active_rcu_lock;
1437+ dst_state->in_sleepable = src->in_sleepable;
14321438 dst_state->curframe = src->curframe;
14331439 dst_state->active_lock.ptr = src->active_lock.ptr;
14341440 dst_state->active_lock.id = src->active_lock.id;
@@ -2404,7 +2410,7 @@ static void init_func_state(struct bpf_verifier_env *env,
24042410/* Similar to push_stack(), but for async callbacks */
24052411static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
24062412 int insn_idx, int prev_insn_idx,
2407- int subprog)
2413+ int subprog, bool is_sleepable)
24082414{
24092415 struct bpf_verifier_stack_elem *elem;
24102416 struct bpf_func_state *frame;
@@ -2431,6 +2437,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
24312437 * Initialize it similar to do_check_common().
24322438 */
24332439 elem->st.branches = 1;
2440+ elem->st.in_sleepable = is_sleepable;
24342441 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
24352442 if (!frame)
24362443 goto err;
@@ -5278,7 +5285,8 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
52785285
52795286static bool in_sleepable(struct bpf_verifier_env *env)
52805287{
5281- return env->prog->sleepable;
5288+ return env->prog->sleepable ||
5289+ (env->cur_state && env->cur_state->in_sleepable);
52825290}
52835291
52845292/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -9513,7 +9521,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
95139521 */
95149522 env->subprog_info[subprog].is_cb = true;
95159523 if (bpf_pseudo_kfunc_call(insn) &&
9516- !is_sync_callback_calling_kfunc(insn->imm)) {
9524+ !is_callback_calling_kfunc(insn->imm)) {
95179525 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
95189526 func_id_name(insn->imm), insn->imm);
95199527 return -EFAULT;
@@ -9527,10 +9535,11 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
95279535 if (is_async_callback_calling_insn(insn)) {
95289536 struct bpf_verifier_state *async_cb;
95299537
9530- /* there is no real recursion here. timer callbacks are async */
9538+ /* there is no real recursion here. timer and workqueue callbacks are async */
95319539 env->subprog_info[subprog].is_async_cb = true;
95329540 async_cb = push_async_cb(env, env->subprog_info[subprog].start,
9533- insn_idx, subprog);
9541+ insn_idx, subprog,
9542+ is_bpf_wq_set_callback_impl_kfunc(insn->imm));
95349543 if (!async_cb)
95359544 return -EFAULT;
95369545 callee = async_cb->frame[0];
@@ -11017,6 +11026,7 @@ enum special_kfunc_type {
1101711026 KF_bpf_percpu_obj_new_impl,
1101811027 KF_bpf_percpu_obj_drop_impl,
1101911028 KF_bpf_throw,
11029+ KF_bpf_wq_set_callback_impl,
1102011030 KF_bpf_iter_css_task_new,
1102111031};
1102211032
@@ -11041,6 +11051,7 @@ BTF_ID(func, bpf_dynptr_clone)
1104111051BTF_ID(func, bpf_percpu_obj_new_impl)
1104211052BTF_ID(func, bpf_percpu_obj_drop_impl)
1104311053BTF_ID(func, bpf_throw)
11054+ BTF_ID(func, bpf_wq_set_callback_impl)
1104411055#ifdef CONFIG_CGROUPS
1104511056BTF_ID(func, bpf_iter_css_task_new)
1104611057#endif
@@ -11069,6 +11080,7 @@ BTF_ID(func, bpf_dynptr_clone)
1106911080BTF_ID(func, bpf_percpu_obj_new_impl)
1107011081BTF_ID(func, bpf_percpu_obj_drop_impl)
1107111082BTF_ID(func, bpf_throw)
11083+ BTF_ID(func, bpf_wq_set_callback_impl)
1107211084#ifdef CONFIG_CGROUPS
1107311085BTF_ID(func, bpf_iter_css_task_new)
1107411086#else
@@ -11402,12 +11414,28 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
1140211414 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
1140311415}
1140411416
11417+ static bool is_async_callback_calling_kfunc(u32 btf_id)
11418+ {
11419+ return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
11420+ }
11421+
1140511422static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
1140611423{
1140711424 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
1140811425 insn->imm == special_kfunc_list[KF_bpf_throw];
1140911426}
1141011427
11428+ static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
11429+ {
11430+ return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
11431+ }
11432+
11433+ static bool is_callback_calling_kfunc(u32 btf_id)
11434+ {
11435+ return is_sync_callback_calling_kfunc(btf_id) ||
11436+ is_async_callback_calling_kfunc(btf_id);
11437+ }
11438+
1141111439static bool is_rbtree_lock_required_kfunc(u32 btf_id)
1141211440{
1141311441 return is_bpf_rbtree_api_kfunc(btf_id);
@@ -12219,6 +12247,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1221912247 }
1222012248 }
1222112249
12250+ if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
12251+ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
12252+ set_timer_callback_state);
12253+ if (err) {
12254+ verbose(env, "kfunc %s#%d failed callback verification\n",
12255+ func_name, meta.func_id);
12256+ return err;
12257+ }
12258+ }
12259+
1222212260 rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
1222312261 rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
1222412262
@@ -16968,6 +17006,9 @@ static bool states_equal(struct bpf_verifier_env *env,
1696817006 if (old->active_rcu_lock != cur->active_rcu_lock)
1696917007 return false;
1697017008
17009+ if (old->in_sleepable != cur->in_sleepable)
17010+ return false;
17011+
1697117012 /* for states to be equal callsites have to be the same
1697217013 * and all frame states need to be equivalent
1697317014 */
@@ -19639,6 +19680,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1963919680 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
1964019681 insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
1964119682 *cnt = 1;
19683+ } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
19684+ struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };
19685+
19686+ insn_buf[0] = ld_addrs[0];
19687+ insn_buf[1] = ld_addrs[1];
19688+ insn_buf[2] = *insn;
19689+ *cnt = 3;
1964219690 }
1964319691 return 0;
1964419692}
0 commit comments