Skip to content

Commit 6e8778f

Browse files
committed
Merge branch 'sched-fixes'
Victor Nogueira says: ==================== net: sched: Fixes for classifiers Four different classifiers (bpf, u32, matchall, and flower) are calling tcf_bind_filter in their callbacks, but aren't undoing it by calling tcf_unbind_filter if there was an error after binding. This patch set fixes all this by calling tcf_unbind_filter in such cases. This set also undoes a refcount decrement in cls_u32 when an update fails under specific conditions which are described in patch #3. v1 -> v2: * Remove blank line after fixes tag * Fix reverse xmas tree issues pointed out by Simon v2 -> v3: * Inline functions cls_bpf_set_parms and fl_set_parms to avoid adding yet another parameter (and a return value at that) to them. * Remove similar fixes for u32 and matchall, which will be sent soon, once we find a way to do the fixes without adding a return parameter to their set_parms functions. v3 -> v4: * Inline mall_set_parms to avoid adding yet another parameter. * Remove set_flags parameter from u32_set_parms and create a separate function for calling tcf_bind_filter and tcf_unbind_filter in case of failure. * Change cover letter title to also encompass refcnt fix for u32 v4 -> v5: * Change back tag to net ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents 0dd1805 + ac177a3 commit 6e8778f

File tree

4 files changed

+143
-138
lines changed

4 files changed

+143
-138
lines changed

net/sched/cls_bpf.c

Lines changed: 47 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -406,66 +406,19 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
406406
return 0;
407407
}
408408

409-
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
410-
struct cls_bpf_prog *prog, unsigned long base,
411-
struct nlattr **tb, struct nlattr *est, u32 flags,
412-
struct netlink_ext_ack *extack)
413-
{
414-
bool is_bpf, is_ebpf, have_exts = false;
415-
u32 gen_flags = 0;
416-
int ret;
417-
418-
is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
419-
is_ebpf = tb[TCA_BPF_FD];
420-
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
421-
return -EINVAL;
422-
423-
ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
424-
extack);
425-
if (ret < 0)
426-
return ret;
427-
428-
if (tb[TCA_BPF_FLAGS]) {
429-
u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
430-
431-
if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
432-
return -EINVAL;
433-
434-
have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
435-
}
436-
if (tb[TCA_BPF_FLAGS_GEN]) {
437-
gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
438-
if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
439-
!tc_flags_valid(gen_flags))
440-
return -EINVAL;
441-
}
442-
443-
prog->exts_integrated = have_exts;
444-
prog->gen_flags = gen_flags;
445-
446-
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
447-
cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
448-
if (ret < 0)
449-
return ret;
450-
451-
if (tb[TCA_BPF_CLASSID]) {
452-
prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
453-
tcf_bind_filter(tp, &prog->res, base);
454-
}
455-
456-
return 0;
457-
}
458-
459409
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
460410
struct tcf_proto *tp, unsigned long base,
461411
u32 handle, struct nlattr **tca,
462412
void **arg, u32 flags,
463413
struct netlink_ext_ack *extack)
464414
{
465415
struct cls_bpf_head *head = rtnl_dereference(tp->root);
416+
bool is_bpf, is_ebpf, have_exts = false;
466417
struct cls_bpf_prog *oldprog = *arg;
467418
struct nlattr *tb[TCA_BPF_MAX + 1];
419+
bool bound_to_filter = false;
468420
struct cls_bpf_prog *prog;
421+
u32 gen_flags = 0;
469422
int ret;
470423

471424
if (tca[TCA_OPTIONS] == NULL)
@@ -504,11 +457,51 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
504457
goto errout;
505458
prog->handle = handle;
506459

507-
ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
508-
extack);
460+
is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
461+
is_ebpf = tb[TCA_BPF_FD];
462+
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
463+
ret = -EINVAL;
464+
goto errout_idr;
465+
}
466+
467+
ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
468+
flags, extack);
469+
if (ret < 0)
470+
goto errout_idr;
471+
472+
if (tb[TCA_BPF_FLAGS]) {
473+
u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
474+
475+
if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
476+
ret = -EINVAL;
477+
goto errout_idr;
478+
}
479+
480+
have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
481+
}
482+
if (tb[TCA_BPF_FLAGS_GEN]) {
483+
gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
484+
if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
485+
!tc_flags_valid(gen_flags)) {
486+
ret = -EINVAL;
487+
goto errout_idr;
488+
}
489+
}
490+
491+
prog->exts_integrated = have_exts;
492+
prog->gen_flags = gen_flags;
493+
494+
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
495+
cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
509496
if (ret < 0)
510497
goto errout_idr;
511498

499+
if (tb[TCA_BPF_CLASSID]) {
500+
prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
501+
tcf_bind_filter(tp, &prog->res, base);
502+
bound_to_filter = true;
503+
}
504+
512505
ret = cls_bpf_offload(tp, prog, oldprog, extack);
513506
if (ret)
514507
goto errout_parms;
@@ -530,6 +523,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
530523
return 0;
531524

532525
errout_parms:
526+
if (bound_to_filter)
527+
tcf_unbind_filter(tp, &prog->res);
533528
cls_bpf_free_parms(prog);
534529
errout_idr:
535530
if (!oldprog)

net/sched/cls_flower.c

Lines changed: 47 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -2173,53 +2173,6 @@ static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
21732173
return mask->meta.l2_miss;
21742174
}
21752175

2176-
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2177-
struct cls_fl_filter *f, struct fl_flow_mask *mask,
2178-
unsigned long base, struct nlattr **tb,
2179-
struct nlattr *est,
2180-
struct fl_flow_tmplt *tmplt,
2181-
u32 flags, u32 fl_flags,
2182-
struct netlink_ext_ack *extack)
2183-
{
2184-
int err;
2185-
2186-
err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2187-
fl_flags, extack);
2188-
if (err < 0)
2189-
return err;
2190-
2191-
if (tb[TCA_FLOWER_CLASSID]) {
2192-
f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2193-
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2194-
rtnl_lock();
2195-
tcf_bind_filter(tp, &f->res, base);
2196-
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2197-
rtnl_unlock();
2198-
}
2199-
2200-
err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2201-
if (err)
2202-
return err;
2203-
2204-
fl_mask_update_range(mask);
2205-
fl_set_masked_key(&f->mkey, &f->key, mask);
2206-
2207-
if (!fl_mask_fits_tmplt(tmplt, mask)) {
2208-
NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2209-
return -EINVAL;
2210-
}
2211-
2212-
/* Enable tc skb extension if filter matches on data extracted from
2213-
* this extension.
2214-
*/
2215-
if (fl_needs_tc_skb_ext(&mask->key)) {
2216-
f->needs_tc_skb_ext = 1;
2217-
tc_skb_ext_tc_enable();
2218-
}
2219-
2220-
return 0;
2221-
}
2222-
22232176
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
22242177
struct cls_fl_filter *fold,
22252178
bool *in_ht)
@@ -2251,6 +2204,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
22512204
struct cls_fl_head *head = fl_head_dereference(tp);
22522205
bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
22532206
struct cls_fl_filter *fold = *arg;
2207+
bool bound_to_filter = false;
22542208
struct cls_fl_filter *fnew;
22552209
struct fl_flow_mask *mask;
22562210
struct nlattr **tb;
@@ -2335,15 +2289,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
23352289
if (err < 0)
23362290
goto errout_idr;
23372291

2338-
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2339-
tp->chain->tmplt_priv, flags, fnew->flags,
2340-
extack);
2341-
if (err)
2292+
err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
2293+
&fnew->exts, flags, fnew->flags,
2294+
extack);
2295+
if (err < 0)
23422296
goto errout_idr;
23432297

2298+
if (tb[TCA_FLOWER_CLASSID]) {
2299+
fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2300+
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2301+
rtnl_lock();
2302+
tcf_bind_filter(tp, &fnew->res, base);
2303+
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2304+
rtnl_unlock();
2305+
bound_to_filter = true;
2306+
}
2307+
2308+
err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
2309+
if (err)
2310+
goto unbind_filter;
2311+
2312+
fl_mask_update_range(mask);
2313+
fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
2314+
2315+
if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
2316+
NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2317+
err = -EINVAL;
2318+
goto unbind_filter;
2319+
}
2320+
2321+
/* Enable tc skb extension if filter matches on data extracted from
2322+
* this extension.
2323+
*/
2324+
if (fl_needs_tc_skb_ext(&mask->key)) {
2325+
fnew->needs_tc_skb_ext = 1;
2326+
tc_skb_ext_tc_enable();
2327+
}
2328+
23442329
err = fl_check_assign_mask(head, fnew, fold, mask);
23452330
if (err)
2346-
goto errout_idr;
2331+
goto unbind_filter;
23472332

23482333
err = fl_ht_insert_unique(fnew, fold, &in_ht);
23492334
if (err)
@@ -2434,6 +2419,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
24342419
fnew->mask->filter_ht_params);
24352420
errout_mask:
24362421
fl_mask_put(head, fnew->mask);
2422+
2423+
unbind_filter:
2424+
if (bound_to_filter) {
2425+
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2426+
rtnl_lock();
2427+
tcf_unbind_filter(tp, &fnew->res);
2428+
if (flags & TCA_ACT_FLAGS_NO_RTNL)
2429+
rtnl_unlock();
2430+
}
2431+
24372432
errout_idr:
24382433
if (!fold)
24392434
idr_remove(&head->handle_idr, fnew->handle);

net/sched/cls_matchall.c

Lines changed: 12 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -159,26 +159,6 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
159159
[TCA_MATCHALL_FLAGS] = { .type = NLA_U32 },
160160
};
161161

162-
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
163-
struct cls_mall_head *head,
164-
unsigned long base, struct nlattr **tb,
165-
struct nlattr *est, u32 flags, u32 fl_flags,
166-
struct netlink_ext_ack *extack)
167-
{
168-
int err;
169-
170-
err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
171-
fl_flags, extack);
172-
if (err < 0)
173-
return err;
174-
175-
if (tb[TCA_MATCHALL_CLASSID]) {
176-
head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
177-
tcf_bind_filter(tp, &head->res, base);
178-
}
179-
return 0;
180-
}
181-
182162
static int mall_change(struct net *net, struct sk_buff *in_skb,
183163
struct tcf_proto *tp, unsigned long base,
184164
u32 handle, struct nlattr **tca,
@@ -187,6 +167,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
187167
{
188168
struct cls_mall_head *head = rtnl_dereference(tp->root);
189169
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
170+
bool bound_to_filter = false;
190171
struct cls_mall_head *new;
191172
u32 userflags = 0;
192173
int err;
@@ -226,11 +207,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
226207
goto err_alloc_percpu;
227208
}
228209

229-
err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
230-
flags, new->flags, extack);
231-
if (err)
210+
err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
211+
&new->exts, flags, new->flags, extack);
212+
if (err < 0)
232213
goto err_set_parms;
233214

215+
if (tb[TCA_MATCHALL_CLASSID]) {
216+
new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
217+
tcf_bind_filter(tp, &new->res, base);
218+
bound_to_filter = true;
219+
}
220+
234221
if (!tc_skip_hw(new->flags)) {
235222
err = mall_replace_hw_filter(tp, new, (unsigned long)new,
236223
extack);
@@ -246,6 +233,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
246233
return 0;
247234

248235
err_replace_hw_filter:
236+
if (bound_to_filter)
237+
tcf_unbind_filter(tp, &new->res);
249238
err_set_parms:
250239
free_percpu(new->pf);
251240
err_alloc_percpu:

0 commit comments

Comments
 (0)