Skip to content

Commit 5c00ebd

Browse files
committed
netfilter: nft_set_rbtree: fix overlap expiration walk
JIRA: https://issues.redhat.com/browse/RHEL-1720
JIRA: https://issues.redhat.com/browse/RHEL-1721
Upstream Status: commit f718863

commit f718863
Author: Florian Westphal <fw@strlen.de>
Date:   Thu Jul 20 21:30:05 2023 +0200

    netfilter: nft_set_rbtree: fix overlap expiration walk

    The lazy gc on insert that should remove timed-out entries fails to
    release the other half of the interval, if any.

    Can be reproduced with tests/shell/testcases/sets/0044interval_overlap_0
    in nftables.git and a kmemleak-enabled kernel.

    Second bug is the use of rbe_prev vs. the prev pointer: if rbe_prev()
    returns NULL after at least one iteration, rbe_prev points to an element
    that is not an end interval, hence it should not be removed.

    Lastly, check the genmask of the end interval so that it is only removed
    if it is active in the current generation.

    Fixes: c9e6978 ("netfilter: nft_set_rbtree: Switch to node list walk for overlap detection")
    Signed-off-by: Florian Westphal <fw@strlen.de>

Signed-off-by: Florian Westphal <fwestpha@redhat.com>
1 parent bf6d3e9 commit 5c00ebd

File tree

1 file changed

+14
-6
lines changed

1 file changed

+14
-6
lines changed

net/netfilter/nft_set_rbtree.c

Lines changed: 14 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
217217

218218
static int nft_rbtree_gc_elem(const struct nft_set *__set,
219219
struct nft_rbtree *priv,
220-
struct nft_rbtree_elem *rbe)
220+
struct nft_rbtree_elem *rbe,
221+
u8 genmask)
221222
{
222223
struct nft_set *set = (struct nft_set *)__set;
223224
struct rb_node *prev = rb_prev(&rbe->node);
224-
struct nft_rbtree_elem *rbe_prev = NULL;
225+
struct nft_rbtree_elem *rbe_prev;
225226
struct nft_set_gc_batch *gcb;
226227

227228
gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
228229
if (!gcb)
229230
return -ENOMEM;
230231

231-
/* search for expired end interval coming before this element. */
232+
/* search for end interval coming before this element.
233+
* end intervals don't carry a timeout extension, they
234+
* are coupled with the interval start element.
235+
*/
232236
while (prev) {
233237
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
234-
if (nft_rbtree_interval_end(rbe_prev))
238+
if (nft_rbtree_interval_end(rbe_prev) &&
239+
nft_set_elem_active(&rbe_prev->ext, genmask))
235240
break;
236241

237242
prev = rb_prev(prev);
238243
}
239244

240-
if (rbe_prev) {
245+
if (prev) {
246+
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
247+
241248
rb_erase(&rbe_prev->node, &priv->root);
242249
atomic_dec(&set->nelems);
250+
nft_set_gc_batch_add(gcb, rbe_prev);
243251
}
244252

245253
rb_erase(&rbe->node, &priv->root);
@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
321329

322330
/* perform garbage collection to avoid bogus overlap reports. */
323331
if (nft_set_elem_expired(&rbe->ext)) {
324-
err = nft_rbtree_gc_elem(set, priv, rbe);
332+
err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
325333
if (err < 0)
326334
return err;
327335

0 commit comments

Comments (0)