Commit 4c5c6aa

netfilter: nft_set_pipapo: prevent overflow in lookup table allocation
When calculating the lookup table size, ensure the following multiplication does not overflow:

- desc->field_len[] has a maximum value of U8_MAX, multiplied by NFT_PIPAPO_GROUPS_PER_BYTE(f), which can be 2 in the worst case.
- NFT_PIPAPO_BUCKETS(f->bb) is 2^8 in the worst case.
- sizeof(unsigned long), from sizeof(*f->lt), with lt in struct nft_pipapo_field.

Then, use check_mul_overflow() to multiply by the bucket size, and check_add_overflow() to add the AVX2 alignment headroom (if needed). Finally, add the lt_calculate_size() helper and use it to consolidate this.

While at it, replace the leftover GFP_KERNEL allocation in pipapo_resize() with GFP_KERNEL_ACCOUNT for consistency.

Fixes: 3c4287f ("nf_tables: Add set type for arbitrary concatenation of ranges")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
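For scale, a rough worst-case sketch of the fixed factor (editor's illustration based on the limits quoted above, not text from the commit): groups = U8_MAX * 2 = 510, NFT_PIPAPO_BUCKETS(bb) = 256 for 8-bit buckets, and sizeof(unsigned long) = 8 on 64-bit, so the fixed part is 510 * 256 * 8 = 1,044,480 bytes. Multiplied by a large bucket size (bsize, in longs), the result can exceed INT_MAX even when the 64-bit multiplication itself does not wrap, which is why the helper below also rejects results above INT_MAX.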
Parent: 5e4d107 · Commit: 4c5c6aa

net/netfilter/nft_set_pipapo.c

Lines changed: 44 additions & 14 deletions
@@ -683,6 +683,30 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
 	return 0;
 }
 
+/**
+ * lt_calculate_size() - Get storage size for lookup table with overflow check
+ * @groups:	Amount of bit groups
+ * @bb:		Number of bits grouped together in lookup table buckets
+ * @bsize:	Size of each bucket in lookup table, in longs
+ *
+ * Return: allocation size including alignment overhead, negative on overflow
+ */
+static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
+				 unsigned int bsize)
+{
+	ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);
+
+	if (check_mul_overflow(ret, bsize, &ret))
+		return -1;
+	if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
+		return -1;
+	if (ret > INT_MAX)
+		return -1;
+
+	return ret;
+}
+
 /**
  * pipapo_resize() - Resize lookup or mapping table, or both
  * @f:		Field containing lookup and mapping tables
@@ -701,6 +725,7 @@ static int pipapo_resize(struct nft_pipapo_field *f,
 	long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
 	unsigned int new_bucket_size, copy;
 	int group, bucket, err;
+	ssize_t lt_size;
 
 	if (rules >= NFT_PIPAPO_RULE0_MAX)
 		return -ENOSPC;
@@ -719,10 +744,11 @@ static int pipapo_resize(struct nft_pipapo_field *f,
 	else
 		copy = new_bucket_size;
 
-	new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
-			  new_bucket_size * sizeof(*new_lt) +
-			  NFT_PIPAPO_ALIGN_HEADROOM,
-			  GFP_KERNEL);
+	lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
+	if (lt_size < 0)
+		return -ENOMEM;
+
+	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 	if (!new_lt)
 		return -ENOMEM;
 
@@ -907,7 +933,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 {
 	unsigned int groups, bb;
 	unsigned long *new_lt;
-	size_t lt_size;
+	ssize_t lt_size;
 
 	lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
 		  sizeof(*f->lt);
@@ -917,15 +943,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 		groups = f->groups * 2;
 		bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
 
-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
-			  sizeof(*f->lt);
+		lt_size = lt_calculate_size(groups, bb, f->bsize);
+		if (lt_size < 0)
+			return;
 	} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
 		   lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
 		groups = f->groups / 2;
 		bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
 
-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
-			  sizeof(*f->lt);
+		lt_size = lt_calculate_size(groups, bb, f->bsize);
+		if (lt_size < 0)
+			return;
 
 		/* Don't increase group width if the resulting lookup table size
 		 * would exceed the upper size threshold for a "small" set.
@@ -936,7 +964,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 		return;
 	}
 
-	new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
+	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 	if (!new_lt)
 		return;
 
@@ -1451,13 +1479,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
 
 	for (i = 0; i < old->field_count; i++) {
 		unsigned long *new_lt;
+		ssize_t lt_size;
 
 		memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
 
-		new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
-				  src->bsize * sizeof(*dst->lt) +
-				  NFT_PIPAPO_ALIGN_HEADROOM,
-				  GFP_KERNEL_ACCOUNT);
+		lt_size = lt_calculate_size(src->groups, src->bb, src->bsize);
+		if (lt_size < 0)
+			goto out_lt;
+
+		new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
 		if (!new_lt)
 			goto out_lt;
 