bpf/nat: split snat_v*_can_skip() into nat and rev_nat variants
Continuing the effort to improve code quality and reduce verifier
complexity, this patch splits snat_v*_can_skip() into two functions:
snat_v*_nat_can_skip() for the egress (NAT) path and
snat_v*_rev_nat_can_skip() for the ingress (reverse NAT) path.

Signed-off-by: Sahid Orentino Ferdjaoui <sahid.ferdjaoui@industrialdiscipline.com>
sahid authored and pchaigno committed Sep 20, 2022
1 parent a0d059e commit e5df587
Showing 2 changed files with 32 additions and 36 deletions.
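For orientation before the diff, here is a condensed view of the resulting IPv4 helper pair and which caller uses each one. It is assembled from the hunks below (the IPv6 variants in the same file mirror it exactly); it is not additional code beyond what this commit introduces:

static __always_inline bool
snat_v4_nat_can_skip(const struct ipv4_nat_target *target, const struct ipv4_ct_tuple *tuple,
		     bool from_endpoint, bool icmp_echoreply)
{
	__u16 sport = bpf_ntohs(tuple->sport);

	/* Egress-only skip check, called from snat_v4_nat(); this is the
	 * former NAT_DIR_EGRESS branch of snat_v4_can_skip().
	 */
	return (!from_endpoint && !target->src_from_world && sport < NAT_MIN_EGRESS) ||
	       icmp_echoreply;
}

static __always_inline bool
snat_v4_rev_nat_can_skip(const struct ipv4_nat_target *target, const struct ipv4_ct_tuple *tuple)
{
	__u16 dport = bpf_ntohs(tuple->dport);

	/* Ingress-only skip check, called from snat_v4_rev_nat(); this is the
	 * former NAT_DIR_INGRESS branch of snat_v4_can_skip().
	 */
	return dport < target->min_port || dport > target->max_port;
}

Because each caller now knows its direction at the call site, the enum nat_dir argument disappears, snat_v4_rev_nat() drops its unused from_endpoint parameter, and the rev-NAT paths no longer need to track icmp_echoreply at all.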
bpf/lib/nat.h (31 additions, 35 deletions)
@@ -443,20 +443,21 @@ static __always_inline int snat_v4_rewrite_ingress(struct __ctx_buff *ctx,
 }
 
 static __always_inline bool
-snat_v4_can_skip(const struct ipv4_nat_target *target,
-		 const struct ipv4_ct_tuple *tuple, enum nat_dir dir,
-		 bool from_endpoint, bool icmp_echoreply)
+snat_v4_nat_can_skip(const struct ipv4_nat_target *target, const struct ipv4_ct_tuple *tuple,
+		     bool from_endpoint, bool icmp_echoreply)
 {
-	__u16 dport = bpf_ntohs(tuple->dport), sport = bpf_ntohs(tuple->sport);
+	__u16 sport = bpf_ntohs(tuple->sport);
 
-	if (dir == NAT_DIR_EGRESS &&
-	    ((!from_endpoint && !target->src_from_world && sport < NAT_MIN_EGRESS) ||
-	     icmp_echoreply))
-		return true;
-	if (dir == NAT_DIR_INGRESS && (dport < target->min_port || dport > target->max_port))
-		return true;
+	return (!from_endpoint && !target->src_from_world && sport < NAT_MIN_EGRESS) ||
+	       icmp_echoreply;
+}
 
-	return false;
+static __always_inline bool
+snat_v4_rev_nat_can_skip(const struct ipv4_nat_target *target, const struct ipv4_ct_tuple *tuple)
+{
+	__u16 dport = bpf_ntohs(tuple->dport);
+
+	return dport < target->min_port || dport > target->max_port;
 }
 
 static __always_inline __maybe_unused int snat_v4_create_dsr(struct __ctx_buff *ctx,
@@ -572,7 +573,7 @@ snat_v4_nat(struct __ctx_buff *ctx, const struct ipv4_nat_target *target,
 		return NAT_PUNT_TO_STACK;
 	};
 
-	if (snat_v4_can_skip(target, &tuple, NAT_DIR_EGRESS, from_endpoint, icmp_echoreply))
+	if (snat_v4_nat_can_skip(target, &tuple, from_endpoint, icmp_echoreply))
 		return NAT_PUNT_TO_STACK;
 	ret = snat_v4_handle_mapping(ctx, &tuple, &state, &tmp, NAT_DIR_EGRESS, off, target);
 	if (ret > 0)
@@ -584,8 +585,7 @@
 }
 
 static __always_inline __maybe_unused int
-snat_v4_rev_nat(struct __ctx_buff *ctx, const struct ipv4_nat_target *target,
-		bool from_endpoint)
+snat_v4_rev_nat(struct __ctx_buff *ctx, const struct ipv4_nat_target *target)
 {
 	struct icmphdr icmphdr __align_stack_8;
 	struct ipv4_nat_entry *state, tmp;
@@ -596,7 +596,6 @@ snat_v4_rev_nat(struct __ctx_buff *ctx, const struct ipv4_nat_target *target,
 		__be16 sport;
 		__be16 dport;
 	} l4hdr;
-	bool icmp_echoreply = false;
 	__u64 off;
 	int ret;
 
@@ -633,14 +632,13 @@ snat_v4_rev_nat(struct __ctx_buff *ctx, const struct ipv4_nat_target *target,
 		} else {
 			tuple.dport = icmphdr.un.echo.id;
 			tuple.sport = 0;
-			icmp_echoreply = true;
 		}
 		break;
 	default:
 		return NAT_PUNT_TO_STACK;
 	};
 
-	if (snat_v4_can_skip(target, &tuple, NAT_DIR_INGRESS, from_endpoint, icmp_echoreply))
+	if (snat_v4_rev_nat_can_skip(target, &tuple))
 		return NAT_PUNT_TO_STACK;
 	ret = snat_v4_handle_mapping(ctx, &tuple, &state, &tmp, NAT_DIR_INGRESS, off, target);
 	if (ret > 0)
@@ -661,8 +659,7 @@ int snat_v4_nat(struct __ctx_buff *ctx __maybe_unused,
 
 static __always_inline __maybe_unused
 int snat_v4_rev_nat(struct __ctx_buff *ctx __maybe_unused,
-		    const struct ipv4_nat_target *target __maybe_unused,
-		    bool from_endpoint __maybe_unused)
+		    const struct ipv4_nat_target *target __maybe_unused)
 {
 	return CTX_ACT_OK;
 }
@@ -999,19 +996,20 @@ static __always_inline int snat_v6_rewrite_ingress(struct __ctx_buff *ctx,
 }
 
 static __always_inline bool
-snat_v6_can_skip(const struct ipv6_nat_target *target,
-		 const struct ipv6_ct_tuple *tuple, enum nat_dir dir,
-		 bool icmp_echoreply)
+snat_v6_nat_can_skip(const struct ipv6_nat_target *target, const struct ipv6_ct_tuple *tuple,
+		     bool icmp_echoreply)
 {
-	__u16 dport = bpf_ntohs(tuple->dport), sport = bpf_ntohs(tuple->sport);
-
-	if (dir == NAT_DIR_EGRESS &&
-	    ((!target->src_from_world && sport < NAT_MIN_EGRESS) ||
-	     icmp_echoreply))
-		return true;
-	if (dir == NAT_DIR_INGRESS && (dport < target->min_port || dport > target->max_port))
-		return true;
-	return false;
+	__u16 sport = bpf_ntohs(tuple->sport);
+
+	return (!target->src_from_world && sport < NAT_MIN_EGRESS) || icmp_echoreply;
+}
+
+static __always_inline bool
+snat_v6_rev_nat_can_skip(const struct ipv6_nat_target *target, const struct ipv6_ct_tuple *tuple)
+{
+	__u16 dport = bpf_ntohs(tuple->dport);
+
+	return dport < target->min_port || dport > target->max_port;
 }
 
 static __always_inline __maybe_unused int snat_v6_create_dsr(struct __ctx_buff *ctx,
@@ -1140,7 +1138,7 @@ snat_v6_nat(struct __ctx_buff *ctx, const struct ipv6_nat_target *target)
 		return NAT_PUNT_TO_STACK;
 	};
 
-	if (snat_v6_can_skip(target, &tuple, NAT_DIR_EGRESS, icmp_echoreply))
+	if (snat_v6_nat_can_skip(target, &tuple, icmp_echoreply))
 		return NAT_PUNT_TO_STACK;
 	ret = snat_v6_handle_mapping(ctx, &tuple, &state, &tmp, NAT_DIR_EGRESS, off, target);
 	if (ret > 0)
@@ -1166,7 +1164,6 @@ snat_v6_rev_nat(struct __ctx_buff *ctx, const struct ipv6_nat_target *target)
 	} l4hdr;
 	__u8 nexthdr;
 	__u32 off;
-	bool icmp_echoreply = false;
 
 	build_bug_on(sizeof(struct ipv6_nat_entry) > 64);
 
@@ -1210,14 +1207,13 @@ snat_v6_rev_nat(struct __ctx_buff *ctx, const struct ipv6_nat_target *target)
 		} else {
 			tuple.dport = icmp6hdr.icmp6_dataun.u_echo.identifier;
 			tuple.sport = 0;
-			icmp_echoreply = true;
 		}
 		break;
 	default:
 		return NAT_PUNT_TO_STACK;
 	};
 
-	if (snat_v6_can_skip(target, &tuple, NAT_DIR_INGRESS, icmp_echoreply))
+	if (snat_v6_rev_nat_can_skip(target, &tuple))
 		return NAT_PUNT_TO_STACK;
 	ret = snat_v6_handle_mapping(ctx, &tuple, &state, &tmp, NAT_DIR_INGRESS, off, target);
 	if (ret > 0)
bpf/lib/nodeport.h (1 addition, 1 deletion)
@@ -1645,7 +1645,7 @@ int tail_nodeport_nat_ingress_ipv4(struct __ctx_buff *ctx)
 	 */
 	target.addr = IPV4_DIRECT_ROUTING;
 
-	ret = snat_v4_rev_nat(ctx, &target, false);
+	ret = snat_v4_rev_nat(ctx, &target);
 	if (IS_ERR(ret)) {
 		/* In case of no mapping, recircle back to main path. SNAT is very
 		 * expensive in terms of instructions (since we don't have BPF to
