Merge pull request #31 from dynatrace-oss/loopback_filter_on_bpf_level
Filtering loopback traffic at the BPF level
pawsten authored Sep 2, 2024
2 parents 089db1b + 4fda739 commit a4cea2a
Showing 11 changed files with 230 additions and 186 deletions.
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -67,6 +67,7 @@ endif()

if(ARCHITECTURE STREQUAL "x86_64")
set(ARCH "x86")
add_definitions(-D__TARGET_ARCH_x86)
elseif(ARCHITECTURE STREQUAL "aarch64")
set(ARCH "arm64")
else()
42 changes: 42 additions & 0 deletions bpf_program/metrics_utilities.h
@@ -61,3 +61,45 @@ static void maybe_fix_missing_connection_tuple(enum protocol proto, void* tuple)
struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE};
bpf_map_update_elem(map, tuple, &p, BPF_NOEXIST);
}

__attribute__((always_inline))
static bool filter_loopback(const int32_t ip) {
#ifdef __TARGET_ARCH_x86
const uint32_t loopback = 0x0000007f;
return (ip & loopback) == loopback;
#else
const uint32_t loopback = 0x7f000000;
return (htonl(ip) & loopback) == loopback;
#endif
}

__attribute__((always_inline))
static bool filter_ipv4(struct ipv4_tuple_t* ipv4) {
return filter_loopback(ipv4->saddr);
}

__attribute__((always_inline))
static bool isipv4ipv6(uint64_t addr_l, uint64_t addr_h) {
if (addr_h != 0) {
return false;
}

#ifdef __TARGET_ARCH_x86
uint64_t mask = 0x00000000ffff0000;
uint64_t res = addr_l & mask;
#else
uint64_t mask = 0xffff;
uint64_t res = htonl(addr_l) & mask;
#endif
return res == mask;
}

__attribute__((always_inline))
static bool filter_ipv6(const struct ipv6_tuple_t* key) {
if (isipv4ipv6(key->saddr_l, key->daddr_h)) {
uint32_t ipv4 = (uint32_t)(key->saddr_l >> 32);
return filter_loopback(ipv4);
}
const uint64_t loopback = 0xffffffff00000000;
return ((key->saddr_l & loopback) == key->saddr_l);
}
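
For reference, a minimal userspace sketch of what the new helpers check, assuming the little-endian (__TARGET_ARCH_x86) branch and addresses held in network byte order as they are in the kernel socket structs; is_loopback_v4 and is_v4_mapped_v6 are hypothetical stand-ins for filter_loopback and isipv4ipv6, not code from this commit:

// Sketch only: userspace re-implementation of the masking logic, little-endian host assumed.
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// 127.0.0.0/8: in network byte order the first byte in memory is 0x7f, which a
// little-endian host reads as the least significant byte of the u32.
static bool is_loopback_v4(uint32_t addr_be) {
    return (addr_be & 0x0000007f) == 0x0000007f;
}

// IPv4-mapped IPv6 (::ffff:a.b.c.d): the high 64 bits are zero and bytes 10-11
// of the address are 0xffff, which land in bits 16..31 of the low u64 on a
// little-endian host; the embedded IPv4 address is the upper 32 bits.
static bool is_v4_mapped_v6(uint64_t addr_l, uint64_t addr_h) {
    if (addr_h != 0)
        return false;
    return (addr_l & 0x00000000ffff0000ULL) == 0x00000000ffff0000ULL;
}

int main(void) {
    uint32_t lo  = inet_addr("127.0.0.1"); // network byte order
    uint32_t pub = inet_addr("10.1.2.3");
    printf("127.0.0.1 loopback? %d\n", is_loopback_v4(lo));   // expect 1
    printf("10.1.2.3  loopback? %d\n", is_loopback_v4(pub));  // expect 0

    // ::ffff:127.0.0.1 -> low 64 bits as read on a little-endian host: 0x0100007fffff0000
    uint64_t mapped_l = 0x0100007fffff0000ULL;
    printf("mapped v6? %d, embedded v4 loopback? %d\n",
           is_v4_mapped_v6(mapped_l, 0),
           is_loopback_v4((uint32_t)(mapped_l >> 32)));       // expect 1, 1
    return 0;
}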
115 changes: 58 additions & 57 deletions bpf_program/probes/connections.h
@@ -1,6 +1,7 @@
#include "bpf_helpers.h"
#include "log.h"
#include "maps.h"
#include "metrics_utilities.h"
#include "offset_guessing.h"
#include "tuples_utilities.h"

@@ -55,6 +56,10 @@ int kretprobe__tcp_v4_connect(struct pt_regs *ctx)
return 0;
}

if(filter_ipv4(&t)){
return 0;
}

struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE};
uint32_t cpu = bpf_get_smp_processor_id();
if (bpf_map_update_elem(&tuplepid_ipv4, &t, &p, BPF_ANY) < 0) {
@@ -116,20 +121,14 @@ int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
return 0;
}

struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE };
uint32_t cpu = bpf_get_smp_processor_id();
if (is_ipv4_mapped_ipv6_tuple(t)) {
struct ipv4_tuple_t t4 = convert_ipv4_mapped_ipv6_tuple_to_ipv4(t);

if (bpf_map_update_elem(&tuplepid_ipv4, &t4, &p, BPF_ANY) < 0) {
LOG_DEBUG_BPF(ctx, "Connect missed, reached max conns?: {:d}:{:d} {:d}:{:d} {:d}", t4.saddr, t4.sport, t4.daddr, t4.dport, pid >> 32);
}

struct tcp_ipv4_event_t evt4 = convert_ipv4_tuple_to_event(t4, cpu, TCP_EVENT_TYPE_CONNECT, pid >> 32);
bpf_perf_event_output(ctx, &tcp_event_ipv4, cpu, &evt4, sizeof(evt4));
if(filter_ipv6(&t)){
return 0;
}

struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE };
uint32_t cpu = bpf_get_smp_processor_id();

if (bpf_map_update_elem(&tuplepid_ipv6, &t, &p, BPF_ANY) < 0) {
LOG_DEBUG_BPF(ctx, "Connect v6 missed, reached max conns?: {:d}{:d}:{:d} {:d}{:d}:{:d} {:d}", t.saddr_h, t.saddr_l, t.sport, t.daddr_h, t.daddr_l, t.dport, pid >> 32);
}
@@ -162,6 +161,10 @@ int kretprobe__inet_csk_accept(struct pt_regs *ctx)
return 0;
}

if(filter_ipv4(&t)){
return 0;
}

struct tcp_ipv4_event_t evt = convert_ipv4_tuple_to_event(t, cpu, TCP_EVENT_TYPE_ACCEPT, pid >> 32);

// do not send event if IP address is 0.0.0.0 or port is 0
@@ -174,34 +177,32 @@ int kretprobe__inet_csk_accept(struct pt_regs *ctx)
}
} else if (check_family(newsk, AF_INET6)) {
struct ipv6_tuple_t t = {};
if (!read_ipv6_tuple(&t, status, newsk)){
if (!read_ipv6_tuple(&t, status, newsk)) {
return 0;
}

if (is_ipv4_mapped_ipv6_tuple(t)) {
struct ipv4_tuple_t t4 = convert_ipv4_mapped_ipv6_tuple_to_ipv4(t);
struct tcp_ipv4_event_t evt4 = convert_ipv4_tuple_to_event(t4, cpu, TCP_EVENT_TYPE_ACCEPT, pid >> 32);

// do not send event if IP address is 0.0.0.0 or port is 0
if (evt4.saddr != 0 && evt4.daddr != 0 && evt4.sport != 0 && evt4.dport != 0) {
struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE};
if (bpf_map_update_elem(&tuplepid_ipv4, &t4, &p, BPF_ANY) < 0) {
LOG_DEBUG_BPF(ctx, "Accept missed, reached max conns?: {:d}:{:d} {:d}:{:d} {:d}", t4.saddr, t4.sport, t4.daddr, t4.dport, pid >> 32);
}
bpf_perf_event_output(ctx, &tcp_event_ipv4, cpu, &evt4, sizeof(evt4));
}
if (filter_ipv6(&t)) {
return 0;
}
else {
struct tcp_ipv6_event_t evt = convert_ipv6_tuple_to_event(t, cpu, TCP_EVENT_TYPE_ACCEPT, pid >> 32);

// do not send event if IP address is :: or port is 0
if ((evt.saddr_h || evt.saddr_l) && (evt.daddr_h || evt.daddr_l) && evt.sport != 0 && evt.dport != 0) {
struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE};
if (bpf_map_update_elem(&tuplepid_ipv6, &t, &p, BPF_ANY) < 0) {
LOG_DEBUG_BPF(ctx, "Accept v6 missed, reached max conns?: {:d}{:d}:{:d} {:d}{:d}:{:d} {:d}", t.saddr_h, t.saddr_l, t.sport, t.daddr_h, t.daddr_l, t.dport, pid >> 32);
}
bpf_perf_event_output(ctx, &tcp_event_ipv6, cpu, &evt, sizeof(evt));

struct tcp_ipv6_event_t evt = convert_ipv6_tuple_to_event(t, cpu, TCP_EVENT_TYPE_ACCEPT, pid >> 32);

// do not send event if IP address is :: or port is 0
if ((evt.saddr_h || evt.saddr_l) && (evt.daddr_h || evt.daddr_l) && evt.sport != 0 && evt.dport != 0) {
struct pid_comm_t p = {.pid = pid, .state = CONN_ACTIVE};
if (bpf_map_update_elem(&tuplepid_ipv6, &t, &p, BPF_ANY) < 0) {
LOG_DEBUG_BPF(
ctx,
"Accept v6 missed, reached max conns?: {:d}{:d}:{:d} {:d}{:d}:{:d} {:d}",
t.saddr_h,
t.saddr_l,
t.sport,
t.daddr_h,
t.daddr_l,
t.dport,
pid >> 32);
}
bpf_perf_event_output(ctx, &tcp_event_ipv6, cpu, &evt, sizeof(evt));
}
}
return 0;
@@ -228,6 +229,11 @@ int kprobe__tcp_close(struct pt_regs *ctx)
return 0;
}


if(filter_ipv4(&t)){
return 0;
}

struct pid_comm_t* pp;
pp = bpf_map_lookup_elem(&tuplepid_ipv4, &t);
if (pp == NULL) {
@@ -243,33 +249,28 @@ int kprobe__tcp_close(struct pt_regs *ctx)
if (!read_ipv6_tuple(&t, status, sk)) {
return 0;
}

if (is_ipv4_mapped_ipv6_tuple(t)) {
struct ipv4_tuple_t t4 = convert_ipv4_mapped_ipv6_tuple_to_ipv4(t);

struct pid_comm_t* pp;
pp = bpf_map_lookup_elem(&tuplepid_ipv4, &t4);
if (pp == NULL) {
LOG_DEBUG_BPF(ctx, "Missing tuplepid entry: {:d}:{:d} {:d}:{:d}", t4.saddr, t4.sport, t4.daddr, t4.dport);
} else {
pp->state = CONN_CLOSED;
}

struct tcp_ipv4_event_t evt4 = convert_ipv4_tuple_to_event(t4, cpu, TCP_EVENT_TYPE_CLOSE, pid >> 32);
bpf_perf_event_output(ctx, &tcp_event_ipv4, cpu, &evt4, sizeof(evt4));
if (filter_ipv6(&t)) {
return 0;
}
else {
struct pid_comm_t* pp;
pp = bpf_map_lookup_elem(&tuplepid_ipv6, &t);
if (pp == NULL) {
LOG_DEBUG_BPF(ctx, "Missing tuplepid entry: {:d}{:d}:{:d} {:d}{:d}:{:d}", t.saddr_h, t.saddr_l, t.sport, t.daddr_h, t.daddr_l, t.dport);
} else {
pp->state = CONN_CLOSED;
}

struct tcp_ipv6_event_t evt = convert_ipv6_tuple_to_event(t, cpu, TCP_EVENT_TYPE_CLOSE, pid >> 32);
bpf_perf_event_output(ctx, &tcp_event_ipv6, cpu, &evt, sizeof(evt));
struct pid_comm_t* pp;
pp = bpf_map_lookup_elem(&tuplepid_ipv6, &t);
if (pp == NULL) {
LOG_DEBUG_BPF(
ctx,
"Missing tuplepid entry: {:d}{:d}:{:d} {:d}{:d}:{:d}",
t.saddr_h,
t.saddr_l,
t.sport,
t.daddr_h,
t.daddr_l,
t.dport);
} else {
pp->state = CONN_CLOSED;
}

struct tcp_ipv6_event_t evt = convert_ipv6_tuple_to_event(t, cpu, TCP_EVENT_TYPE_CLOSE, pid >> 32);
bpf_perf_event_output(ctx, &tcp_event_ipv6, cpu, &evt, sizeof(evt));
}
return 0;
}
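
Each probe above applies the same gate, and the metrics.h hunks below repeat it: loopback tuples are dropped before any map update or perf event. A rough, non-authoritative sketch of that control flow follows; probe_body, record_pid, and emit_event are hypothetical stand-ins, not symbols from this repository:

// Sketch of the gating pattern only; not the actual kernel/BPF probe code.
#include <stdbool.h>
#include <stdint.h>

struct ipv4_tuple { uint32_t saddr, daddr; uint16_t sport, dport; };

// Mirrors filter_ipv4()/filter_loopback() for a little-endian host.
static bool filter_ipv4_sketch(const struct ipv4_tuple *t) {
    return (t->saddr & 0x0000007f) == 0x0000007f; // 127.0.0.0/8
}

static void record_pid(const struct ipv4_tuple *t, uint64_t pid) { (void)t; (void)pid; /* map update elided */ }
static void emit_event(const struct ipv4_tuple *t) { (void)t; /* perf event elided */ }

// Shape of kretprobe__tcp_v4_connect / kretprobe__inet_csk_accept / kprobe__tcp_close after this change:
static int probe_body(const struct ipv4_tuple *t, uint64_t pid) {
    if (filter_ipv4_sketch(t))
        return 0; // loopback: bail out before touching maps or emitting events
    record_pid(t, pid);
    emit_event(t);
    return 0;
}

int main(void) {
    // 127.0.0.1 in network byte order as read on a little-endian host
    struct ipv4_tuple loopback_conn = { .saddr = 0x0100007f, .daddr = 0x0100007f, .sport = 5000, .dport = 80 };
    return probe_body(&loopback_conn, 1234); // returns 0; nothing is recorded for loopback traffic
}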
64 changes: 31 additions & 33 deletions bpf_program/probes/metrics.h
@@ -29,6 +29,10 @@ static int send_metric(struct sock* sk, int32_t bytes_sent) {
return 0;
}

if(filter_ipv4(&ipv4_tuple)){
return 0;
}

maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
update_stats(&ipv4_tuple, IPV4, bytes_sent, 0);
update_tcp_stats(&ipv4_tuple, IPV4, status, sk);
@@ -38,17 +42,13 @@
return 0;
}

if (is_ipv4_mapped_ipv6_tuple(ipv6_tuple)) {
struct ipv4_tuple_t ipv4_tuple = convert_ipv4_mapped_ipv6_tuple_to_ipv4(ipv6_tuple);

maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
update_stats(&ipv4_tuple, IPV4, bytes_sent, 0);
update_tcp_stats(&ipv4_tuple, IPV4, status, sk);
} else {
maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
update_stats(&ipv6_tuple, IPV6, bytes_sent, 0);
update_tcp_stats(&ipv6_tuple, IPV6, status, sk);
if(filter_ipv6(&ipv6_tuple)){
return 0;
}

maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
update_stats(&ipv6_tuple, IPV6, bytes_sent, 0);
update_tcp_stats(&ipv6_tuple, IPV6, status, sk);
}
return 0;
}
@@ -106,6 +106,10 @@ int kprobe__tcp_cleanup_rbuf(struct pt_regs* ctx) {
return 0;
}

if(filter_ipv4(&ipv4_tuple)){
return 0;
}

maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
update_stats(&ipv4_tuple, IPV4, 0, bytes_received);
update_tcp_stats(&ipv4_tuple, IPV4, status, sk);
@@ -115,19 +119,13 @@
if (!read_ipv6_tuple(&ipv6_tuple, status, sk)) {
return 0;
}

if (is_ipv4_mapped_ipv6_tuple(ipv6_tuple)) {
struct ipv4_tuple_t ipv4_tuple = convert_ipv4_mapped_ipv6_tuple_to_ipv4(ipv6_tuple);

maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
update_stats(&ipv4_tuple, IPV4, 0, bytes_received);
update_tcp_stats(&ipv4_tuple, IPV4, status, sk);
}
else {
maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
update_stats(&ipv6_tuple, IPV6, 0, bytes_received);
update_tcp_stats(&ipv6_tuple, IPV6, status, sk);
if(filter_ipv6(&ipv6_tuple)){
return 0;
}

maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
update_stats(&ipv6_tuple, IPV6, 0, bytes_received);
update_tcp_stats(&ipv6_tuple, IPV6, status, sk);
}
return 0;
}
@@ -149,6 +147,10 @@ int kprobe__tcp_retransmit_skb(struct pt_regs* ctx) {
return 0;
}

if(filter_ipv4(&ipv4_tuple)){
return 0;
}

struct tcp_stats_t empty = { 0 };
maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
bpf_map_update_elem(&tcp_stats_ipv4, &ipv4_tuple, &empty, BPF_NOEXIST);
@@ -166,20 +168,16 @@ int kprobe__tcp_retransmit_skb(struct pt_regs* ctx) {
return 0;
}

if(filter_ipv6(&ipv6_tuple)){
return 0;
}

struct tcp_stats_t empty = { 0 };
struct tcp_stats_t *stats = NULL;

if (is_ipv4_mapped_ipv6_tuple(ipv6_tuple)) {
struct ipv4_tuple_t ipv4_tuple = convert_ipv4_mapped_ipv6_tuple_to_ipv4(ipv6_tuple);
maybe_fix_missing_connection_tuple(IPV4, &ipv4_tuple);
bpf_map_update_elem(&tcp_stats_ipv4, &ipv4_tuple, &empty, BPF_NOEXIST);
stats = bpf_map_lookup_elem(&tcp_stats_ipv4, &ipv4_tuple);
}
else {
maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
bpf_map_update_elem(&tcp_stats_ipv6, &ipv6_tuple, &empty, BPF_NOEXIST);
stats = bpf_map_lookup_elem(&tcp_stats_ipv6, &ipv6_tuple);
}
maybe_fix_missing_connection_tuple(IPV6, &ipv6_tuple);
bpf_map_update_elem(&tcp_stats_ipv6, &ipv6_tuple, &empty, BPF_NOEXIST);
stats = bpf_map_lookup_elem(&tcp_stats_ipv6, &ipv6_tuple);
if (stats == NULL) {
return 0;
}