117 changes: 91 additions & 26 deletions libbpf-tools/tcppktlat.bpf.c
@@ -5,18 +5,22 @@
 #include <bpf/bpf_core_read.h>
 #include <bpf/bpf_tracing.h>
 
+#include "bits.bpf.h"
 #include "compat.bpf.h"
 #include "core_fixes.bpf.h"
+#include "maps.bpf.h"
 #include "tcppktlat.h"
 
-#define MAX_ENTRIES	10240
-#define AF_INET		2
+#define MAX_ENTRIES 10240
+#define AF_INET 2
 
 const volatile pid_t targ_pid = 0;
 const volatile pid_t targ_tid = 0;
 const volatile __u16 targ_sport = 0;
 const volatile __u16 targ_dport = 0;
 const volatile __u64 targ_min_us = 0;
+const volatile bool targ_hist = false;
+const volatile bool targ_per_thread = false;
 
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH);
@@ -25,6 +29,15 @@ struct {
 	__type(value, u64);
 } start SEC(".maps");
 
+static struct hist zero;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u32);
+	__type(value, struct hist);
+} hists SEC(".maps");
+
 static int handle_tcp_probe(struct sock *sk, struct sk_buff *skb)
 {
 	const struct inet_sock *inet = (struct inet_sock *)(sk);
@@ -33,9 +46,10 @@ static int handle_tcp_probe(struct sock *sk, struct sk_buff *skb)
 
 	if (targ_sport && targ_sport != BPF_CORE_READ(inet, inet_sport))
 		return 0;
-	if (targ_dport && targ_dport != BPF_CORE_READ(sk, __sk_common.skc_dport))
+	if (targ_dport &&
+	    targ_dport != BPF_CORE_READ(sk, __sk_common.skc_dport))
 		return 0;
-	th = (const struct tcphdr*)BPF_CORE_READ(skb, data);
+	th = (const struct tcphdr *)BPF_CORE_READ(skb, data);
 	doff = BPF_CORE_READ_BITFIELD_PROBED(th, doff);
 	len = BPF_CORE_READ(skb, len);
 	/* `doff * 4` means `__tcp_hdrlen` */
@@ -47,32 +61,52 @@ static int handle_tcp_probe(struct sock *sk, struct sk_buff *skb)
 		return 0;
 }
 
-static int handle_tcp_rcv_space_adjust(void *ctx, struct sock *sk)
+static int handle_hist_event(struct sock *sk, u32 pid, u32 tid, s64 delta_us)
 {
+	struct hist *histp;
+	struct task_struct *current;
+	u32 hkey;
+	u64 slot;
+
+	/* Use TID for per-thread, PID (tgid) for per-process */
+	if (targ_per_thread)
+		hkey = tid;
+	else
+		hkey = pid;
+
+	histp = bpf_map_lookup_or_try_init(&hists, &hkey, &zero);
+	if (!histp)
+		return -1;
+
+	/* Store comm if not already set */
+	if (!histp->comm[0]) {
+		if (targ_per_thread) {
+			/* For per-thread, use current thread comm */
+			bpf_get_current_comm(&histp->comm, TASK_COMM_LEN);
+		} else {
+			/* For per-process, use process group leader comm */
+			current = (struct task_struct *)bpf_get_current_task();
+			BPF_CORE_READ_STR_INTO(&histp->comm, current,
+					       group_leader, comm);
+		}
+	}
+	slot = log2l(delta_us);
+	if (slot >= MAX_SLOTS)
+		slot = MAX_SLOTS - 1;
+	__sync_fetch_and_add(&histp->slots[slot], 1);
+	return 0;
+}
+
+static int handle_event_output(void *ctx, struct sock *sk, u32 pid, u32 tid,
+			       s64 delta_us)
+{
 	const struct inet_sock *inet = (struct inet_sock *)(sk);
-	u64 sock_ident = get_sock_ident(sk);
-	u64 id = bpf_get_current_pid_tgid(), *tsp;
-	u32 pid = id >> 32, tid = id;
 	struct event *eventp;
-	s64 delta_us;
 	u16 family;
 
-	tsp = bpf_map_lookup_elem(&start, &sock_ident);
-	if (!tsp)
-		return 0;
-
-	if (targ_pid && targ_pid != pid)
-		goto cleanup;
-	if (targ_tid && targ_tid != tid)
-		goto cleanup;
-
-	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
-	if (delta_us < 0 || delta_us <= targ_min_us)
-		goto cleanup;
-
 	eventp = reserve_buf(sizeof(*eventp));
 	if (!eventp)
-		goto cleanup;
+		return -1;
 
 	eventp->pid = pid;
 	eventp->tid = tid;
@@ -85,11 +119,41 @@ static int handle_tcp_rcv_space_adjust(void *ctx, struct sock *sk)
 		eventp->saddr[0] = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
 		eventp->daddr[0] = BPF_CORE_READ(sk, __sk_common.skc_daddr);
 	} else { /* family == AF_INET6 */
-		BPF_CORE_READ_INTO(eventp->saddr, sk, __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
-		BPF_CORE_READ_INTO(eventp->daddr, sk, __sk_common.skc_v6_daddr.in6_u.u6_addr32);
+		BPF_CORE_READ_INTO(
+			eventp->saddr, sk,
+			__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
+		BPF_CORE_READ_INTO(eventp->daddr, sk,
+				   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
 	}
 	eventp->family = family;
 	submit_buf(ctx, eventp, sizeof(*eventp));
+	return 0;
+}
+
+static int handle_tcp_rcv_space_adjust(void *ctx, struct sock *sk)
+{
+	u64 sock_ident = get_sock_ident(sk);
+	u64 id = bpf_get_current_pid_tgid(), *tsp;
+	u32 pid = id >> 32, tid = id;
+	s64 delta_us;
+
+	tsp = bpf_map_lookup_elem(&start, &sock_ident);
+	if (!tsp)
+		return 0;
+
+	if (targ_pid && targ_pid != pid)
+		goto cleanup;
+	if (targ_tid && targ_tid != tid)
+		goto cleanup;
+
+	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
+	if (delta_us < 0 || delta_us <= targ_min_us)
+		goto cleanup;
+
+	if (targ_hist)
+		handle_hist_event(sk, pid, tid, delta_us);
+	else
+		handle_event_output(ctx, sk, pid, tid, delta_us);
+
 cleanup:
 	bpf_map_delete_elem(&start, &sock_ident);
@@ -123,7 +187,8 @@ int BPF_PROG(tcp_destroy_sock_btf, struct sock *sk)
 }
 
 SEC("raw_tp/tcp_probe")
-int BPF_PROG(tcp_probe, struct sock *sk, struct sk_buff *skb) {
+int BPF_PROG(tcp_probe, struct sock *sk, struct sk_buff *skb)
+{
 	return handle_tcp_probe(sk, skb);
 }

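Note for reviewers: the histogram path depends on a struct hist defined in tcppktlat.h, which this file's diff does not show. Inferring from the usage above (histp->comm[0], histp->slots[slot], MAX_SLOTS, TASK_COMM_LEN), the layout is presumably something like the following; this is an assumption, not the PR's actual definition:

	/* assumed layout of struct hist (see tcppktlat.h in this PR) */
	struct hist {
		char comm[TASK_COMM_LEN];	/* owner comm, set on first sample */
		__u32 slots[MAX_SLOTS];		/* log2 buckets of delta_us: a 300 us
						 * sample lands in slot log2l(300) = 8,
						 * i.e. the [256, 511] us bucket */
	};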
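On the userspace side, the histogram output would then typically be produced by walking hists once per interval and printing each entry with print_log2_hist() from trace_helpers.h, the same pattern runqlat.c and tcprtt.c use. A minimal sketch under that assumption; print_hists() and its sentinel-key iteration are illustrative, not code from this PR:

	/* Hypothetical drain loop for tcppktlat.c, modeled on runqlat's
	 * print_log2_hists(). */
	#include <stdio.h>
	#include <bpf/bpf.h>
	#include "tcppktlat.h"
	#include "trace_helpers.h"

	/* fd is the hists map fd; keys are PIDs, or TIDs when targ_per_thread
	 * is set. */
	static int print_hists(int fd)
	{
		__u32 lookup_key = -2, next_key;	/* -2: assumed-unused sentinel key */
		struct hist hist;
		int err;

		while (!bpf_map_get_next_key(fd, &lookup_key, &next_key)) {
			err = bpf_map_lookup_elem(fd, &next_key, &hist);
			if (err < 0) {
				fprintf(stderr, "failed to lookup hist: %d\n", err);
				return -1;
			}
			printf("\n%s [%u]\n", hist.comm, next_key);
			print_log2_hist(hist.slots, MAX_SLOTS, "usecs");
			lookup_key = next_key;
		}
		return 0;
	}

A real loader would likely also delete each entry after printing (bpf_map_delete_elem) so every interval starts from an empty histogram.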