biolatency.bpf.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "biolatency.h"
#include "bits.bpf.h"
#include "core_fixes.bpf.h"

#define MAX_ENTRIES 10240

extern int LINUX_KERNEL_VERSION __kconfig;

const volatile bool filter_cg = false;
const volatile bool targ_per_disk = false;
const volatile bool targ_per_flag = false;
const volatile bool targ_queued = false;
const volatile bool targ_ms = false;
const volatile bool filter_dev = false;
const volatile __u32 targ_dev = 0;
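
/* single-entry cgroup array; user space populates slot 0 when filter_cg is set */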
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");
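
/* in-flight requests: struct request * -> start timestamp (ns) */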
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct request *);
	__type(value, u64);
} start SEC(".maps");
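
/* zero-initialized template used when creating a new histogram entry */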
static struct hist initial_hist;
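
/* one latency histogram per hist_key (device and/or cmd_flags) */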
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct hist_key);
	__type(value, struct hist);
} hists SEC(".maps");
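
/* record a request's start timestamp; issue distinguishes block_rq_issue from block_rq_insert */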
static int __always_inline trace_rq_start(struct request *rq, int issue)
{
	u64 ts;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	/*
	 * When queued time is requested, skip the issue event for requests
	 * that go through an I/O scheduler: block_rq_insert already recorded
	 * their timestamp, and overwriting it here would drop the queue time.
	 */
	if (issue && targ_queued && BPF_CORE_READ(rq, q, elevator))
		return 0;

	ts = bpf_ktime_get_ns();

	if (filter_dev) {
		struct gendisk *disk = get_disk(rq);
		u32 dev;

		dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
				   BPF_CORE_READ(disk, first_minor)) : 0;
		if (targ_dev != dev)
			return 0;
	}

	bpf_map_update_elem(&start, &rq, &ts, 0);
	return 0;
}

static int handle_block_rq_insert(__u64 *ctx)
{
	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0))
		return trace_rq_start((void *)ctx[1], false);
	else
		return trace_rq_start((void *)ctx[0], false);
}

static int handle_block_rq_issue(__u64 *ctx)
{
	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0))
		return trace_rq_start((void *)ctx[1], true);
	else
		return trace_rq_start((void *)ctx[0], true);
}

static int handle_block_rq_complete(struct request *rq, int error, unsigned int nr_bytes)
{
	u64 slot, *tsp, ts = bpf_ktime_get_ns();
	struct hist_key hkey = {};
	struct hist *histp;
	s64 delta;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	tsp = bpf_map_lookup_elem(&start, &rq);
	if (!tsp)
		return 0;

	delta = (s64)(ts - *tsp);
	if (delta < 0)
		goto cleanup;

	if (targ_per_disk) {
		struct gendisk *disk = get_disk(rq);

		hkey.dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
					BPF_CORE_READ(disk, first_minor)) : 0;
	}
	if (targ_per_flag)
		hkey.cmd_flags = BPF_CORE_READ(rq, cmd_flags);

	histp = bpf_map_lookup_elem(&hists, &hkey);
	if (!histp) {
		bpf_map_update_elem(&hists, &hkey, &initial_hist, 0);
		histp = bpf_map_lookup_elem(&hists, &hkey);
		if (!histp)
			goto cleanup;
	}

	if (targ_ms)
		delta /= 1000000U;	/* ns -> ms */
	else
		delta /= 1000U;		/* ns -> us */
	slot = log2l(delta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&start, &rq);
	return 0;
}
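
/*
 * Both tp_btf and raw_tp flavors of each program are provided; user space
 * is expected to attach only one set, preferring tp_btf on kernels with
 * BTF tracepoint support and falling back to raw_tp otherwise.
 */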
SEC("tp_btf/block_rq_insert")
int block_rq_insert_btf(u64 *ctx)
{
	return handle_block_rq_insert(ctx);
}

SEC("tp_btf/block_rq_issue")
int block_rq_issue_btf(u64 *ctx)
{
	return handle_block_rq_issue(ctx);
}

SEC("tp_btf/block_rq_complete")
int BPF_PROG(block_rq_complete_btf, struct request *rq, int error, unsigned int nr_bytes)
{
	return handle_block_rq_complete(rq, error, nr_bytes);
}

SEC("raw_tp/block_rq_insert")
int BPF_PROG(block_rq_insert)
{
	return handle_block_rq_insert(ctx);
}

SEC("raw_tp/block_rq_issue")
int BPF_PROG(block_rq_issue)
{
	return handle_block_rq_issue(ctx);
}

SEC("raw_tp/block_rq_complete")
int BPF_PROG(block_rq_complete, struct request *rq, int error, unsigned int nr_bytes)
{
	return handle_block_rq_complete(rq, error, nr_bytes);
}

char LICENSE[] SEC("license") = "GPL";