Commit b841d4f

rostedt authored and smb49 committed
ring-buffer: Include dropped pages in counting dirty pages
BugLink: https://bugs.launchpad.net/bugs/2002347

[ Upstream commit 31029a8 ]

The function ring_buffer_nr_dirty_pages() was created to find out how many pages are filled in the ring buffer. There are two running counters: one is incremented whenever a new page is touched (pages_touched) and the other whenever a page is read (pages_read). The dirty count is the number touched minus the number read. This is used to determine whether a blocked task should be woken up when the percentage of the ring buffer it is waiting for is hit.

The problem is that this accounting does not take dropped pages into account (pages whose unread contents were overwritten by new writes), so the dirty count always ends up greater than the percentage. This makes the "buffer_percent" file inaccurate, as the number of dirty pages is always reported as larger than it really is, even when it is not, and this causes user space to be woken up more often than it wants to be.

Add a new counter to keep track of lost pages, and include it in the accounting of dirty pages so that the count is actually accurate.

Link: https://lkml.kernel.org/r/20221021123013.55fb6055@gandalf.local.home

Fixes: 2c2b0a7 ("ring-buffer: Add percentage of ring buffer full to wake up reader")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
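To make the accounting concrete, here is a minimal userspace sketch of the arithmetic the message describes. It is an illustration, not the kernel code: the struct, the function names, and the counter values in main() are invented for this example.

/*
 * Minimal userspace sketch of the dirty-page accounting described
 * above -- not the kernel code. The struct, function names, and the
 * counter values in main() are invented for illustration.
 */
#include <stdio.h>

struct counters {
	unsigned long pages_touched;	/* pages a writer has filled */
	unsigned long pages_read;	/* pages a reader has consumed */
	unsigned long pages_lost;	/* pages overwritten before being read */
};

/* Before the fix: lost pages still count as dirty */
static unsigned long dirty_before(const struct counters *c)
{
	return c->pages_touched - c->pages_read;
}

/* After the fix: subtract lost pages first (mirrors the WARN_ON_ONCE() guard) */
static unsigned long dirty_after(const struct counters *c)
{
	unsigned long cnt = c->pages_touched;

	if (cnt < c->pages_lost)
		return 0;
	cnt -= c->pages_lost;
	return cnt > c->pages_read ? cnt - c->pages_read : 0;
}

int main(void)
{
	/* e.g. 80 pages touched, 10 read, 40 already overwritten */
	struct counters c = { 80, 10, 40 };

	printf("dirty before fix: %lu\n", dirty_before(&c));	/* 70: inflated */
	printf("dirty after fix:  %lu\n", dirty_after(&c));	/* 30: accurate */
	return 0;
}

With a 100-page buffer and a 50% buffer_percent, the old arithmetic reports 70 dirty pages and wakes the reader even though only 30 pages actually hold unread data.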
1 parent 06adf2f commit b841d4f

File tree: 1 file changed (+12, -0)


kernel/trace/ring_buffer.c

Lines changed: 12 additions & 0 deletions
@@ -477,6 +477,7 @@ struct ring_buffer_per_cpu {
 	local_t				committing;
 	local_t				commits;
 	local_t				pages_touched;
+	local_t				pages_lost;
 	local_t				pages_read;
 	long				last_pages_touch;
 	size_t				shortest_full;
@@ -544,10 +545,18 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
 size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
 {
 	size_t read;
+	size_t lost;
 	size_t cnt;
 
 	read = local_read(&buffer->buffers[cpu]->pages_read);
+	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+	if (WARN_ON_ONCE(cnt < lost))
+		return 0;
+
+	cnt -= lost;
+
 	/* The reader can read an empty page, but not more than that */
 	if (cnt < read) {
 		WARN_ON_ONCE(read > cnt + 1);
@@ -1599,6 +1608,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 		 */
 		local_add(page_entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 	}
 
 	/*
@@ -2023,6 +2033,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		local_add(entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 
 		/*
 		 * The entries will be zeroed out when we move the
@@ -4475,6 +4486,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_lost, 0);
 	local_set(&cpu_buffer->pages_read, 0);
 	cpu_buffer->last_pages_touch = 0;
 	cpu_buffer->shortest_full = 0;
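For context, a rough sketch of the user-space side the commit message refers to: a reader sets "buffer_percent" and then blocks until that much of the buffer is dirty. This is a hedged illustration under assumptions, not a definitive recipe; the tracefs mount point, the choice of cpu0, and the use of poll() here are assumptions for the example.

/*
 * Rough sketch of the user-space wakeup described in the message: set
 * "buffer_percent", then block until the ring buffer is that full.
 * The tracefs paths and cpu0 are assumptions for illustration.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;
	struct pollfd pfd;

	/* Ask to be woken only when the buffer is at least 50% dirty */
	fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
	if (fd < 0) {
		perror("open buffer_percent");
		return 1;
	}
	write(fd, "50", 2);
	close(fd);

	/* Block until cpu0's buffer reaches the requested fill level */
	pfd.fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open trace_pipe_raw");
		return 1;
	}
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);	/* before the fix, this could wake too early */
	printf("woken: buffer reached the requested percentage\n");
	close(pfd.fd);
	return 0;
}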
