@@ -487,6 +487,9 @@ struct ring_buffer_per_cpu {
 	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
+	local_t				pages_touched;
+	local_t				pages_read;
+	size_t				shortest_full;
 	unsigned long			read;
 	unsigned long			read_bytes;
 	u64				write_stamp;
@@ -529,6 +532,41 @@ struct ring_buffer_iter {
 	u64				read_stamp;
 };
 
+/**
+ * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages used by a per_cpu buffer of the ring buffer.
+ */
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+{
+	return buffer->buffers[cpu]->nr_pages;
+}
+
+/**
+ * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages that have content in the ring buffer.
+ */
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+{
+	size_t read;
+	size_t cnt;
+
+	read = local_read(&buffer->buffers[cpu]->pages_read);
+	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+	/* The reader can read an empty page, but not more than that */
+	if (cnt < read) {
+		WARN_ON_ONCE(read > cnt + 1);
+		return 0;
+	}
+
+	return cnt - read;
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
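Taken together, the two helpers added above form the fill-level accounting: pages_touched counts pages the writer has started filling, pages_read counts pages the reader has consumed, and their difference is the number of dirty pages. As a minimal sketch (the percent_full() helper below is hypothetical, not part of this patch), a caller could turn those counts into a fill percentage:

/* Hypothetical helper, not in this patch: express the dirty page
 * count as an integer percentage of the per-CPU buffer size. */
static size_t percent_full(struct ring_buffer *buffer, int cpu)
{
	size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);
	size_t dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	/* Guard against an unallocated buffer (nr_pages == 0). */
	return nr_pages ? (dirty * 100) / nr_pages : 0;
}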
@@ -556,7 +594,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
@@ -571,7 +609,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
 		/* Full only makes sense on per cpu reads */
-		full = false;
+		full = 0;
 	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -ENODEV;
@@ -623,15 +661,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		    !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
+			size_t nr_pages;
+			size_t dirty;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+			nr_pages = cpu_buffer->nr_pages;
+			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+			if (!cpu_buffer->shortest_full ||
+			    cpu_buffer->shortest_full < full)
+				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-			if (!pagebusy)
+			if (!pagebusy &&
+			    (!nr_pages || (dirty * 100) > full * nr_pages))
 				break;
 		}
 
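The wait loop above avoids a division by testing (dirty * 100) > full * nr_pages rather than comparing percentages directly. Factored out for illustration only (this helper does not exist in the patch):

/* Illustrative restatement of the watermark test used above:
 * true once more than `full` percent of the pages are dirty. */
static bool rb_watermark_hit(size_t dirty, size_t nr_pages, int full)
{
	/* No pages allocated: do not block the waiter on the watermark. */
	if (!nr_pages)
		return true;
	/* dirty / nr_pages > full / 100, rewritten without division. */
	return dirty * 100 > (size_t)full * nr_pages;
}

For example, with nr_pages = 8 and full = 50, the waiter keeps sleeping while dirty * 100 <= 400 and is only woken once a fifth page has content.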
@@ -1054,6 +1099,7 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 
+	local_inc(&cpu_buffer->pages_touched);
 	/*
 	 * Just make sure we have seen our old_write and synchronize
 	 * with any interrupts that come in.
@@ -2603,6 +2649,16 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 
 	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+		size_t nr_pages;
+		size_t dirty;
+		size_t full;
+
+		full = cpu_buffer->shortest_full;
+		nr_pages = cpu_buffer->nr_pages;
+		dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+		if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+			return;
+
 		cpu_buffer->irq_work.wakeup_full = true;
 		cpu_buffer->irq_work.full_waiters_pending = false;
 		/* irq_work_queue() supplies its own memory barriers */
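With the producer-side check above, rb_wakeups() returns early while the dirty ratio is still at or below the watermark recorded in shortest_full, so pending full waiters are not woken prematurely. A minimal illustrative caller, assuming only the ring_buffer_wait() signature shown earlier (wait_half_full() itself is hypothetical):

/* Hypothetical consumer: block until this CPU's buffer is more
 * than half full, then let the caller drain it. */
static int wait_half_full(struct ring_buffer *buffer, int cpu)
{
	int ret;

	ret = ring_buffer_wait(buffer, cpu, 50);
	if (ret < 0)
		return ret;	/* e.g. -EINTR on a signal, -ENODEV for a bad cpu */

	/* ... consume events, e.g. with ring_buffer_consume() ... */
	return 0;
}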
@@ -3732,13 +3788,15 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto spin;
 
 	/*
-	 * Yeah! We succeeded in replacing the page.
+	 * Yay! We succeeded in replacing the page.
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
+	local_inc(&cpu_buffer->pages_read);
+
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
 	cpu_buffer->reader_page->read = 0;
@@ -4334,6 +4392,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
+	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_read, 0);
+	cpu_buffer->shortest_full = 0;
 	cpu_buffer->read = 0;
 	cpu_buffer->read_bytes = 0;
 