@@ -487,6 +487,9 @@ struct ring_buffer_per_cpu {
 	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
+	local_t				pages_touched;
+	local_t				pages_read;
+	size_t				shortest_full;
 	unsigned long			read;
 	unsigned long			read_bytes;
 	u64				write_stamp;
@@ -529,6 +532,41 @@ struct ring_buffer_iter {
 	u64				read_stamp;
 };
 
+/**
+ * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages used by a per_cpu buffer of the ring buffer.
+ */
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+{
+	return buffer->buffers[cpu]->nr_pages;
+}
+
+/**
+ * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages that have content in the ring buffer.
+ */
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+{
+	size_t read;
+	size_t cnt;
+
+	read = local_read(&buffer->buffers[cpu]->pages_read);
+	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+	/* The reader can read an empty page, but not more than that */
+	if (cnt < read) {
+		WARN_ON_ONCE(read > cnt + 1);
+		return 0;
+	}
+
+	return cnt - read;
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
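
The pair of counters added above drives the new accounting: pages_touched goes up each time the writer moves on to a new page (see the rb_tail_page_update hunk below), and pages_read goes up each time the reader swaps a page out (see the rb_get_reader_page hunk), so their difference approximates how many pages still hold unread data. The reader can legitimately be one page ahead when it swaps out an empty reader page, which is why the difference is clamped to zero rather than allowed to underflow. A minimal userspace sketch of that arithmetic, with illustrative names that are not part of the kernel code:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same saturating subtraction as ring_buffer_nr_dirty_pages(), modelled
 * with plain size_t counters instead of local_t. */
static size_t nr_dirty_pages(size_t pages_touched, size_t pages_read)
{
	if (pages_touched < pages_read) {
		/* The reader may only be one (empty) page ahead. */
		assert(pages_read <= pages_touched + 1);
		return 0;
	}
	return pages_touched - pages_read;
}

int main(void)
{
	printf("%zu\n", nr_dirty_pages(8, 3));	/* 5 pages with content */
	printf("%zu\n", nr_dirty_pages(3, 4));	/* reader consumed an empty page: 0 */
	return 0;
}
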
@@ -556,7 +594,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 	DEFINE_WAIT(wait);
@@ -571,7 +609,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
 		/* Full only makes sense on per cpu reads */
-		full = false;
+		full = 0;
 	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -ENODEV;
@@ -623,15 +661,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		    !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
+			size_t nr_pages;
+			size_t dirty;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+			nr_pages = cpu_buffer->nr_pages;
+			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+			if (!cpu_buffer->shortest_full ||
+			    cpu_buffer->shortest_full < full)
+				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-			if (!pagebusy)
+			if (!pagebusy &&
+			    (!nr_pages || (dirty * 100) > full * nr_pages))
 				break;
 		}
 
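
With this change the third argument to ring_buffer_wait() is no longer a bool but a percentage: 0 keeps the old behaviour of waking on any data, while a non-zero value makes the waiter sleep until that percentage of the per-cpu buffer's pages contain data. shortest_full records the smallest percentage any waiter has asked for, so the writer side (see the rb_wakeups hunk below) knows the earliest point at which a wakeup is worthwhile. The wake condition (dirty * 100) > full * nr_pages is the integer form of dirty / nr_pages > full / 100 with the divisions removed. A small standalone sketch of that check, using illustrative names only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Integer "percent full" test: true once more than full_percent percent
 * of nr_pages are dirty. A buffer with no pages never blocks the waiter. */
static bool full_enough(size_t dirty, size_t nr_pages, unsigned int full_percent)
{
	if (!nr_pages)
		return true;
	return dirty * 100 > (size_t)full_percent * nr_pages;
}

int main(void)
{
	/* With 128 pages and full = 50, the waiter sleeps until 65+ pages are dirty. */
	printf("%d\n", full_enough(64, 128, 50));	/* 0: exactly half is not enough */
	printf("%d\n", full_enough(65, 128, 50));	/* 1: past the threshold */
	return 0;
}
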
@@ -1054,6 +1099,7 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 
+	local_inc(&cpu_buffer->pages_touched);
 	/*
 	 * Just make sure we have seen our old_write and synchronize
 	 * with any interrupts that come in.
@@ -2603,6 +2649,16 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 
 	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+		size_t nr_pages;
+		size_t dirty;
+		size_t full;
+
+		full = cpu_buffer->shortest_full;
+		nr_pages = cpu_buffer->nr_pages;
+		dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+		if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+			return;
+
 		cpu_buffer->irq_work.wakeup_full = true;
 		cpu_buffer->irq_work.full_waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
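
On the writer side the same threshold is applied in reverse: when full waiters are pending, the commit path computes how dirty the buffer is and simply returns, deferring the irq_work wakeup, while the dirty portion is still at or below the smallest percentage any waiter requested; a later commit re-evaluates it. A userspace sketch of that gate, with illustrative names that are not kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool should_wake_full_waiters(size_t dirty, size_t nr_pages,
				     size_t shortest_full)
{
	/* Skip the wakeup while the dirty portion is still at or below the
	 * smallest percentage any waiter asked for. */
	if (shortest_full && nr_pages &&
	    dirty * 100 <= shortest_full * nr_pages)
		return false;
	return true;
}

int main(void)
{
	/* With 128 pages and shortest_full = 25, commits keep deferring until
	 * at least 33 pages are dirty (32 * 100 = 3200 is not > 25 * 128 = 3200). */
	printf("%d\n", should_wake_full_waiters(32, 128, 25));	/* 0: defer */
	printf("%d\n", should_wake_full_waiters(33, 128, 25));	/* 1: wake readers */
	return 0;
}
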
@@ -3732,13 +3788,15 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto spin;
 
 	/*
-	 * Yeah! We succeeded in replacing the page.
+	 * Yay! We succeeded in replacing the page.
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
+	local_inc(&cpu_buffer->pages_read);
+
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
 	cpu_buffer->reader_page->read = 0;
@@ -4334,6 +4392,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
+	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_read, 0);
+	cpu_buffer->shortest_full = 0;
 	cpu_buffer->read = 0;
 	cpu_buffer->read_bytes = 0;
 