@@ -522,89 +522,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
 		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
 }
 
-static void
-rcu_scale_cleanup(void)
-{
-	int i;
-	int j;
-	int ngps = 0;
-	u64 *wdp;
-	u64 *wdpp;
-
-	/*
-	 * Would like warning at start, but everything is expedited
-	 * during the mid-boot phase, so have to wait till the end.
-	 */
-	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
-		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
-	if (rcu_gp_is_normal() && gp_exp)
-		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
-	if (gp_exp && gp_async)
-		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
-
-	if (torture_cleanup_begin())
-		return;
-	if (!cur_ops) {
-		torture_cleanup_end();
-		return;
-	}
-
-	if (reader_tasks) {
-		for (i = 0; i < nrealreaders; i++)
-			torture_stop_kthread(rcu_scale_reader,
-					     reader_tasks[i]);
-		kfree(reader_tasks);
-	}
-
-	if (writer_tasks) {
-		for (i = 0; i < nrealwriters; i++) {
-			torture_stop_kthread(rcu_scale_writer,
-					     writer_tasks[i]);
-			if (!writer_n_durations)
-				continue;
-			j = writer_n_durations[i];
-			pr_alert("%s%s writer %d gps: %d\n",
-				 scale_type, SCALE_FLAG, i, j);
-			ngps += j;
-		}
-		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
-			 scale_type, SCALE_FLAG,
-			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
-			 t_rcu_scale_writer_finished -
-			 t_rcu_scale_writer_started,
-			 ngps,
-			 rcuscale_seq_diff(b_rcu_gp_test_finished,
-					   b_rcu_gp_test_started));
-		for (i = 0; i < nrealwriters; i++) {
-			if (!writer_durations)
-				break;
-			if (!writer_n_durations)
-				continue;
-			wdpp = writer_durations[i];
-			if (!wdpp)
-				continue;
-			for (j = 0; j < writer_n_durations[i]; j++) {
-				wdp = &wdpp[j];
-				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
-					 scale_type, SCALE_FLAG,
-					 i, j, *wdp);
-				if (j % 100 == 0)
-					schedule_timeout_uninterruptible(1);
-			}
-			kfree(writer_durations[i]);
-		}
-		kfree(writer_tasks);
-		kfree(writer_durations);
-		kfree(writer_n_durations);
-	}
-
-	/* Do torture-type-specific cleanup operations. */
-	if (cur_ops->cleanup != NULL)
-		cur_ops->cleanup();
-
-	torture_cleanup_end();
-}
-
 /*
  * Return the number if non-negative.  If -1, the number of CPUs.
  * If less than -1, that much less than the number of CPUs, but
@@ -624,20 +541,6 @@ static int compute_real(int n)
 	return nr;
 }
 
-/*
- * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
- * down system.
- */
-static int
-rcu_scale_shutdown(void *arg)
-{
-	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
-	smp_mb(); /* Wake before output. */
-	rcu_scale_cleanup();
-	kernel_power_off();
-	return -EINVAL;
-}
-
 /*
  * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
  * of iterations and measure total time and number of GP for all iterations to complete.
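
The hunk above shows only the tail of compute_real(), whose comment says that -1 means the number of CPUs and values below -1 mean that much less than the number of CPUs. As a point of reference only, a minimal sketch of a function with those semantics might look like the following; the use of num_online_cpus() and the clamp to a minimum of one CPU are assumptions here, not taken from the patch:

```c
/* Sketch: map a module-parameter count onto a real CPU count.
 * n >= 0 is used as-is; -1 means all online CPUs; -2 means one
 * fewer, and so on, clamped to at least one (assumed behavior). */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
```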
@@ -874,6 +777,103 @@ kfree_scale_init(void)
 	return firsterr;
 }
 
+static void
+rcu_scale_cleanup(void)
+{
+	int i;
+	int j;
+	int ngps = 0;
+	u64 *wdp;
+	u64 *wdpp;
+
+	/*
+	 * Would like warning at start, but everything is expedited
+	 * during the mid-boot phase, so have to wait till the end.
+	 */
+	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
+		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+	if (rcu_gp_is_normal() && gp_exp)
+		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+	if (gp_exp && gp_async)
+		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
+
+	if (torture_cleanup_begin())
+		return;
+	if (!cur_ops) {
+		torture_cleanup_end();
+		return;
+	}
+
+	if (reader_tasks) {
+		for (i = 0; i < nrealreaders; i++)
+			torture_stop_kthread(rcu_scale_reader,
+					     reader_tasks[i]);
+		kfree(reader_tasks);
+	}
+
+	if (writer_tasks) {
+		for (i = 0; i < nrealwriters; i++) {
+			torture_stop_kthread(rcu_scale_writer,
+					     writer_tasks[i]);
+			if (!writer_n_durations)
+				continue;
+			j = writer_n_durations[i];
+			pr_alert("%s%s writer %d gps: %d\n",
+				 scale_type, SCALE_FLAG, i, j);
+			ngps += j;
+		}
+		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+			 scale_type, SCALE_FLAG,
+			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
+			 t_rcu_scale_writer_finished -
+			 t_rcu_scale_writer_started,
+			 ngps,
+			 rcuscale_seq_diff(b_rcu_gp_test_finished,
+					   b_rcu_gp_test_started));
+		for (i = 0; i < nrealwriters; i++) {
+			if (!writer_durations)
+				break;
+			if (!writer_n_durations)
+				continue;
+			wdpp = writer_durations[i];
+			if (!wdpp)
+				continue;
+			for (j = 0; j < writer_n_durations[i]; j++) {
+				wdp = &wdpp[j];
+				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+					 scale_type, SCALE_FLAG,
+					 i, j, *wdp);
+				if (j % 100 == 0)
+					schedule_timeout_uninterruptible(1);
+			}
+			kfree(writer_durations[i]);
+		}
+		kfree(writer_tasks);
+		kfree(writer_durations);
+		kfree(writer_n_durations);
+	}
+
+	/* Do torture-type-specific cleanup operations. */
+	if (cur_ops->cleanup != NULL)
+		cur_ops->cleanup();
+
+	torture_cleanup_end();
+}
+
+/*
+ * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
+ * down system.
+ */
+static int
+rcu_scale_shutdown(void *arg)
+{
+	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
+	smp_mb(); /* Wake before output. */
+	rcu_scale_cleanup();
+	kernel_power_off();
+	return -EINVAL;
+}
+
 static int __init
 rcu_scale_init(void)
 {
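
Not shown in this hunk is the side of rcu_scale_init() that arms the relocated rcu_scale_shutdown() kthread in the first place. Purely as an illustrative sketch, and assuming the usual torture-test wiring (a "shutdown" module parameter, the shutdown_wq wait queue, a shutdown_task pointer, and the torture_create_kthread() helper; none of these appear in the hunk above), that spawn path could look roughly like this:

```c
	/* Sketch: inside rcu_scale_init(), spawn the shutdown kthread
	 * when the "shutdown" module parameter is set (assumed wiring;
	 * error unwinding elided). */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
```

Under that arrangement, rcu_scale_shutdown() sleeps on shutdown_wq until every writer has finished, then runs rcu_scale_cleanup() and powers the system off, which is why the cleanup function must be visible to it.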