Commit 7d29907

qzhuo2 authored and Fishwaldo committed
rcu/rcuscale: Move rcu_scale_*() after kfree_scale_cleanup()
[ Upstream commit bf5ddd7 ]

This code-movement-only commit moves the rcu_scale_cleanup() and
rcu_scale_shutdown() functions to follow kfree_scale_cleanup(). This code
movement is in preparation for a bug-fix patch that invokes
kfree_scale_cleanup() from rcu_scale_cleanup().

Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Stable-dep-of: 23fc8df ("rcu/rcuscale: Stop kfree_scale_thread thread(s) after unloading rcuscale")
Signed-off-by: Sasha Levin <sashal@kernel.org>
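Why move the code rather than simply call across the file: C requires a function to be declared or defined above its call site, so once rcu_scale_cleanup() needs to invoke kfree_scale_cleanup(), either a forward declaration must be added or rcu_scale_cleanup() must be defined after kfree_scale_cleanup(). This commit takes the second option. Below is a standalone sketch of the two alternatives, using illustrative names rather than the kernel's:

#include <stdio.h>

/* Alternative 1: add a forward declaration so the caller can stay first. */
static void kfree_side_cleanup(void);

static void caller_defined_first(void)
{
	kfree_side_cleanup();	/* legal only because of the declaration above */
}

/* Alternative 2 (the one this commit prepares for): define the callee
 * first, then the caller; no forward declaration is needed. */
static void kfree_side_cleanup(void)
{
	puts("kfree-side cleanup");
}

static void caller_defined_after(void)
{
	kfree_side_cleanup();	/* legal: the definition appears above */
}

int main(void)
{
	caller_defined_first();
	caller_defined_after();
	return 0;
}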
1 parent d52d778 commit 7d29907

File tree

1 file changed: 97 additions, 97 deletions


kernel/rcu/rcuscale.c

Lines changed: 97 additions & 97 deletions
@@ -500,89 +500,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
 		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
 }
 
-static void
-rcu_scale_cleanup(void)
-{
-	int i;
-	int j;
-	int ngps = 0;
-	u64 *wdp;
-	u64 *wdpp;
-
-	/*
-	 * Would like warning at start, but everything is expedited
-	 * during the mid-boot phase, so have to wait till the end.
-	 */
-	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
-		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
-	if (rcu_gp_is_normal() && gp_exp)
-		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
-	if (gp_exp && gp_async)
-		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
-
-	if (torture_cleanup_begin())
-		return;
-	if (!cur_ops) {
-		torture_cleanup_end();
-		return;
-	}
-
-	if (reader_tasks) {
-		for (i = 0; i < nrealreaders; i++)
-			torture_stop_kthread(rcu_scale_reader,
-					     reader_tasks[i]);
-		kfree(reader_tasks);
-	}
-
-	if (writer_tasks) {
-		for (i = 0; i < nrealwriters; i++) {
-			torture_stop_kthread(rcu_scale_writer,
-					     writer_tasks[i]);
-			if (!writer_n_durations)
-				continue;
-			j = writer_n_durations[i];
-			pr_alert("%s%s writer %d gps: %d\n",
-				 scale_type, SCALE_FLAG, i, j);
-			ngps += j;
-		}
-		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
-			 scale_type, SCALE_FLAG,
-			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
-			 t_rcu_scale_writer_finished -
-			 t_rcu_scale_writer_started,
-			 ngps,
-			 rcuscale_seq_diff(b_rcu_gp_test_finished,
-					   b_rcu_gp_test_started));
-		for (i = 0; i < nrealwriters; i++) {
-			if (!writer_durations)
-				break;
-			if (!writer_n_durations)
-				continue;
-			wdpp = writer_durations[i];
-			if (!wdpp)
-				continue;
-			for (j = 0; j < writer_n_durations[i]; j++) {
-				wdp = &wdpp[j];
-				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
-					 scale_type, SCALE_FLAG,
-					 i, j, *wdp);
-				if (j % 100 == 0)
-					schedule_timeout_uninterruptible(1);
-			}
-			kfree(writer_durations[i]);
-		}
-		kfree(writer_tasks);
-		kfree(writer_durations);
-		kfree(writer_n_durations);
-	}
-
-	/* Do torture-type-specific cleanup operations. */
-	if (cur_ops->cleanup != NULL)
-		cur_ops->cleanup();
-
-	torture_cleanup_end();
-}
-
 /*
  * Return the number if non-negative. If -1, the number of CPUs.
  * If less than -1, that much less than the number of CPUs, but
@@ -602,20 +519,6 @@ static int compute_real(int n)
 	return nr;
 }
 
-/*
- * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
- * down system.
- */
-static int
-rcu_scale_shutdown(void *arg)
-{
-	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
-	smp_mb(); /* Wake before output. */
-	rcu_scale_cleanup();
-	kernel_power_off();
-	return -EINVAL;
-}
-
 /*
  * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
  * of iterations and measure total time and number of GP for all iterations to complete.
@@ -790,6 +693,103 @@ kfree_scale_init(void)
 	return firsterr;
 }
 
+static void
+rcu_scale_cleanup(void)
+{
+	int i;
+	int j;
+	int ngps = 0;
+	u64 *wdp;
+	u64 *wdpp;
+
+	/*
+	 * Would like warning at start, but everything is expedited
+	 * during the mid-boot phase, so have to wait till the end.
+	 */
+	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
+		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+	if (rcu_gp_is_normal() && gp_exp)
+		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+	if (gp_exp && gp_async)
+		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
+
+	if (torture_cleanup_begin())
+		return;
+	if (!cur_ops) {
+		torture_cleanup_end();
+		return;
+	}
+
+	if (reader_tasks) {
+		for (i = 0; i < nrealreaders; i++)
+			torture_stop_kthread(rcu_scale_reader,
+					     reader_tasks[i]);
+		kfree(reader_tasks);
+	}
+
+	if (writer_tasks) {
+		for (i = 0; i < nrealwriters; i++) {
+			torture_stop_kthread(rcu_scale_writer,
+					     writer_tasks[i]);
+			if (!writer_n_durations)
+				continue;
+			j = writer_n_durations[i];
+			pr_alert("%s%s writer %d gps: %d\n",
+				 scale_type, SCALE_FLAG, i, j);
+			ngps += j;
+		}
+		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+			 scale_type, SCALE_FLAG,
+			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
+			 t_rcu_scale_writer_finished -
+			 t_rcu_scale_writer_started,
+			 ngps,
+			 rcuscale_seq_diff(b_rcu_gp_test_finished,
+					   b_rcu_gp_test_started));
+		for (i = 0; i < nrealwriters; i++) {
+			if (!writer_durations)
+				break;
+			if (!writer_n_durations)
+				continue;
+			wdpp = writer_durations[i];
+			if (!wdpp)
+				continue;
+			for (j = 0; j < writer_n_durations[i]; j++) {
+				wdp = &wdpp[j];
+				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+					 scale_type, SCALE_FLAG,
+					 i, j, *wdp);
+				if (j % 100 == 0)
+					schedule_timeout_uninterruptible(1);
+			}
+			kfree(writer_durations[i]);
+		}
+		kfree(writer_tasks);
+		kfree(writer_durations);
+		kfree(writer_n_durations);
+	}
+
+	/* Do torture-type-specific cleanup operations. */
+	if (cur_ops->cleanup != NULL)
+		cur_ops->cleanup();
+
+	torture_cleanup_end();
+}
+
+/*
+ * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
+ * down system.
+ */
+static int
+rcu_scale_shutdown(void *arg)
+{
+	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
+	smp_mb(); /* Wake before output. */
+	rcu_scale_cleanup();
+	kernel_power_off();
+	return -EINVAL;
+}
+
 static int __init
 rcu_scale_init(void)
 {
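With the functions now in this order, the advertised follow-up fix (23fc8df, "rcu/rcuscale: Stop kfree_scale_thread thread(s) after unloading rcuscale") can invoke kfree_scale_cleanup() directly from rcu_scale_cleanup(). A minimal sketch of the shape such a call might take, assuming it keys off the module's existing kfree_rcu_test parameter; see the follow-up commit itself for the actual change:

	/* Hypothetical sketch, not the verbatim follow-up patch: early in
	 * rcu_scale_cleanup(), divert to the kfree_rcu() teardown path when
	 * that test mode is selected, so its kthreads are stopped when the
	 * module is unloaded. */
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}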
