-From 621f1ecd04cdfbe39b65ff5f83f168b439a2b251 Mon Sep 17 00:00:00 2001
+From f0626f74cee6463884ab30abe77b6f68165cc17a Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
-Date: Sat, 19 Nov 2022 15:43:12 +0100
+Date: Thu, 24 Nov 2022 13:48:52 +0100
 Subject: [PATCH] bore-cachy
 
 Signed-off-by: Peter Jung <admin@ptr1337.dev>
 ---
  include/linux/sched.h   |   5 ++
  init/Kconfig            |  21 ++++++++
- kernel/sched/core.c     |  22 ++++++++
+ kernel/sched/core.c     |  29 +++++++++++
+ kernel/sched/debug.c    |   3 ++
  kernel/sched/fair.c     | 112 +++++++++++++++++++++++++++++++++++++++-
  kernel/sched/features.h |   8 +++
- 5 files changed, 166 insertions(+), 2 deletions(-)
+ 6 files changed, 176 insertions(+), 2 deletions(-)
 
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 87eb96123e6c..e1ca07e4f531 100644
+index f601b193bc26..3b748bda6031 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -556,6 +556,11 @@ struct sched_entity {
@@ -61,18 +62,19 @@ index c6d646a27d9a..a58d2919c78c 100644
6162 bool "Automatic process group scheduling"
6263 select CGROUPS
6364diff --git a/kernel/sched/core.c b/kernel/sched/core.c
64- index 9ba8821927a5..8e8d0b01fa13 100644
65+ index d0df2f1fe5a9..99158ac2b614 100644
6566--- a/kernel/sched/core.c
6667+++ b/kernel/sched/core.c
67- @@ -4351 ,6 +4351,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
68+ @@ -4369 ,6 +4369,22 @@ int wake_up_state(struct task_struct *p, unsigned int state)
6869 return try_to_wake_up(p, state, 0);
6970 }
7071
7172+ #ifdef CONFIG_SCHED_BORE
72- + static inline void sched_fork_burst (struct task_struct *p)
73+ + static inline void sched_on_fork_calc_prev_burst_from_siblings (struct task_struct *p)
7374+ {
7475+ struct task_struct *sib;
75- + u64 cnt, sum, avg;
76+ + u64 sum, avg;
77+ + u16 cnt;
7678+ list_for_each_entry(sib, &p->sibling, sibling) {
7779+ cnt++;
7880+ sum += sib->se.prev_burst_time;
@@ -86,29 +88,63 @@ index 9ba8821927a5..8e8d0b01fa13 100644
  /*
   * Perform scheduler related setup for a newly forked process p.
   * p is forked by current.
-@@ -4367,6 +4382,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4385,6 +4401,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
  	p->se.prev_sum_exec_runtime = 0;
  	p->se.nr_migrations = 0;
  	p->se.vruntime = 0;
 +#ifdef CONFIG_SCHED_BORE
-+	sched_fork_burst(p);
++	p->se.burst_time = 0;
 +#endif // CONFIG_SCHED_BORE
  	INIT_LIST_HEAD(&p->se.group_node);
  	RB_CLEAR_NODE(&p->se.latency_node);
 
-@@ -9732,6 +9750,10 @@ void __init sched_init(void)
+@@ -4600,6 +4619,9 @@ late_initcall(sched_core_sysctl_init);
+ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
+ 	__sched_fork(clone_flags, p);
++#ifdef CONFIG_SCHED_BORE
++	sched_on_fork_calc_prev_burst_from_siblings(p);
++#endif // CONFIG_SCHED_BORE
+ 	/*
+ 	 * We mark the process as NEW here. This guarantees that
+ 	 * nobody will actually run it, and a signal or other external
+@@ -9083,6 +9105,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 
+ 	idle->__state = TASK_RUNNING;
+ 	idle->se.exec_start = sched_clock();
++#ifdef CONFIG_SCHED_BORE
++	idle->se.prev_burst_time = 0;
++#endif //CONFIG_SCHED_BORE
+ 	/*
+ 	 * PF_KTHREAD should already be set at this point; regardless, make it
+ 	 * look like a proper per-CPU kthread.
+@@ -9750,6 +9775,10 @@ void __init sched_init(void)
  	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
  #endif
 
 +#ifdef CONFIG_SCHED_BORE
-+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 1.7.0 by Masahito Suzuki");
++	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 1.7.1 by Masahito Suzuki");
 +#endif // CONFIG_SCHED_BORE
 +
  	wait_bit_init();
 
  #ifdef CONFIG_FAIR_GROUP_SCHED
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index b3922184af91..6c87abccf794 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -547,6 +547,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
+ 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+ 
++#ifdef CONFIG_SCHED_BORE
++	SEQ_printf(m, " %2d", p->se.burst_score);
++#endif
+ #ifdef CONFIG_NUMA_BALANCING
+ 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ #endif
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 16aac664de6b..58a869c637a8 100644
+index e2005697b538..ad450649b52e 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -19,6 +19,9 @@
@@ -136,7 +172,7 @@ index 16aac664de6b..58a869c637a8 100644
  int sched_thermal_decay_shift;
  static int __init setup_sched_thermal_decay_shift(char *str)
  {
-@@ -198,6 +209,35 @@ static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+@@ -201,6 +212,35 @@ static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 
  #ifdef CONFIG_SYSCTL
  static struct ctl_table sched_fair_sysctls[] = {
@@ -172,7 +208,7 @@ index 16aac664de6b..58a869c637a8 100644
  	{
  		.procname = "sched_child_runs_first",
  		.data = &sysctl_sched_child_runs_first,
-@@ -963,6 +1003,31 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -966,6 +1006,31 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
  }
  #endif /* CONFIG_SMP */
 
@@ -204,7 +240,7 @@ index 16aac664de6b..58a869c637a8 100644
  /*
   * Update the current task's runtime statistics.
   */
-@@ -992,6 +1057,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -995,6 +1060,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
  	curr->sum_exec_runtime += delta_exec;
  	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
@@ -218,7 +254,7 @@ index 16aac664de6b..58a869c637a8 100644
  	curr->vruntime += calc_delta_fair(delta_exec, curr);
  	update_min_vruntime(cfs_rq);
 
-@@ -5038,6 +5110,11 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -5046,6 +5118,11 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  	se->prev_sum_exec_runtime = se->sum_exec_runtime;
  }
 
@@ -230,7 +266,7 @@ index 16aac664de6b..58a869c637a8 100644
  static int
  wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 
-@@ -5082,7 +5159,13 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -5090,7 +5167,13 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  		se = second;
  	}
 
@@ -245,7 +281,7 @@ index 16aac664de6b..58a869c637a8 100644
  	/*
  	 * Someone really wants this to run. If it's not unfair, run it.
  	 */
-@@ -6238,6 +6321,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6376,6 +6459,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  	util_est_dequeue(&rq->cfs, p);
 
  	for_each_sched_entity(se) {
@@ -255,7 +291,7 @@ index 16aac664de6b..58a869c637a8 100644
  		cfs_rq = cfs_rq_of(se);
  		dequeue_entity(cfs_rq, se, flags);
 
-@@ -7622,7 +7708,12 @@ static unsigned long wakeup_gran(struct sched_entity *se)
+@@ -7760,7 +7846,12 @@ static unsigned long wakeup_gran(struct sched_entity *se)
   *
   */
  static int
@@ -268,7 +304,7 @@ index 16aac664de6b..58a869c637a8 100644
  {
  	s64 gran, vdiff = curr->vruntime - se->vruntime;
  	s64 offset = wakeup_latency_gran(curr, se);
-@@ -7642,12 +7733,21 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+@@ -7780,12 +7871,21 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
  	 * chance to preempt current.
  	 */
  	gran = min_t(s64, gran, get_latency_max());
@@ -290,7 +326,7 @@ index 16aac664de6b..58a869c637a8 100644
 
  static void set_last_buddy(struct sched_entity *se)
  {
-@@ -7747,7 +7847,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7885,7 +7985,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
  		return;
 
  	update_curr(cfs_rq_of(se));
@@ -304,7 +340,7 @@ index 16aac664de6b..58a869c637a8 100644
  	/*
  	 * Bias pick_next to pick the sched entity that is
  	 * triggering this preemption.
-@@ -7983,6 +8088,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -8121,6 +8226,9 @@ static void yield_task_fair(struct rq *rq)
  	struct task_struct *curr = rq->curr;
  	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
  	struct sched_entity *se = &curr->se;
@@ -343,5 +379,5 @@ index ee7f23c76bd3..3115bde98211 100644
  /*
   * Prefer to schedule the task that ran last (when we did
 -- 
-2.38.1.436.geea7033409
+2.39.0.rc0
 
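
A note on the fork-time helper added above: sched_on_fork_calc_prev_burst_from_siblings() walks the new task's p->sibling list and averages the siblings' se.prev_burst_time (the tail of that hunk, where avg is derived and stored, is elided in this view). Below is a minimal user-space sketch of that averaging; struct task and avg_sibling_prev_burst() are hypothetical stand-ins for the kernel's task_struct and list_for_each_entry() walk, and the accumulators are explicitly zeroed here, which the visible hunk lines do not do for cnt and sum.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the sched_entity field read per sibling. */
struct task {
	uint64_t prev_burst_time;
};

/* Average prev_burst_time across n siblings; zero if there are none. */
static uint64_t avg_sibling_prev_burst(const struct task *sib, size_t n)
{
	uint64_t sum = 0;	/* zero-initialized, unlike the hunk shown */
	uint16_t cnt = 0;

	for (size_t i = 0; i < n; i++) {
		cnt++;
		sum += sib[i].prev_burst_time;
	}
	return cnt ? sum / cnt : 0;
}

int main(void)
{
	struct task sibs[] = { { 400 }, { 800 }, { 900 } };

	printf("avg prev_burst_time = %llu\n",
	       (unsigned long long)avg_sibling_prev_burst(sibs, 3));
	return 0;
}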