
Commit fd2f441

Peter Zijlstra authored and Ingo Molnar committed
sched: Provide p->on_rq
Provide a generic p->on_rq because the p->se.on_rq semantics are
unfavourable for lockless wakeups but needed for sched_fair.

In particular, p->on_rq is only cleared when we actually dequeue the task
in schedule() and not on any random dequeue as done by things like
__migrate_task() and __sched_setscheduler().

This also allows us to remove p->se usage from !sched_fair code.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.949545047@chello.nl
1 parent d7c01d2 commit fd2f441
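The semantics described in the changelog can be sketched in plain C: the per-entity flag (modelled below as se.on_rq) is toggled by every enqueue/dequeue, including transient ones such as a migration or a policy change, while the generic flag (on_rq) stays set until the task really blocks in schedule(). The sketch is a user-space illustration only, under those simplifying assumptions; every name in it (task_sketch, activate_sketch, block_in_schedule_sketch, and friends) is a hypothetical stand-in, not kernel code.

/*
 * Minimal user-space sketch of the two flags' lifecycles, under the
 * simplifying assumptions stated above (not kernel code).
 */
#include <stdio.h>

struct sched_entity_sketch {
	int on_rq;	/* per-class bookkeeping, cleared by any dequeue */
};

struct task_sketch {
	int on_rq;	/* generic flag, cleared only when the task blocks */
	struct sched_entity_sketch se;
};

/* Every per-class dequeue/enqueue flips se.on_rq. */
static void dequeue_sketch(struct task_sketch *p) { p->se.on_rq = 0; }
static void enqueue_sketch(struct task_sketch *p) { p->se.on_rq = 1; }

/* Wakeup or fork: the task becomes runnable, both flags are set. */
static void activate_sketch(struct task_sketch *p)
{
	enqueue_sketch(p);
	p->on_rq = 1;
}

/* Only the sleep path in schedule() clears the generic flag. */
static void block_in_schedule_sketch(struct task_sketch *p)
{
	dequeue_sketch(p);
	p->on_rq = 0;
}

int main(void)
{
	struct task_sketch p = { 0 };

	activate_sketch(&p);

	/*
	 * Transient dequeue, as a migration or a policy change would do:
	 * an observer sampling se.on_rq here sees 0 although the task is
	 * still conceptually runnable; the generic flag keeps saying so.
	 */
	dequeue_sketch(&p);
	printf("mid-migration: on_rq=%d se.on_rq=%d\n", p.on_rq, p.se.on_rq);
	enqueue_sketch(&p);

	block_in_schedule_sketch(&p);
	printf("after block:   on_rq=%d se.on_rq=%d\n", p.on_rq, p.se.on_rq);
	return 0;
}

Under this simplification, code outside sched_fair can test the generic flag without caring which transient per-class dequeues happened in between, which is the property the changelog is after; the diff below switches the !sched_fair users over accordingly.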

File tree

5 files changed: +31 -28 lines changed

include/linux/sched.h

+1
@@ -1202,6 +1202,7 @@ struct task_struct {
 #ifdef CONFIG_SMP
 	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;

kernel/sched.c

+20 -18
@@ -1785,15 +1785,13 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, flags);
-	p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, flags);
-	p->se.on_rq = 0;
 }
 
 /*
@@ -2128,7 +2126,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -2203,7 +2201,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->se.on_rq || task_running(rq, p);
+	return p->on_rq || task_running(rq, p);
 }
 
 /*
@@ -2263,7 +2261,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->se.on_rq;
+		on_rq = p->on_rq;
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2444,6 +2442,7 @@ ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
+	p->on_rq = 1;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -2506,7 +2505,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	cpu = task_cpu(p);
 
-	if (p->se.on_rq)
+	if (p->on_rq)
 		goto out_running;
 
 	orig_cpu = cpu;
@@ -2583,7 +2582,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		return;
 
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_post_activation(p, rq, 0);
@@ -2620,19 +2619,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
+	p->on_rq = 0;
+
+	p->se.on_rq = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
-	p->se.on_rq = 0;
-	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2750,6 +2751,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
+	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4051,7 +4053,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->se.on_rq)
+	if (prev->on_rq)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
@@ -4126,7 +4128,9 @@ asmlinkage void __sched schedule(void)
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
+
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+			prev->on_rq = 0;
 
 			/*
 			 * If we are going to sleep and we have plugged IO queued, make
@@ -4695,7 +4699,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		dequeue_task(rq, p, 0);
@@ -4743,7 +4747,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		dequeue_task(rq, p, 0);
 
@@ -4877,8 +4881,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->se.on_rq);
-
 	p->policy = policy;
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
50445046
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
50455047
goto recheck;
50465048
}
5047-
on_rq = p->se.on_rq;
5049+
on_rq = p->on_rq;
50485050
running = task_current(rq, p);
50495051
if (on_rq)
50505052
deactivate_task(rq, p, 0);
@@ -5965,7 +5967,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
59655967
* If we're not on a rq, the next wake-up will ensure we're
59665968
* placed properly.
59675969
*/
5968-
if (p->se.on_rq) {
5970+
if (p->on_rq) {
59695971
deactivate_task(rq_src, p, 0);
59705972
set_task_cpu(p, dest_cpu);
59715973
activate_task(rq_dest, p, 0);
@@ -8339,7 +8341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
83398341
int old_prio = p->prio;
83408342
int on_rq;
83418343

8342-
on_rq = p->se.on_rq;
8344+
on_rq = p->on_rq;
83438345
if (on_rq)
83448346
deactivate_task(rq, p, 0);
83458347
__setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8682,7 +8684,7 @@ void sched_move_task(struct task_struct *tsk)
86828684
rq = task_rq_lock(tsk, &flags);
86838685

86848686
running = task_current(rq, tsk);
8685-
on_rq = tsk->se.on_rq;
8687+
on_rq = tsk->on_rq;
86868688

86878689
if (on_rq)
86888690
dequeue_task(rq, tsk, 0);

kernel/sched_debug.c

+1 -1
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_lock_irqsave(&tasklist_lock, flags);
 
 	do_each_thread(g, p) {
-		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+		if (!p->on_rq || task_cpu(p) != rq_cpu)
 			continue;
 
 		print_task(m, rq, p);

kernel/sched_rt.c

+8 -8
@@ -1136,7 +1136,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1287,7 +1287,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			     !cpumask_test_cpu(lowest_rq->cpu,
 					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
-			     !task->se.on_rq)) {
+			     !task->on_rq)) {
 
 			raw_spin_unlock(&lowest_rq->lock);
 			lowest_rq = NULL;
@@ -1321,7 +1321,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
 
 	return p;
@@ -1467,7 +1467,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 
 			/*
 			 * There's a chance that p is higher in priority
@@ -1538,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Update the migration status of the RQ if we have an RT task
 	 * which is running AND changing its weight value.
 	 */
-	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
 		if (!task_current(rq, p)) {
@@ -1608,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->se.on_rq && !rq->rt.rt_nr_running)
+	if (p->on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1638,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->se.on_rq && rq->curr != p) {
+	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1657,7 +1657,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 
 	if (rq->curr == p) {

kernel/sched_stoptask.c

+1 -1
@@ -26,7 +26,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->se.on_rq)
+	if (stop && stop->on_rq)
 		return stop;
 
 	return NULL;
