Commit 3ca7a44

Peter Zijlstra authored and Ingo Molnar committed
sched: Always provide p->on_cpu
Always provide p->on_cpu so that we can determine if it's on a CPU
without having to lock the rq.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.785452014@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent 184748c commit 3ca7a44
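
As a hedged illustration of what this enables (a hypothetical helper, not part of this commit): once ->on_cpu is maintained on every CONFIG_SMP build, a caller can wait for a task to leave its CPU without taking rq->lock, relying on the smp_wmb() that finish_lock_switch() issues before clearing the flag (see the kernel/sched.c diff below):

/*
 * Hypothetical sketch, not from this commit: busy-wait until @p is no
 * longer executing on any CPU, without taking the runqueue lock.
 */
static void wait_task_off_cpu(struct task_struct *p)
{
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() that finish_lock_switch() performs
	 * before clearing ->on_cpu, so everything the old CPU did in
	 * the context switch is visible here.
	 */
	smp_rmb();
}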

2 files changed (+30 −20 lines)

include/linux/sched.h

+1 −3

@@ -1200,9 +1200,7 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	int on_cpu;
 #endif
 
 	int prio, static_prio, normal_prio;

kernel/sched.c

+29 −17

@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;

@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP

@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 * SMP rebalancing from interrupt is the only thing that cares
 	 * here.
 	 */
-	next->oncpu = 1;
+	next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	raw_spin_unlock_irq(&rq->lock);

@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 */
 	smp_wmb();
-	prev->oncpu = 0;
+	prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();

@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
+#if defined(CONFIG_SMP)
+	p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */

@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);

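Net effect, reconstructed from the hunks above: task_running() is now defined once, ahead of the #ifndef __ARCH_WANT_UNLOCKED_CTXSW block, as

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

so both the locked and unlocked context-switch variants share one implementation, and the __ARCH_WANT_UNLOCKED_CTXSW-only copy that read the old p->oncpu field is removed. Correspondingly, sched_fork() and init_idle() now initialize ->on_cpu whenever CONFIG_SMP is set, not only when __ARCH_WANT_UNLOCKED_CTXSW is also defined.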