@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 * SMP rebalancing from interrupt is the only thing that cares
 	 * here.
 	 */
-	next->oncpu = 1;
+	next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	raw_spin_unlock_irq(&rq->lock);
@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 */
 	smp_wmb();
-	prev->oncpu = 0;
+	prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
+#if defined(CONFIG_SMP)
+	p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
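The ordering comment in finish_lock_switch() is the subtle part of this change: every store belonging to the context switch must be visible before ->on_cpu drops to zero, because any path that polls task_running() (for example a wakeup or migration path, which is not shown in this hunk and is assumed here for illustration) may act on the task the instant it observes ->on_cpu == 0. The following standalone C11 sketch is only a userspace analogue of that store/poll pairing; it is not kernel code, and the names in it are invented for the example.

/*
 * Userspace analogue of the smp_wmb()/on_cpu pattern above: the thread
 * playing the "switching-out CPU" publishes its state before clearing
 * on_cpu, and the waiter only touches the task after seeing on_cpu == 0.
 * Build with: cc -std=c11 -pthread on_cpu_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {
	atomic_int on_cpu;	/* 1 while the "CPU" is still switching it out */
	int saved_state;	/* stores that must be visible before on_cpu = 0 */
};

static struct task t = { .on_cpu = 1 };

static void *cpu_finishing_switch(void *arg)
{
	(void)arg;
	t.saved_state = 42;	/* the context-switch work */
	/* release store plays the role of smp_wmb(); prev->on_cpu = 0; */
	atomic_store_explicit(&t.on_cpu, 0, memory_order_release);
	return NULL;
}

static void *migration_waiter(void *arg)
{
	(void)arg;
	/* spin until the task is no longer running anywhere */
	while (atomic_load_explicit(&t.on_cpu, memory_order_acquire))
		;	/* cpu_relax() equivalent omitted */
	/* safe to "move" the task: its saved state is guaranteed visible */
	printf("migrating task, saved_state=%d\n", t.saved_state);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&b, NULL, migration_waiter, NULL);
	pthread_create(&a, NULL, cpu_finishing_switch, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The acquire load on the waiter side stands in for the read-side ordering that the kernel gets from its own barriers around the ->on_cpu poll; the point being illustrated is only the one the diff's comment states, namely that clearing the flag must come after the switch is completely finished.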