@@ -1785,15 +1785,13 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, flags);
-	p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, flags);
-	p->se.on_rq = 0;
 }
 
 /*
@@ -2128,7 +2126,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule.  In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -2203,7 +2201,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->se.on_rq || task_running(rq, p);
+	return p->on_rq || task_running(rq, p);
 }
 
 /*
@@ -2263,7 +2261,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->se.on_rq;
+		on_rq = p->on_rq;
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2444,6 +2442,7 @@ ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
+	p->on_rq = 1;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -2506,7 +2505,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	cpu = task_cpu(p);
 
-	if (p->se.on_rq)
+	if (p->on_rq)
 		goto out_running;
 
 	orig_cpu = cpu;
@@ -2583,7 +2582,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		return;
 
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_post_activation(p, rq, 0);
@@ -2620,19 +2619,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
+	p->on_rq			= 0;
+
+	p->se.on_rq			= 0;
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
-	p->se.on_rq = 0;
-	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2750,6 +2751,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
+	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4051,7 +4053,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->se.on_rq)
+	if (prev->on_rq)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
@@ -4126,7 +4128,9 @@ asmlinkage void __sched schedule(void)
 			if (to_wakeup)
 				try_to_wake_up_local(to_wakeup);
 		}
+
 		deactivate_task(rq, prev, DEQUEUE_SLEEP);
+		prev->on_rq = 0;
 
 		/*
 		 * If we are going to sleep and we have plugged IO queued, make
@@ -4695,7 +4699,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		dequeue_task(rq, p, 0);
@@ -4743,7 +4747,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		dequeue_task(rq, p, 0);
 
@@ -4877,8 +4881,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->se.on_rq);
-
 	p->policy = policy;
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
@@ -5044,7 +5046,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -5965,7 +5967,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->se.on_rq) {
+	if (p->on_rq) {
 		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		activate_task(rq_dest, p, 0);
@@ -8339,7 +8341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	int old_prio = p->prio;
 	int on_rq;
 
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8682,7 +8684,7 @@ void sched_move_task(struct task_struct *tsk)
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->se.on_rq;
+	on_rq = tsk->on_rq;
 
 	if (on_rq)
 		dequeue_task(rq, tsk, 0);
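
All of the hunks above are in kernel/sched.c: the generic enqueue_task()/dequeue_task() helpers stop writing se.on_rq, the core paths (ttwu_activate(), wake_up_new_task(), the sleep path in schedule()) maintain a task-level p->on_rq instead, and the remaining p->se.on_rq readers are switched to the new flag. The declaration of the field itself is not part of this hunk set; presumably it is added to struct task_struct elsewhere in the same change. The standalone C sketch below models the intended split with simplified stand-in structs and hypothetical model_activate()/model_deactivate() helpers, purely for illustration:

/*
 * Standalone model of the split introduced above (illustration only,
 * not kernel code): p->on_rq is the core scheduler's task-level
 * "queued on a runqueue" flag, while se.on_rq remains a per-entity
 * detail owned by the scheduling class. Structs and helpers are
 * simplified stand-ins, not the real layouts.
 */
#include <stdio.h>

struct sched_entity {
	int on_rq;		/* class-internal: entity queued in its class's structures */
};

struct task_struct {
	int on_rq;		/* core-scheduler view: task is on a runqueue */
	struct sched_entity se;
};

/* Mirrors activate_task() plus the new "p->on_rq = 1" done by its callers. */
static void model_activate(struct task_struct *p)
{
	p->se.on_rq = 1;	/* what the class's enqueue_task() would do */
	p->on_rq = 1;		/* what ttwu_activate()/wake_up_new_task() now do */
}

/* Mirrors deactivate_task() plus the new "prev->on_rq = 0" in schedule(). */
static void model_deactivate(struct task_struct *p)
{
	p->se.on_rq = 0;	/* what the class's dequeue_task() would do */
	p->on_rq = 0;		/* what schedule() now does after deactivate_task() */
}

int main(void)
{
	struct task_struct p = { 0 };

	model_activate(&p);
	printf("after activate:   core on_rq=%d, se.on_rq=%d\n", p.on_rq, p.se.on_rq);

	model_deactivate(&p);
	printf("after deactivate: core on_rq=%d, se.on_rq=%d\n", p.on_rq, p.se.on_rq);
	return 0;
}

Reading p->on_rq thus answers "is this task queued on any runqueue?" without reaching into the fair-class entity, which is what the converted call sites (wait_task_inactive(), rt_mutex_setprio(), sched_move_task(), and the rest) need.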