
Commit 1856e22

Andy Ross authored and nashif committed
kernel/sched: Don't preempt cooperative threads
The scheduler rewrite added a regression in uniprocessor mode where cooperative threads would be unexpectedly preempted, because nothing was checking the preemption status of _current at the point where the next-thread cache pointer was being updated.

Note that update_cache() needs a little more context: spots like k_yield() that leave _current runnable need to be able to tell it that "yes, preemption is OK here even though the thread is cooperative". So it has a "preempt_ok" argument now.

Interestingly, this didn't get caught because we don't test for it. We have lots and lots of tests of the converse cases (i.e. making sure that threads get preempted when we expect them to), but nothing that explicitly tries to jump in front of a cooperative thread.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
1 parent a149232 commit 1856e22
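The commit message points out that nothing in the test suite explicitly tries to jump in front of a cooperative thread. Such a test could look roughly like the sketch below; it is illustrative only (the names hi_stack, hi_thread, hi_prio_fn and coop_preemption_check, and the stack size, are invented here, not taken from the Zephyr tree): a thread running at a cooperative priority makes a higher-priority but preemptible thread ready, and must keep the CPU until it yields.

/*
 * Illustrative sketch only -- not part of this commit or the Zephyr tree.
 * A cooperative thread spawns a higher-priority *preemptible* thread and
 * must not lose the CPU until it calls k_yield().
 */
#include <zephyr.h>

K_THREAD_STACK_DEFINE(hi_stack, 1024);
static struct k_thread hi_thread;
static volatile int coop_done;
static volatile int preempted_early;

static void hi_prio_fn(void *p1, void *p2, void *p3)
{
        /* If this runs before coop_done is set, the cooperative caller
         * was preempted -- exactly the regression fixed by this commit.
         */
        preempted_early = !coop_done;
}

/* Call from a thread running at a cooperative priority, e.g. K_PRIO_COOP(2) */
void coop_preemption_check(void)
{
        k_thread_create(&hi_thread, hi_stack, K_THREAD_STACK_SIZEOF(hi_stack),
                        hi_prio_fn, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(0), 0, K_NO_WAIT);

        coop_done = 1;          /* reached before hi_prio_fn if the fix works */
        k_yield();              /* explicitly hand over the CPU now */

        /* expect preempted_early == 0 */
}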

File tree: 1 file changed (+30, -14 lines)


kernel/sched.c

Lines changed: 30 additions & 14 deletions
@@ -126,10 +126,21 @@ static struct k_thread *next_up(void)
 #endif
 }

-static void update_cache(void)
+static void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
-        _kernel.ready_q.cache = next_up();
+        struct k_thread *th = next_up();
+
+        if (_current && !_is_idle(_current) && !_is_thread_dummy(_current)) {
+                /* Don't preempt cooperative threads unless the caller allows
+                 * it (i.e. k_yield())
+                 */
+                if (!preempt_ok && !_is_preempt(_current)) {
+                        th = _current;
+                }
+        }
+
+        _kernel.ready_q.cache = th;
 #endif
 }

@@ -138,7 +149,7 @@ void _add_thread_to_ready_q(struct k_thread *thread)
         LOCKED(&sched_lock) {
                 _priq_run_add(&_kernel.ready_q.runq, thread);
                 _mark_thread_as_queued(thread);
-                update_cache();
+                update_cache(0);
         }
 }

@@ -148,7 +159,7 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread)
                 _priq_run_remove(&_kernel.ready_q.runq, thread);
                 _priq_run_add(&_kernel.ready_q.runq, thread);
                 _mark_thread_as_queued(thread);
-                update_cache();
+                update_cache(0);
         }
 }

@@ -158,7 +169,7 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
                 if (_is_thread_queued(thread)) {
                         _priq_run_remove(&_kernel.ready_q.runq, thread);
                         _mark_thread_as_not_queued(thread);
-                        update_cache();
+                        update_cache(thread == _current);
                 }
         }
 }
@@ -277,7 +288,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
                         _priq_run_remove(&_kernel.ready_q.runq, thread);
                         thread->base.prio = prio;
                         _priq_run_add(&_kernel.ready_q.runq, thread);
-                        update_cache();
+                        update_cache(1);
                 } else {
                         thread->base.prio = prio;
                 }
@@ -302,7 +313,9 @@ int _reschedule(int key)

 void k_sched_lock(void)
 {
-        _sched_lock();
+        LOCKED(&sched_lock) {
+                _sched_lock();
+        }
 }

 void k_sched_unlock(void)
@@ -311,16 +324,15 @@ void k_sched_unlock(void)
         __ASSERT(_current->base.sched_locked != 0, "");
         __ASSERT(!_is_in_isr(), "");

-        int key = irq_lock();
-
-        /* compiler_barrier() not needed, comes from irq_lock() */
-
-        ++_current->base.sched_locked;
+        LOCKED(&sched_lock) {
+                ++_current->base.sched_locked;
+                update_cache(1);
+        }

         K_DEBUG("scheduler unlocked (%p:%d)\n",
                 _current, _current->base.sched_locked);

-        _reschedule(key);
+        _reschedule(irq_lock());
 #endif
 }

@@ -588,7 +600,11 @@ void _impl_k_yield(void)
         __ASSERT(!_is_in_isr(), "");

         if (!_is_idle(_current)) {
-                _move_thread_to_end_of_prio_q(_current);
+                LOCKED(&sched_lock) {
+                        _priq_run_remove(&_kernel.ready_q.runq, _current);
+                        _priq_run_add(&_kernel.ready_q.runq, _current);
+                        update_cache(1);
+                }
         }

         if (_get_next_ready_thread() != _current) {
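A note on the check the new update_cache() logic relies on: _is_preempt(_current) asks whether the current thread can be preempted at all. As a rough illustration of the convention (cooperative threads run at negative priorities, and a thread holding the scheduler lock is likewise not preemptible), something along these lines; the real helper lives in the kernel's private headers and differs in detail:

/* Simplified paraphrase of the idea behind _is_preempt(); illustrative only,
 * not the actual kernel implementation.
 */
static inline int thread_is_preemptible(struct k_thread *thread)
{
        /* Cooperative threads use negative priorities; a nonzero
         * sched_locked count also disables preemption.
         */
        return thread->base.prio >= 0 && thread->base.sched_locked == 0;
}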
