Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

tests: z_test_1cpu_start() makes only CPU0 active #70579

Merged
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
tests: z_test_1cpu_start() makes only CPU0 active
When z_test_1cpu_start() is called to ensure that only a single CPU
on an SMP system is available for use in a test, this commit will
ensure that that CPU is the primary CPU--CPU0. This is done because
some timer drivers only have the timer interrupt processed by one CPU.

A bit of a song and dance is performed to achieve this without enabling
the CPU mask/affinity pinning API. If the cpuhold thread is found to
be executing on CPU0, then a new copy of cpuhold thread is created. Once
the new copy is executing (incidentally guaranteed to be on another CPU)
then it informs the original copy and busy waits until the original
copy is switched out of CPU0. At this point, we can create the next
cpuhold thread to occupy another CPU if needed.

During this song and dance, it is critical that the 'copy' not pend. If
it pends, we cannot guarantee which CPU it will execute on when it
unpends. As the cpuhold threads have the highest priority, nothing is
going to cause them to execute on another CPU for as long as they do
not pend.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
  • Loading branch information
peter-mitsis committed Mar 22, 2024
commit 7af29182734a11a547cd871e894cecc06b44c6e1
131 changes: 114 additions & 17 deletions subsys/testsuite/ztest/src/ztest.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@

#include <zephyr/llext/symbol.h>

#include <zephyr/sys/barrier.h>

#ifdef KERNEL
static struct k_thread ztest_thread;
#endif
Expand Down Expand Up @@ -103,11 +105,57 @@ static int cleanup_test(struct ztest_unit_test *test)
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
#define MAX_NUM_CPUHOLD (CONFIG_MP_MAX_NUM_CPUS - 1)
#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
static struct k_thread cpuhold_threads[MAX_NUM_CPUHOLD];
K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD, CPUHOLD_STACK_SZ);

/* One entry per potential "CPU hold" thread: the thread object plus a
 * flag tracking whether this pool slot is currently in use.
 */
struct cpuhold_pool_item {
	struct k_thread thread;
	bool used;
};

/* Pool of hold threads. One extra slot (+1) beyond MAX_NUM_CPUHOLD is
 * needed because handing CPU0 off to a fresh copy of the hold thread
 * briefly requires both the original and its replacement to exist at
 * the same time (see cpu_hold()).
 */
static struct cpuhold_pool_items[MAX_NUM_CPUHOLD + 1]; /* placeholder */

/* Find the first free slot in cpuhold_pool_items[].
 *
 * @return index of the first entry whose 'used' flag is clear,
 *         or -1 if every slot in the pool is taken.
 */
static int find_unused_thread(void)
{
	int idx = -1;

	for (int slot = 0; slot < (MAX_NUM_CPUHOLD + 1); slot++) {
		if (cpuhold_pool_items[slot].used) {
			continue;
		}

		idx = slot;
		break;
	}

	return idx;
}

static void mark_thread_unused(struct k_thread *thread)
{
for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
if (&cpuhold_pool_items[i].thread == thread) {
cpuhold_pool_items[i].used = false;
}
}
}

/* Spin until @thread has been switched out of the CPU it is running on.
 *
 * A non-NULL switch_handle indicates the architecture layer has
 * completed a context switch away from @thread; we poll it through a
 * volatile pointer so each iteration performs a fresh load.
 *
 * Interrupts are locked for the duration so the caller cannot itself
 * be preempted mid-poll.
 *
 * NOTE(review): relies on the arch switch code publishing
 * thread->switch_handle when the thread is switched out -- confirm
 * against the arch-layer context-switch contract.
 */
static inline void wait_for_thread_to_switch_out(struct k_thread *thread)
{
	unsigned int key = arch_irq_lock();
	/* volatile view of switch_handle: force a re-load on every poll */
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();

	arch_irq_unlock(key);
}

/* "Holds" a CPU for use with the "1cpu" test cases. Note that we
* can't use tools like the cpumask feature because we have tests that
Expand All @@ -116,12 +164,58 @@ volatile int cpuhold_active;
*/
static void cpu_hold(void *arg1, void *arg2, void *arg3)
{
ARG_UNUSED(arg1);
ARG_UNUSED(arg2);
struct k_thread *thread = arg1;
unsigned int idx = (unsigned int)(uintptr_t)arg2;
char tname[CONFIG_THREAD_MAX_NAME_LEN];

ARG_UNUSED(arg3);

unsigned int key = arch_irq_lock();
if (arch_proc_id() == 0) {
int i;

i = find_unused_thread();

__ASSERT_NO_MSG(i != -1);

cpuhold_spawned = false;

cpuhold_pool_items[i].used = true;
k_thread_create(&cpuhold_pool_items[i].thread,
cpuhold_stacks[i], CPUHOLD_STACK_SZ,
cpu_hold, k_current_get(),
(void *)(uintptr_t)idx, NULL,
K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);

/*
* Busy-wait until we know the spawned thread is running to
* ensure it does not spawn on CPU0.
*/

while (!cpuhold_spawned) {
k_busy_wait(1000);
}

return;
}

if (thread != NULL) {
cpuhold_spawned = true;

/* Busywait until a new thread is scheduled in on CPU0 */

wait_for_thread_to_switch_out(thread);

mark_thread_unused(thread);
}

if (IS_ENABLED(CONFIG_THREAD_NAME)) {
snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", idx);
k_thread_name_set(k_current_get(), tname);
}


uint32_t dt, start_ms = k_uptime_get_32();
unsigned int key = arch_irq_lock();

k_sem_give(&cpuhold_sem);

Expand Down Expand Up @@ -155,23 +249,25 @@ void z_impl_z_test_1cpu_start(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
unsigned int num_cpus = arch_num_cpus();
int j;

cpuhold_active = 1;
char tname[CONFIG_THREAD_MAX_NAME_LEN];

k_sem_init(&cpuhold_sem, 0, 999);

/* Spawn N-1 threads to "hold" the other CPUs, waiting for
* each to signal us that it's locked and spinning.
*/
for (int i = 0; i < num_cpus - 1; i++) {
k_thread_create(&cpuhold_threads[i], cpuhold_stacks[i], CPUHOLD_STACK_SZ,
cpu_hold, NULL, NULL, NULL, K_HIGHEST_THREAD_PRIO,
0, K_NO_WAIT);
if (IS_ENABLED(CONFIG_THREAD_NAME)) {
snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", i);
k_thread_name_set(&cpuhold_threads[i], tname);
}
j = find_unused_thread();

__ASSERT_NO_MSG(j != -1);

cpuhold_pool_items[j].used = true;
k_thread_create(&cpuhold_pool_items[j].thread,
cpuhold_stacks[j], CPUHOLD_STACK_SZ,
cpu_hold, NULL, (void *)(uintptr_t)i, NULL,
K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
k_sem_take(&cpuhold_sem, K_FOREVER);
}
#endif
Expand All @@ -180,12 +276,13 @@ void z_impl_z_test_1cpu_start(void)
/* End a "1cpu" test section: release every held CPU.
 *
 * Clears cpuhold_active, then aborts each in-use hold thread and
 * returns its pool slot to the free pool. No-op on non-SMP builds.
 */
void z_impl_z_test_1cpu_stop(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	cpuhold_active = 0;

	for (unsigned int slot = 0; slot < (MAX_NUM_CPUHOLD + 1); slot++) {
		struct cpuhold_pool_item *item = &cpuhold_pool_items[slot];

		if (!item->used) {
			continue;
		}

		k_thread_abort(&item->thread);
		item->used = false;
	}
#endif
}
Expand Down
Loading