@@ -17,7 +17,11 @@ static K_THREAD_STACK_DEFINE(wq_stack, STACK_SIZE);
 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
 static struct k_thread tdata;
 
+static struct k_sem sync_sem;
 static struct k_sem end_sem;
+static bool wait_for_end;
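+/* counters used to check that every successfully submitted job actually ran */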
+static atomic_t submit_success;
+static atomic_t offload_job_cnt;
 
 /*
  * This global variable control if the priority of offload job
@@ -54,23 +58,26 @@ static void entry_offload_job(struct k_work *work)
 			"the offload did not run immediately.");
 	}
 
+	atomic_inc(&offload_job_cnt);
 	k_sem_give(&end_sem);
 }
 
 /* offload work to work queue */
 void isr_handler(const void *param)
 {
 	struct k_work *work = ((struct interrupt_param *)param)->work;
-	int ret;
 
 	zassert_not_null(work, "kwork should not be NULL");
 
 	orig_t_keep_run = 0;
 
-	ret = k_work_submit_to_queue(&wq_queue, work);
-	zassert_true((ret == 0) || (ret == 1),
-		"kwork not sumbmitted or queued");
+	/* If the work is still busy, do not submit it again. */
+	if (!k_work_busy_get(work)) {
+		zassert_equal(k_work_submit_to_queue(&wq_queue, work),
+				1, "kwork not submitted or queued");
 
+		atomic_inc(&submit_success);
+	}
 }
 
 #if defined(CONFIG_DYNAMIC_INTERRUPTS)
@@ -125,55 +132,66 @@ static void trigger_offload_interrupt(const bool real_irq, void *work)
 
 static void t_running(void *p1, void *p2, void *p3)
 {
-	while (1) {
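+	/* signal that this thread has started running */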
+	k_sem_give(&sync_sem);
+
+	while (wait_for_end == false) {
 		orig_t_keep_run = 1;
-		k_usleep(1);
+		k_usleep(150);
 	}
 }
 
-static void run_test_offload(int case_type, int real_irq)
+static void init_env(int real_irq)
 {
 	static bool wq_already_start;
-	int thread_prio = 1;
-
-	TC_PRINT("case %d\n", case_type);
 
 	/* semaphore used to sync the end */
+	k_sem_init(&sync_sem, 0, 1);
 	k_sem_init(&end_sem, 0, NUM_WORK);
 
-	if (offload_job_prio_higher) {
-		/* priority of offload job higher than thread */
-		thread_prio = 2;
-	} else {
-		/* priority of offload job lower than thread */
-		thread_prio = 0;
-	}
+	/* initialize global variables */
+	submit_success = 0;
+	offload_job_cnt = 0;
+	orig_t_keep_run = 0;
+	wait_for_end = false;
 
+	/* initialize the dynamic interrupt if it is used */
 	if (real_irq && !vector_num) {
 		init_dyn_interrupt();
 	}
 
-	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
-			(k_thread_entry_t)t_running,
-			NULL, NULL, NULL,
-			K_PRIO_PREEMPT(thread_prio),
-			K_INHERIT_PERMS, K_NO_WAIT);
+	/* initialize all the k_work */
+	for (int i = 0; i < NUM_WORK; i++) {
+		k_work_init(&offload_work[i], entry_offload_job);
+	}
 
 	/* start a work queue thread if not existing */
 	if (!wq_already_start) {
 		k_work_queue_start(&wq_queue, wq_stack, STACK_SIZE,
-				1, NULL);
+				K_PRIO_PREEMPT(1), NULL);
 
 		wq_already_start = true;
 	}
+}
 
-	/* initialize all the k_work */
-	for (int i = 0; i < NUM_WORK; i++) {
-		k_work_init(&offload_work[i], entry_offload_job);
+static void run_test_offload(int case_type, int real_irq)
+{
+	int thread_prio = K_PRIO_PREEMPT(0);
+
+	/* initialize the global variables */
+	init_env(real_irq);
+
+	/* set priority of offload job higher than thread */
+	if (offload_job_prio_higher) {
+		thread_prio = K_PRIO_PREEMPT(2);
 	}
 
+	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
+			(k_thread_entry_t)t_running,
+			NULL, NULL, NULL, thread_prio,
+			K_INHERIT_PERMS, K_NO_WAIT);
+
 	/* wait for thread start */
-	k_usleep(10);
+	k_sem_take(&sync_sem, K_FOREVER);
 
 	for (int i = 0; i < NUM_WORK; i++) {
 
@@ -190,16 +208,18 @@ static void run_test_offload(int case_type, int real_irq)
 			ztest_test_fail();
 		}
 	}
-
 	/* wait for all offload job complete */
-	k_sem_take(&end_sem, K_FOREVER);
+	for (int i = 0; i < atomic_get(&submit_success); i++) {
+		k_sem_take(&end_sem, K_FOREVER);
+	}
 
-	k_usleep(1);
+	zassert_equal(submit_success, offload_job_cnt,
+			"submitted job count does not match offload job count");
 
-	zassert_equal(orig_t_keep_run, 1,
-		"offload job done, the original thread run");
+	/* notify the running thread to end */
+	wait_for_end = true;
 
-	k_thread_abort(tid);
+	k_thread_join(tid, K_FOREVER);
 }
 
 /**