@@ -6,8 +6,24 @@
 
 #include "rxe.h"
 
+static struct workqueue_struct *rxe_wq;
+
+int rxe_alloc_wq(void)
+{
+	rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
+	if (!rxe_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void rxe_destroy_wq(void)
+{
+	destroy_workqueue(rxe_wq);
+}
+
 /* Check if task is idle i.e. not running, not scheduled in
- * tasklet queue and not draining. If so move to busy to
+ * work queue and not draining. If so move to busy to
  * reserve a slot in do_task() by setting to busy and taking
  * a qp reference to cover the gap from now until the task finishes.
  * state will move out of busy if task returns a non zero value
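For context, a minimal sketch of how this allocation pair is meant to be wired into module load and unload. The entry points live in rxe.c, outside this file, so the exact bodies and error unwinding below are assumptions:

/* sketch: tie the shared workqueue's lifetime to the module,
 * assuming the usual rxe_module_init/rxe_module_exit in rxe.c
 */
static int __init rxe_module_init(void)
{
	int err;

	/* create the workqueue before any qp can schedule work on it */
	err = rxe_alloc_wq();
	if (err)
		return err;

	err = rdma_link_register(&rxe_link_ops);
	if (err) {
		rxe_destroy_wq();
		return err;
	}

	return 0;
}

static void __exit rxe_module_exit(void)
{
	rdma_link_unregister(&rxe_link_ops);
	/* all qps, and therefore all tasks, are gone by this point */
	rxe_destroy_wq();
}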
@@ -21,9 +37,6 @@ static bool __reserve_if_idle(struct rxe_task *task)
 {
 	WARN_ON(rxe_read(task->qp) <= 0);
 
-	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
-		return false;
-
 	if (task->state == TASK_STATE_IDLE) {
 		rxe_get(task->qp);
 		task->state = TASK_STATE_BUSY;
@@ -38,15 +51,15 @@ static bool __reserve_if_idle(struct rxe_task *task)
 }
 
 /* check if task is idle or drained and not currently
- * scheduled in the tasklet queue. This routine is
+ * scheduled in the work queue. This routine is
  * called by rxe_cleanup_task or rxe_disable_task to
  * see if the queue is empty.
  * Context: caller should hold task->lock.
  * Returns true if done else false.
  */
 static bool __is_done(struct rxe_task *task)
 {
-	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
+	if (work_pending(&task->work))
 		return false;
 
 	if (task->state == TASK_STATE_IDLE ||
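The work_pending() test above relies on the companion change in rxe_task.h, which is not part of this file. A sketch of what the struct most likely looks like after the conversion; member order and comments are assumptions:

/* sketch of struct rxe_task after the conversion (rxe_task.h) */
struct rxe_task {
	struct work_struct	work;	/* replaces struct tasklet_struct tasklet */
	int			state;
	spinlock_t		lock;
	struct rxe_qp		*qp;
	int			(*func)(struct rxe_qp *qp);
	int			ret;
	long			num_sched;
	long			num_done;
};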
@@ -77,23 +90,23 @@ static bool is_done(struct rxe_task *task)
  * schedules the task. They must call __reserve_if_idle to
  * move the task to busy before calling or scheduling.
  * The task can also be moved to drained or invalid
- * by calls to rxe-cleanup_task or rxe_disable_task.
+ * by calls to rxe_cleanup_task or rxe_disable_task.
  * In that case tasks which get here are not executed but
  * just flushed. The tasks are designed to look to see if
- * there is work to do and do part of it before returning
+ * there is work to do and then do part of it before returning
  * here with a return value of zero until all the work
- * has been consumed then it retuens a non-zero value.
+ * has been consumed then it returns a non-zero value.
  * The number of times the task can be run is limited by
  * max iterations so one task cannot hold the cpu forever.
+ * If the limit is hit and work remains the task is rescheduled.
  */
-static void do_task(struct tasklet_struct *t)
+static void do_task(struct rxe_task *task)
 {
-	int cont;
-	int ret;
-	struct rxe_task *task = from_tasklet(task, t, tasklet);
 	unsigned int iterations;
 	unsigned long flags;
 	int resched = 0;
+	int cont;
+	int ret;
 
 	WARN_ON(rxe_read(task->qp) <= 0);
 
@@ -115,48 +128,47 @@ static void do_task(struct tasklet_struct *t)
 		} while (ret == 0 && iterations-- > 0);
 
 		spin_lock_irqsave(&task->lock, flags);
+		/* we're not done yet but we ran out of iterations.
+		 * yield the cpu and reschedule the task
+		 */
+		if (!ret) {
+			task->state = TASK_STATE_IDLE;
+			resched = 1;
+			goto exit;
+		}
+
 		switch (task->state) {
 		case TASK_STATE_BUSY:
-			if (ret) {
-				task->state = TASK_STATE_IDLE;
-			} else {
-				/* This can happen if the client
-				 * can add work faster than the
-				 * tasklet can finish it.
-				 * Reschedule the tasklet and exit
-				 * the loop to give up the cpu
-				 */
-				task->state = TASK_STATE_IDLE;
-				resched = 1;
-			}
+			task->state = TASK_STATE_IDLE;
 			break;
 
-		/* someone tried to run the task since the last time we called
-		 * func, so we will call one more time regardless of the
-		 * return value
+		/* someone tried to schedule the task while we
+		 * were running, keep going
 		 */
 		case TASK_STATE_ARMED:
 			task->state = TASK_STATE_BUSY;
 			cont = 1;
 			break;
 
 		case TASK_STATE_DRAINING:
-			if (ret)
-				task->state = TASK_STATE_DRAINED;
-			else
-				cont = 1;
+			task->state = TASK_STATE_DRAINED;
 			break;
 
 		default:
 			WARN_ON(1);
-			rxe_info_qp(task->qp, "unexpected task state = %d", task->state);
+			rxe_dbg_qp(task->qp, "unexpected task state = %d",
+				   task->state);
+			task->state = TASK_STATE_IDLE;
 		}
 
+exit:
 		if (!cont) {
 			task->num_done++;
 			if (WARN_ON(task->num_done != task->num_sched))
-				rxe_err_qp(task->qp, "%ld tasks scheduled, %ld tasks done",
-					   task->num_sched, task->num_done);
+				rxe_dbg_qp(
+					task->qp,
+					"%ld tasks scheduled, %ld tasks done",
+					task->num_sched, task->num_done);
 		}
 		spin_unlock_irqrestore(&task->lock, flags);
 	} while (cont);
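As a reading aid for the switch above, these are the task states it operates on, as defined in rxe_task.h at the time of this change; the per-state comments are my summary, not the header's:

/* sketch of the state set do_task() transitions between */
enum {
	TASK_STATE_IDLE,	/* not running, not queued; may be scheduled */
	TASK_STATE_BUSY,	/* do_task() is executing task->func */
	TASK_STATE_ARMED,	/* scheduled again while BUSY; run once more */
	TASK_STATE_DRAINING,	/* cleanup/disable is waiting for BUSY to end */
	TASK_STATE_DRAINED,	/* quiesced; will not run until re-enabled */
	TASK_STATE_INVALID,	/* torn down by rxe_cleanup_task() */
};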
@@ -169,18 +181,22 @@ static void do_task(struct tasklet_struct *t)
 	rxe_put(task->qp);
 }
 
+/* wrapper around do_task to fix argument for work queue */
+static void do_work(struct work_struct *work)
+{
+	do_task(container_of(work, struct rxe_task, work));
+}
+
 int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
 		  int (*func)(struct rxe_qp *))
 {
 	WARN_ON(rxe_read(qp) <= 0);
 
 	task->qp = qp;
 	task->func = func;
-
-	tasklet_setup(&task->tasklet, do_task);
-
 	task->state = TASK_STATE_IDLE;
 	spin_lock_init(&task->lock);
+	INIT_WORK(&task->work, do_work);
 
 	return 0;
 }
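A hedged usage sketch: roughly how the rxe QP setup code is expected to bind a task to its handler. rxe_completer is one of the driver's existing handlers; the surrounding function is illustrative only:

/* illustrative caller, e.g. during qp init */
static int example_qp_task_setup(struct rxe_qp *qp)
{
	int err;

	/* task->func will run from rxe_wq via do_work()/do_task() */
	err = rxe_init_task(&qp->comp.task, qp, rxe_completer);
	if (err)
		return err;

	rxe_sched_task(&qp->comp.task);	/* defer the first run to the workqueue */
	return 0;
}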
@@ -213,8 +229,6 @@ void rxe_cleanup_task(struct rxe_task *task)
 	while (!is_done(task))
 		cond_resched();
 
-	tasklet_kill(&task->tasklet);
-
 	spin_lock_irqsave(&task->lock, flags);
 	task->state = TASK_STATE_INVALID;
 	spin_unlock_irqrestore(&task->lock, flags);
@@ -226,7 +240,7 @@ void rxe_cleanup_task(struct rxe_task *task)
 void rxe_run_task(struct rxe_task *task)
 {
 	unsigned long flags;
-	int run;
+	bool run;
 
 	WARN_ON(rxe_read(task->qp) <= 0);
 
@@ -235,11 +249,11 @@ void rxe_run_task(struct rxe_task *task)
 	spin_unlock_irqrestore(&task->lock, flags);
 
 	if (run)
-		do_task(&task->tasklet);
+		do_task(task);
 }
 
-/* schedule the task to run later as a tasklet.
- * the tasklet)schedule call can be called holding
+/* schedule the task to run later as a work queue entry.
+ * the queue_work call can be called holding
  * the lock.
  */
 void rxe_sched_task(struct rxe_task *task)
@@ -250,7 +264,7 @@ void rxe_sched_task(struct rxe_task *task)
 
 	spin_lock_irqsave(&task->lock, flags);
 	if (__reserve_if_idle(task))
-		tasklet_schedule(&task->tasklet);
+		queue_work(rxe_wq, &task->work);
 	spin_unlock_irqrestore(&task->lock, flags);
 }
 
@@ -277,7 +291,9 @@ void rxe_disable_task(struct rxe_task *task)
 	while (!is_done(task))
 		cond_resched();
 
-	tasklet_disable(&task->tasklet);
+	spin_lock_irqsave(&task->lock, flags);
+	task->state = TASK_STATE_DRAINED;
+	spin_unlock_irqrestore(&task->lock, flags);
 }
 
 void rxe_enable_task(struct rxe_task *task)
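A sketch of how the disable/enable pair brackets a quiescent section. The caller name here is hypothetical; in the rxe driver the real callers sit in the QP state-change paths:

/* hypothetical caller: park the task while qp state is rewritten */
static void example_quiesce(struct rxe_qp *qp)
{
	rxe_disable_task(&qp->req.task);	/* waits via is_done(), parks in DRAINED */

	/* ... safe to touch state the task would otherwise race with ... */

	rxe_enable_task(&qp->req.task);		/* back to IDLE; may be scheduled again */
}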
@@ -291,7 +307,7 @@ void rxe_enable_task(struct rxe_task *task)
 		spin_unlock_irqrestore(&task->lock, flags);
 		return;
 	}
+
 	task->state = TASK_STATE_IDLE;
-	tasklet_enable(&task->tasklet);
 	spin_unlock_irqrestore(&task->lock, flags);
 }
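Putting the pieces together, the expected ordering under the new scheme, as a hedged end-to-end sketch; the function is illustrative, and rxe_alloc_wq()/rxe_destroy_wq() bracket everything at module scope:

/* illustrative per-qp lifecycle; not a real driver function */
static void example_lifecycle(struct rxe_qp *qp)
{
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	rxe_sched_task(&qp->comp.task);		/* defer: queue_work() on rxe_wq */
	rxe_run_task(&qp->comp.task);		/* or run inline when the task is idle */

	rxe_cleanup_task(&qp->comp.task);	/* drain, then TASK_STATE_INVALID */
}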