@@ -219,29 +219,27 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	return IRQ_HANDLED;
 }
 
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
 {
 	/*
 	 * Completion address can be bad as well. Check fault address match for descriptor
 	 * and completion address.
 	 */
-	if ((u64)desc->hw == fault_addr ||
-	    (u64)desc->completion == fault_addr) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+		struct idxd_device *idxd = desc->wq->idxd;
+		struct device *dev = &idxd->pdev->dev;
+
+		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
 		return true;
 	}
 
 	return false;
 }
 
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
 {
-	if (desc->completion->status) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
-		return true;
-	}
-
-	return false;
+	idxd_dma_complete_txd(desc, reason);
+	idxd_free_desc(desc->wq, desc);
 }
 
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
@@ -251,25 +249,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
 	struct idxd_desc *desc, *t;
 	struct llist_node *head;
 	int queued = 0;
-	bool completed = false;
 	unsigned long flags;
+	enum idxd_complete_type reason;
 
 	*processed = 0;
 	head = llist_del_all(&irq_entry->pending_llist);
 	if (!head)
 		goto out;
 
-	llist_for_each_entry_safe(desc, t, head, llnode) {
-		if (wtype == IRQ_WORK_NORMAL)
-			completed = complete_desc(desc);
-		else if (wtype == IRQ_WORK_PROCESS_FAULT)
-			completed = process_fault(desc, data);
+	if (wtype == IRQ_WORK_NORMAL)
+		reason = IDXD_COMPLETE_NORMAL;
+	else
+		reason = IDXD_COMPLETE_DEV_FAIL;
 
-		if (completed) {
-			idxd_free_desc(desc->wq, desc);
+	llist_for_each_entry_safe(desc, t, head, llnode) {
+		if (desc->completion->status) {
+			if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+				match_fault(desc, data);
+			complete_desc(desc, reason);
 			(*processed)++;
-			if (wtype == IRQ_WORK_PROCESS_FAULT)
-				break;
 		} else {
 			spin_lock_irqsave(&irq_entry->list_lock, flags);
 			list_add_tail(&desc->list,
@@ -287,42 +285,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
 				     enum irq_work_type wtype,
 				     int *processed, u64 data)
 {
-	struct list_head *node, *next;
 	int queued = 0;
-	bool completed = false;
 	unsigned long flags;
+	LIST_HEAD(flist);
+	struct idxd_desc *desc, *n;
+	enum idxd_complete_type reason;
 
 	*processed = 0;
-	spin_lock_irqsave(&irq_entry->list_lock, flags);
-	if (list_empty(&irq_entry->work_list))
-		goto out;
-
-	list_for_each_safe(node, next, &irq_entry->work_list) {
-		struct idxd_desc *desc =
-			container_of(node, struct idxd_desc, list);
+	if (wtype == IRQ_WORK_NORMAL)
+		reason = IDXD_COMPLETE_NORMAL;
+	else
+		reason = IDXD_COMPLETE_DEV_FAIL;
 
+	/*
+	 * This lock protects list corruption from access of list outside of the irq handler
+	 * thread.
+	 */
+	spin_lock_irqsave(&irq_entry->list_lock, flags);
+	if (list_empty(&irq_entry->work_list)) {
 		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-		if (wtype == IRQ_WORK_NORMAL)
-			completed = complete_desc(desc);
-		else if (wtype == IRQ_WORK_PROCESS_FAULT)
-			completed = process_fault(desc, data);
+		return 0;
+	}
 
-		if (completed) {
-			spin_lock_irqsave(&irq_entry->list_lock, flags);
+	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+		if (desc->completion->status) {
 			list_del(&desc->list);
-			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-			idxd_free_desc(desc->wq, desc);
 			(*processed)++;
-			if (wtype == IRQ_WORK_PROCESS_FAULT)
-				return queued;
+			list_add_tail(&desc->list, &flist);
 		} else {
 			queued++;
 		}
-		spin_lock_irqsave(&irq_entry->list_lock, flags);
 	}
 
- out:
 	spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+
+	list_for_each_entry(desc, &flist, list) {
+		if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+			match_fault(desc, data);
+		complete_desc(desc, reason);
+	}
+
 	return queued;
 }
 
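The key change in irq_process_work_list() above is that completed descriptors are first moved onto a private list (flist) while irq_entry->list_lock is held, and their completion callbacks run only after the lock is dropped. Below is a minimal, self-contained userspace sketch of that two-pass pattern; it is not the driver code, and the names work_item, work_lock and complete_item, as well as the pthread mutex standing in for the spinlock, are hypothetical.

/*
 * Sketch of the pattern only: unlink finished items from the shared list
 * while the lock is held, then run completions with the lock dropped.
 * work_item, work_lock and complete_item are stand-ins, not idxd code, and
 * this singly linked list does not preserve completion order the way the
 * kernel's list_add_tail() onto flist does.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int done;			/* stands in for desc->completion->status */
	struct work_item *next;
};

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *work_list;	/* shared list, protected by work_lock */

static void complete_item(struct work_item *item)
{
	/* completion callback runs here, with work_lock NOT held */
	printf("completed item %p\n", (void *)item);
	free(item);
}

static int process_work_list(void)
{
	struct work_item *item, **pp, *flist = NULL;
	int queued = 0;

	/* Pass 1: under the lock, move finished items onto a private list. */
	pthread_mutex_lock(&work_lock);
	for (pp = &work_list; (item = *pp) != NULL; ) {
		if (item->done) {
			*pp = item->next;	/* unlink from the shared list */
			item->next = flist;	/* stash on the local list */
			flist = item;
		} else {
			queued++;		/* still pending, leave it queued */
			pp = &item->next;
		}
	}
	pthread_mutex_unlock(&work_lock);

	/* Pass 2: complete the collected items without holding the lock. */
	while (flist != NULL) {
		item = flist;
		flist = item->next;
		complete_item(item);
	}

	return queued;
}

int main(void)
{
	/* Seed the shared list with three items, two of them already finished. */
	for (int i = 0; i < 3; i++) {
		struct work_item *item = calloc(1, sizeof(*item));

		if (!item)
			return 1;
		item->done = (i != 1);
		item->next = work_list;
		work_list = item;
	}

	printf("still pending: %d\n", process_work_list());
	return 0;
}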