@@ -29,8 +29,7 @@ struct mlx5_irq {
 	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_irq_pool *pool;
 	int refcount;
-	u32 index;
-	int irqn;
+	struct msi_map map;
 };
 
 struct mlx5_irq_table {
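For context, struct msi_map comes from the generic MSI core (include/linux/msi.h) and pairs a vector index with its Linux interrupt number, which is why the two fields removed above collapse into one member. A sketch of its definition, as found in msi.h around this kernel version (verify against your tree):

	/* Mapping between an MSI index and its Linux interrupt number. */
	struct msi_map {
		int		index;	/* replaces irq->index */
		unsigned int	virq;	/* replaces irq->irqn */
	};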
@@ -128,14 +127,14 @@ static void irq_release(struct mlx5_irq *irq)
 {
 	struct mlx5_irq_pool *pool = irq->pool;
 
-	xa_erase(&pool->irqs, irq->index);
+	xa_erase(&pool->irqs, irq->map.index);
 	/* free_irq requires that affinity_hint and rmap will be cleared
 	 * before calling it. This is why there is asymmetry with set_rmap
 	 * which should be called after alloc_irq but before request_irq.
 	 */
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 	kfree(irq);
 }
 
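The comment in irq_release() describes an ordering contract: teardown must mirror setup around request_irq()/free_irq(). A rough sketch of the implied order (the rmap step is handled by the EQ layer, not in this file, so treat it as an assumption here):

	/* setup:    pci_irq_vector() -> [set rmap] -> request_irq()
	 *           -> irq_set_affinity_and_hint()
	 * teardown: irq_update_affinity_hint(virq, NULL) -> [clear rmap]
	 *           -> free_irq() -> kfree()
	 */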
@@ -217,15 +216,15 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
-	irq->irqn = pci_irq_vector(dev->pdev, i);
+	irq->map.virq = pci_irq_vector(dev->pdev, i);
 	if (!mlx5_irq_pool_is_sf_pool(pool))
 		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
 	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
 		 "%s@pci:%s", name, pci_name(dev->pdev));
-	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+	err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
 			  &irq->nh);
 	if (err) {
 		mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
@@ -238,23 +237,23 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	if (affinity) {
 		cpumask_copy(irq->mask, affinity);
-		irq_set_affinity_and_hint(irq->irqn, irq->mask);
+		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
-	irq->index = i;
-	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
+	irq->map.index = i;
+	err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
 	if (err) {
 		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-			      irq->index, err);
+			      irq->map.index, err);
 		goto err_xa;
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 err_req_irq:
 	kfree(irq);
 	return ERR_PTR(err);
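Net effect of this function after the change: both halves of the embedded map are filled locally, one from the PCI core and one from the caller-chosen slot that doubles as the xarray key. A minimal sketch of the resulting pairing, using only calls already present above:

	struct msi_map map = {
		.index = i,                            /* pool slot, also the xarray key */
		.virq  = pci_irq_vector(dev->pdev, i), /* Linux IRQ for that MSI-X vector */
	};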
@@ -292,7 +291,7 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
 
 int mlx5_irq_get_index(struct mlx5_irq *irq)
 {
-	return irq->index;
+	return irq->map.index;
 }
 
 /* irq_pool API */
@@ -364,7 +363,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
 	int i;
 
 	for (i = 0; i < nirqs; i++) {
-		synchronize_irq(irqs[i]->irqn);
+		synchronize_irq(irqs[i]->map.virq);
 		mlx5_irq_put(irqs[i]);
 	}
 }
@@ -433,7 +432,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->irqn, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(affinity),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
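Since irq->map stays private to this file, callers are unaffected; they keep going through mlx5_irq_request() and the accessors. A hypothetical caller sketch (error handling trimmed; vecidx and affinity are whatever the EQ layer passes in):

	struct mlx5_irq *irq;
	int index;

	irq = mlx5_irq_request(dev, vecidx, affinity);
	if (IS_ERR(irq))
		return PTR_ERR(irq);
	index = mlx5_irq_get_index(irq);	/* now backed by irq->map.index */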