@@ -59,6 +59,8 @@ struct mlx5_eq_table {
 	struct mutex		lock;	/* sync async eqs creations */
 	int			num_comp_eqs;
 	struct mlx5_irq_table	*irq_table;
+	struct mlx5_irq		**comp_irqs;
+	struct mlx5_irq		*ctrl_irq;
 #ifdef CONFIG_RFS_ACCEL
 	struct cpu_rmap		*rmap;
 #endif
@@ -266,8 +268,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
 	struct mlx5_priv *priv = &dev->priv;
-	u16 vecidx = param->irq_index;
 	__be64 *pas;
+	u16 vecidx;
 	void *eqc;
 	int inlen;
 	u32 *in;
@@ -289,23 +291,16 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
 	init_eq_buf(eq);
 
-	if (vecidx == MLX5_IRQ_EQ_CTRL)
-		eq->irq = mlx5_ctrl_irq_request(dev);
-	else
-		eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
-	if (IS_ERR(eq->irq)) {
-		err = PTR_ERR(eq->irq);
-		goto err_buf;
-	}
-
+	eq->irq = param->irq;
 	vecidx = mlx5_irq_get_index(eq->irq);
+
 	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
 		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
-		goto err_irq;
+		goto err_buf;
 	}
 
 	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
@@ -349,8 +344,6 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 err_in:
 	kvfree(in);
 
-err_irq:
-	mlx5_irq_release(eq->irq);
 err_buf:
 	mlx5_frag_buf_free(dev, &eq->frag_buf);
 	return err;
@@ -404,7 +397,6 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	if (err)
 		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
 			       eq->eqn);
-	mlx5_irq_release(eq->irq);
 
 	mlx5_frag_buf_free(dev, &eq->frag_buf);
 	return err;
@@ -597,11 +589,8 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
 
 	eq->irq_nb.notifier_call = mlx5_eq_async_int;
 	spin_lock_init(&eq->lock);
-	if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
-		return -ENOMEM;
 
 	err = create_async_eq(dev, &eq->core, param);
-	free_cpumask_var(param->affinity);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
 		return err;
@@ -646,11 +635,18 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	struct mlx5_eq_param param = {};
 	int err;
 
+	/* All the async_eqs are using a single IRQ; request one IRQ and share
+	 * its index among all the async_eqs of this device.
+	 */
+	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
+	if (IS_ERR(table->ctrl_irq))
+		return PTR_ERR(table->ctrl_irq);
+
 	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
 	mlx5_eq_notifier_register(dev, &table->cq_err_nb);
 
 	param = (struct mlx5_eq_param) {
-		.irq_index = MLX5_IRQ_EQ_CTRL,
+		.irq = table->ctrl_irq,
 		.nent = MLX5_NUM_CMD_EQE,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
 	};
@@ -663,7 +659,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
 	param = (struct mlx5_eq_param) {
-		.irq_index = MLX5_IRQ_EQ_CTRL,
+		.irq = table->ctrl_irq,
 		.nent = async_eq_depth_devlink_param_get(dev),
 	};
 
@@ -673,7 +669,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 		goto err2;
 
 	param = (struct mlx5_eq_param) {
-		.irq_index = MLX5_IRQ_EQ_CTRL,
+		.irq = table->ctrl_irq,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
 	};
@@ -692,6 +688,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 err1:
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+	mlx5_ctrl_irq_release(table->ctrl_irq);
 	return err;
 }
 
@@ -706,6 +703,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
 	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+	mlx5_ctrl_irq_release(table->ctrl_irq);
 }
 
 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -733,12 +731,10 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
 	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
 	int err;
 
-	if (!cpumask_available(param->affinity))
-		return ERR_PTR(-EINVAL);
-
 	if (!eq)
 		return ERR_PTR(-ENOMEM);
 
+	param->irq = dev->priv.eq_table->ctrl_irq;
 	err = create_async_eq(dev, eq, param);
 	if (err) {
 		kvfree(eq);
@@ -798,6 +794,45 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
 }
 EXPORT_SYMBOL(mlx5_eq_update_ci);
 
+static void comp_irqs_release(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = dev->priv.eq_table;
+
+	mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+	kfree(table->comp_irqs);
+}
+
+static int comp_irqs_request(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = dev->priv.eq_table;
+	int ncomp_eqs = table->num_comp_eqs;
+	u16 *cpus;
+	int ret;
+	int i;
+
+	ncomp_eqs = table->num_comp_eqs;
+	table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
+	if (!table->comp_irqs)
+		return -ENOMEM;
+
+	cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
+	if (!cpus) {
+		ret = -ENOMEM;
+		goto free_irqs;
+	}
+	for (i = 0; i < ncomp_eqs; i++)
+		cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
+	kfree(cpus);
+	if (ret < 0)
+		goto free_irqs;
+	return ret;
+
+free_irqs:
+	kfree(table->comp_irqs);
+	return ret;
+}
+
 static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -812,6 +847,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 		tasklet_disable(&eq->tasklet_ctx.task);
 		kfree(eq);
 	}
+	comp_irqs_release(dev);
 }
 
 static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -838,12 +874,13 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	int err;
 	int i;
 
+	ncomp_eqs = comp_irqs_request(dev);
+	if (ncomp_eqs < 0)
+		return ncomp_eqs;
 	INIT_LIST_HEAD(&table->comp_eqs_list);
-	ncomp_eqs = table->num_comp_eqs;
 	nent = comp_eq_depth_devlink_param_get(dev);
 	for (i = 0; i < ncomp_eqs; i++) {
 		struct mlx5_eq_param param = {};
-		int vecidx = i;
 
 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -858,18 +895,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 
 		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
 		param = (struct mlx5_eq_param) {
-			.irq_index = vecidx,
+			.irq = table->comp_irqs[i],
 			.nent = nent,
 		};
 
-		if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
-			err = -ENOMEM;
-			goto clean_eq;
-		}
-		cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
-				param.affinity);
 		err = create_map_eq(dev, &eq->core, &param);
-		free_cpumask_var(param.affinity);
 		if (err)
 			goto clean_eq;
 		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
@@ -883,7 +913,9 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 		list_add_tail(&eq->list, &table->comp_eqs_list);
 	}
 
+	table->num_comp_eqs = ncomp_eqs;
 	return 0;
+
 clean_eq:
 	kfree(eq);
 clean:
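
Net effect of the patch: create_map_eq() and destroy_unmap_eq() no longer request and release IRQs themselves. The EQ table now requests the control IRQ and the completion IRQ vector up front (mlx5_ctrl_irq_request(), mlx5_irqs_request_vectors()) and passes ready struct mlx5_irq handles through struct mlx5_eq_param, so EQ creation only borrows an IRQ it does not own. Below is a minimal user-space C sketch of that ownership pattern; every name in it (irq_handle, eq_param, request_irq_handle, create_eq) is a hypothetical stand-in for illustration, not an mlx5 API.

/* Sketch: caller owns the IRQ handles; EQ creation only borrows them.
 * All types and functions here are illustrative stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct irq_handle { int index; };
struct eq_param   { struct irq_handle *irq; int nent; };
struct eq         { struct irq_handle *irq; };

/* Caller-owned resource: requested once, shared by every user. */
static struct irq_handle *request_irq_handle(int index)
{
	struct irq_handle *h = malloc(sizeof(*h));

	if (h)
		h->index = index;
	return h;
}

/* Borrows the handle from param; on failure it releases nothing it
 * did not allocate (the old code had to release the IRQ here). */
static int create_eq(struct eq *eq, const struct eq_param *param)
{
	if (!param->irq)
		return -1;
	eq->irq = param->irq;
	printf("EQ bound to IRQ index %d\n", eq->irq->index);
	return 0;
}

int main(void)
{
	struct irq_handle *ctrl = request_irq_handle(0);
	struct eq cmd_eq, async_eq;

	if (!ctrl)
		return 1;
	/* All "async" EQs share the one control IRQ, as in the patch. */
	create_eq(&cmd_eq,   &(struct eq_param){ .irq = ctrl, .nent = 32 });
	create_eq(&async_eq, &(struct eq_param){ .irq = ctrl, .nent = 64 });

	free(ctrl); /* single release point, owned by the caller */
	return 0;
}

This mirrors why the err_irq label and the mlx5_irq_release() calls disappear from the EQ code paths: with one owner, there is exactly one release point.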