@@ -786,6 +786,85 @@ void efx_remove_channels(struct efx_nic *efx)
 	kfree(efx->xdp_tx_queues);
 }
 
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+				struct efx_tx_queue *tx_queue)
+{
+	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+		return -EINVAL;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Channel %u TXQ %u is XDP %u, HW %u\n",
+		  tx_queue->channel->channel, tx_queue->label,
+		  xdp_queue_number, tx_queue->queue);
+	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+	return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
+	int xdp_queue_number = 0;
+	int rc;
+
+	/* We need to mark which channels really have RX and TX
+	 * queues, and adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->tx_channel_offset)
+			continue;
+
+		if (efx_channel_is_xdp_tx(channel)) {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		} else {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev,
+					  "Channel %u TXQ %u is HW %u\n",
+					  channel->channel, tx_queue->label,
+					  tx_queue->queue);
+			}
+
+			/* If XDP is borrowing queues from net stack, it must
+			 * use the queue with no csum offload, which is the
+			 * first one of the channel
+			 * (note: tx_queue_by_type is not initialized yet)
+			 */
+			if (efx->xdp_txq_queues_mode ==
+			    EFX_XDP_TX_QUEUES_BORROWED) {
+				tx_queue = &channel->tx_queue[0];
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		}
+	}
+	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number != efx->xdp_tx_queue_count);
+	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, assign the already
+	 * existing queues to the exceeding CPUs
+	 */
+	next_queue = 0;
+	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[next_queue++];
+		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+		if (rc == 0)
+			xdp_queue_number++;
+	}
+}
+
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
@@ -857,6 +936,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		efx_init_napi_channel(efx->channel[i]);
 	}
 
+	efx_set_xdp_channels(efx);
 out:
 	/* Destroy unused channel structures */
 	for (i = 0; i < efx->n_channels; i++) {
@@ -889,26 +969,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		goto out;
 }
 
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-		     struct efx_tx_queue *tx_queue)
-{
-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
-		return -EINVAL;
-
-	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-		  tx_queue->channel->channel, tx_queue->label,
-		  xdp_queue_number, tx_queue->queue);
-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-	return 0;
-}
-
 int efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
-	unsigned int next_queue = 0;
-	int xdp_queue_number;
 	int rc;
 
 	efx->tx_channel_offset =
@@ -926,61 +989,14 @@ int efx_set_channels(struct efx_nic *efx)
 			return -ENOMEM;
 	}
 
-	/* We need to mark which channels really have RX and TX
-	 * queues, and adjust the TX queue numbers if we have separate
-	 * RX-only and TX-only channels.
-	 */
-	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
-
-		if (channel->channel >= efx->tx_channel_offset) {
-			if (efx_channel_is_xdp_tx(channel)) {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			} else {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
-						  channel->channel, tx_queue->label,
-						  tx_queue->queue);
-				}
-
-				/* If XDP is borrowing queues from net stack, it must use the queue
-				 * with no csum offload, which is the first one of the channel
-				 * (note: channel->tx_queue_by_type is not initialized yet)
-				 */
-				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-					tx_queue = &channel->tx_queue[0];
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			}
-		}
 	}
-	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number != efx->xdp_tx_queue_count);
-	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number > efx->xdp_tx_queue_count);
 
-	/* If we have more CPUs than assigned XDP TX queues, assign the already
-	 * existing queues to the exceeding CPUs
-	 */
-	next_queue = 0;
-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
-		tx_queue = efx->xdp_tx_queues[next_queue++];
-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-		if (rc == 0)
-			xdp_queue_number++;
-	}
+	efx_set_xdp_channels(efx);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
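
For context on the fallback pass at the end of efx_set_xdp_channels(): when there are more per-CPU XDP TX slots (xdp_tx_queue_count) than queues actually assigned, the final while loop hands the already-assigned queues out again in order, so every CPU still gets a usable queue. The following is a minimal standalone sketch of that wrap-around assignment, not driver code; the queue counts and array of ints are illustrative assumptions standing in for efx->xdp_tx_queues[].

#include <stdio.h>

#define QUEUE_COUNT 8	/* assumed xdp_tx_queue_count: one slot per CPU */
#define HW_QUEUES   3	/* assumed number of dedicated XDP TX hardware queues */

int main(void)
{
	int slot[QUEUE_COUNT];	/* stands in for efx->xdp_tx_queues[] */
	int xdp_queue_number, next_queue;

	/* First pass: each dedicated hardware queue claims one slot,
	 * as the efx_for_each_channel() loop does in the patch.
	 */
	for (xdp_queue_number = 0; xdp_queue_number < HW_QUEUES; xdp_queue_number++)
		slot[xdp_queue_number] = xdp_queue_number;

	/* Wrap-around pass, mirroring the patch's while loop: remaining
	 * slots reuse already-assigned queues in order. next_queue always
	 * trails xdp_queue_number, so it only reads filled slots.
	 */
	next_queue = 0;
	while (xdp_queue_number < QUEUE_COUNT) {
		slot[xdp_queue_number] = slot[next_queue++];
		xdp_queue_number++;
	}

	for (int i = 0; i < QUEUE_COUNT; i++)
		printf("XDP slot %d -> HW queue %d\n", i, slot[i]);
	return 0;
}

With 8 slots and 3 hardware queues this prints the mapping 0,1,2,0,1,2,0,1: the queues are shared round-robin by the extra CPUs, which is why the second WARN_ON in the patch only checks xdp_queue_number has not exceeded the count in the non-dedicated modes.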