@@ -583,9 +583,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
 	int number;
 
 	number = channel->channel;
-	if (efx->tx_channel_offset == 0) {
+
+	if (number >= efx->xdp_channel_offset &&
+	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+		type = "-xdp";
+		number -= efx->xdp_channel_offset;
+	} else if (efx->tx_channel_offset == 0) {
 		type = "";
-	} else if (channel->channel < efx->tx_channel_offset) {
+	} else if (number < efx->tx_channel_offset) {
 		type = "-rx";
 	} else {
 		type = "-tx";
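Note: channel names are now derived from the channel's position relative to the new xdp_channel_offset. As a worked example, assuming the surrounding function formats names as "<interface><type>-<number>" (the format string is outside this hunk): with tx_channel_offset == 0 and xdp_channel_offset == 8, channel 3 is still named "eth0-3", while channel 9 becomes "eth0-xdp-1", because the XDP branch rebases number to a zero-based index within the XDP region.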
@@ -803,6 +808,8 @@ static void efx_remove_channels(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx)
 		efx_remove_channel(channel);
+
+	kfree(efx->xdp_tx_queues);
 }
 
 int
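Note: this kfree() pairs with the kcalloc() of efx->xdp_tx_queues added to efx_set_channels() later in this patch. Since kfree(NULL) is a no-op, the teardown path stays safe even when the XDP queue array was never allocated.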
@@ -1440,6 +1447,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 	return count;
 }
 
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+				      unsigned int max_channels,
+				      unsigned int extra_channels,
+				      unsigned int parallelism)
+{
+	unsigned int n_channels = parallelism;
+	int vec_count;
+	int n_xdp_tx;
+	int n_xdp_ev;
+
+	if (efx_separate_tx_channels)
+		n_channels *= 2;
+	n_channels += extra_channels;
+
+	/* To allow XDP transmit to happen from arbitrary NAPI contexts
+	 * we allocate a TX queue per CPU. We share event queues across
+	 * multiple tx queues, assuming tx and ev queues are both
+	 * maximum size.
+	 */
+
+	n_xdp_tx = num_possible_cpus();
+	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+	/* Check resources.
+	 * We need a channel per event queue, plus a VI per tx queue.
+	 * This may be more pessimistic than it needs to be.
+	 */
+	if (n_channels + n_xdp_ev > max_channels) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+			  n_xdp_ev, n_channels, max_channels);
+		efx->n_xdp_channels = 0;
+		efx->xdp_tx_per_channel = 0;
+		efx->xdp_tx_queue_count = 0;
+	} else {
+		efx->n_xdp_channels = n_xdp_ev;
+		efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+		efx->xdp_tx_queue_count = n_xdp_tx;
+		n_channels += n_xdp_ev;
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Allocating %d TX and %d event queues for XDP\n",
+			  n_xdp_tx, n_xdp_ev);
+	}
+
+	n_channels = min(n_channels, max_channels);
+
+	vec_count = pci_msix_vec_count(efx->pci_dev);
+	if (vec_count < 0)
+		return vec_count;
+	if (vec_count < n_channels) {
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+			  vec_count, n_channels);
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Performance may be reduced.\n");
+		n_channels = vec_count;
+	}
+
+	efx->n_channels = n_channels;
+
+	/* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+	if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+		n_channels--;
+
+	/* Ignore XDP tx channels when creating rx channels. */
+	n_channels -= efx->n_xdp_channels;
+
+	if (efx_separate_tx_channels) {
+		efx->n_tx_channels =
+			min(max(n_channels / 2, 1U),
+			    efx->max_tx_channels);
+		efx->tx_channel_offset =
+			n_channels - efx->n_tx_channels;
+		efx->n_rx_channels =
+			max(n_channels -
+			    efx->n_tx_channels, 1U);
+	} else {
+		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+		efx->tx_channel_offset = 0;
+		efx->n_rx_channels = n_channels;
+	}
+
+	if (efx->n_xdp_channels)
+		efx->xdp_channel_offset = efx->tx_channel_offset +
+					  efx->n_tx_channels;
+	else
+		efx->xdp_channel_offset = efx->n_channels;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Allocating %u RX channels\n",
+		  efx->n_rx_channels);
+
+	return efx->n_channels;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
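Note: a worked example of the sizing arithmetic above, with assumed values (EFX_TXQ_TYPES is taken as 4 here purely for illustration; the CPU and channel counts are likewise hypothetical):

    /* 16 possible CPUs, EFX_TXQ_TYPES == 4, max_channels == 32,
     * no extra channels, efx_separate_tx_channels == false:
     *
     *   n_xdp_tx           = num_possible_cpus() = 16
     *   n_xdp_ev           = DIV_ROUND_UP(16, 4) = 4
     *   n_channels         = 16 + 4              = 20  (<= 32, so XDP fits)
     *   n_tx_channels      = n_rx_channels       = 16
     *   xdp_channel_offset = 0 + 16              = 16  (XDP channels last)
     *
     * The function returns 20, the number of MSI-X vectors to request.
     */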
@@ -1454,19 +1556,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		++extra_channels;
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+		unsigned int parallelism = efx_wanted_parallelism(efx);
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
 		unsigned int n_channels;
 
-		n_channels = efx_wanted_parallelism(efx);
-		if (efx_separate_tx_channels)
-			n_channels *= 2;
-		n_channels += extra_channels;
-		n_channels = min(n_channels, efx->max_channels);
-
-		for (i = 0; i < n_channels; i++)
-			xentries[i].entry = i;
-		rc = pci_enable_msix_range(efx->pci_dev,
-					   xentries, 1, n_channels);
+		rc = efx_allocate_msix_channels(efx, efx->max_channels,
+						extra_channels, parallelism);
+		if (rc >= 0) {
+			n_channels = rc;
+			for (i = 0; i < n_channels; i++)
+				xentries[i].entry = i;
+			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+						   n_channels);
+		}
 		if (rc < 0) {
 			/* Fall back to single channel MSI */
 			netif_err(efx, drv, efx->net_dev,
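Note: efx_allocate_msix_channels() returns either a negative errno or the total channel count to request, so rc feeds directly into pci_enable_msix_range(), which in turn returns the number of vectors actually granted (at least 1) or a negative errno. Either failure lands in the existing rc < 0 fallback to single-channel MSI.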
@@ -1485,21 +1587,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		}
 
 		if (rc > 0) {
-			efx->n_channels = n_channels;
-			if (n_channels > extra_channels)
-				n_channels -= extra_channels;
-			if (efx_separate_tx_channels) {
-				efx->n_tx_channels = min(max(n_channels / 2,
-							     1U),
-							 efx->max_tx_channels);
-				efx->n_rx_channels = max(n_channels -
-							 efx->n_tx_channels,
-							 1U);
-			} else {
-				efx->n_tx_channels = min(n_channels,
-							 efx->max_tx_channels);
-				efx->n_rx_channels = n_channels;
-			}
 			for (i = 0; i < efx->n_channels; i++)
 				efx_get_channel(efx, i)->irq =
 					xentries[i].vector;
@@ -1511,6 +1598,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_channels = 1;
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1529,12 +1618,14 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 
-	/* Assign extra channels if possible */
+	/* Assign extra channels if possible, before XDP channels */
 	efx->n_extra_tx_channels = 0;
-	j = efx->n_channels;
+	j = efx->xdp_channel_offset;
 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
 		if (!efx->extra_channel_type[i])
 			continue;
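Note: in both non-MSI-X fallbacks, n_xdp_channels = 0 and xdp_channel_offset = n_channels (one past the last channel) act as a "no XDP channels" sentinel, so the number >= efx->xdp_channel_offset test in efx_get_channel_name() can never match. Starting the extra-channel cursor j at xdp_channel_offset rather than n_channels keeps extra channels (e.g. PTP) below the XDP region, so the XDP channels always sit last.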
@@ -1729,29 +1820,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 	efx->legacy_irq = 0;
 }
 
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
+	int xdp_queue_number;
 
 	efx->tx_channel_offset =
 		efx_separate_tx_channels ?
 		efx->n_channels - efx->n_tx_channels : 0;
 
+	if (efx->xdp_tx_queue_count) {
+		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+		/* Allocate array for XDP TX queue lookup. */
+		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+					     sizeof(*efx->xdp_tx_queues),
+					     GFP_KERNEL);
+		if (!efx->xdp_tx_queues)
+			return -ENOMEM;
+	}
+
 	/* We need to mark which channels really have RX and TX
 	 * queues, and adjust the TX queue numbers if we have separate
 	 * RX-only and TX-only channels.
 	 */
+	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
 
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			tx_queue->queue -= (efx->tx_channel_offset *
 					    EFX_TXQ_TYPES);
+
+			if (efx_channel_is_xdp_tx(channel) &&
+			    xdp_queue_number < efx->xdp_tx_queue_count) {
+				efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+				xdp_queue_number++;
+			}
+		}
 	}
+	return 0;
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
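Note: the xdp_tx_queues array built above gives O(1) lookup from a CPU number to its dedicated XDP TX queue. A minimal sketch of how a transmit path might consume it (the helper name is hypothetical and not part of this patch):

    /* Hypothetical helper: map the executing CPU to its XDP TX queue.
     * One queue was allocated per possible CPU, so the bounds check is
     * purely defensive.
     */
    static struct efx_tx_queue *efx_xdp_get_tx_queue(struct efx_nic *efx)
    {
    	unsigned int cpu = raw_smp_processor_id();

    	if (unlikely(cpu >= efx->xdp_tx_queue_count))
    		return NULL;	/* caller should drop the frame */
    	return efx->xdp_tx_queues[cpu];
    }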
@@ -1781,7 +1893,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		goto fail1;
 
-	efx_set_channels(efx);
+	rc = efx_set_channels(efx);
+	if (rc)
+		goto fail1;
 
 	/* dimension_resources can fail with EAGAIN */
 	rc = efx->type->dimension_resources(efx);
@@ -2091,6 +2205,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			channel->irq_moderation_us = rx_usecs;
 		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation_us = tx_usecs;
+		else if (efx_channel_is_xdp_tx(channel))
+			channel->irq_moderation_us = tx_usecs;
 	}
 
 	return 0;
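Note: this assumes efx_channel_has_tx_queues() does not report XDP-only channels (their queues serve XDP rather than the core stack), so without the new branch those channels would be skipped by the loop; with it they adopt the same tx_usecs moderation value as ordinary TX channels.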