@@ -1329,6 +1329,23 @@ static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
 	}
 }
 
+static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
+				 struct rx_tpa_start_cmp *tpa_start,
+				 struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+	tpa_info->vlan_valid = 0;
+	if (TPA_START_VLAN_VALID(tpa_start)) {
+		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+		u32 vlan_proto = ETH_P_8021Q;
+
+		tpa_info->vlan_valid = 1;
+		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+			vlan_proto = ETH_P_8021AD;
+		tpa_info->metadata = vlan_proto << 16 |
+				     TPA_START_METADATA0_TCI(tpa_start1);
+	}
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
 			   struct rx_tpa_start_cmp_ext *tpa_start1)
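The new helper packs the stripped VLAN protocol into the upper 16 bits of tpa_info->metadata and the TCI into the lower 16 bits. A minimal user-space sketch of that layout, assuming only the standard EtherType values (the helper name here is illustrative, not the driver's):

```c
#include <stdio.h>

#define ETH_P_8021Q  0x8100	/* 802.1Q customer VLAN tag */
#define ETH_P_8021AD 0x88A8	/* 802.1ad service VLAN tag */

/* Pack a TPA metadata word the way bnxt_tpa_metadata_v2() does:
 * protocol in bits 31:16, TCI (PCP/DEI/VID) in bits 15:0.
 */
static unsigned int pack_tpa_metadata(unsigned int vlan_proto, unsigned int tci)
{
	return vlan_proto << 16 | (tci & 0xffff);
}

int main(void)
{
	unsigned int md = pack_tpa_metadata(ETH_P_8021AD, 0x2064); /* PCP 1, VID 100 */

	printf("metadata=0x%08x proto=0x%04x tci=0x%04x\n",
	       md, md >> 16, md & 0xffff);
	return 0;
}
```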
@@ -1378,12 +1395,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
 			RX_TPA_START_CMP_LEN_SHIFT;
 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
-		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
-
 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
 		tpa_info->gso_type = SKB_GSO_TCPV4;
+		if (TPA_START_IS_IPV6(tpa_start1))
+			tpa_info->gso_type = SKB_GSO_TCPV6;
 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
-		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
+		else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+			 TPA_START_HASH_TYPE(tpa_start) == 3)
 			tpa_info->gso_type = SKB_GSO_TCPV6;
 		tpa_info->rss_hash =
 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
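The reordered logic above amounts to: default to TCPv4 GSO, switch to TCPv6 whenever the completion flags an IPv6 flow, and only fall back to the legacy "RSS profile 3" heuristic for the older TPA-start format. A hedged stand-alone sketch of that decision (names and types are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

enum gso { GSO_TCPV4, GSO_TCPV6 };

/* Mirror of the updated selection in bnxt_tpa_start(): the IPv6 flag in the
 * completion wins; the hash_type == 3 fallback only applies to the legacy
 * TPA-start completion format.
 */
static enum gso pick_gso_type(bool is_ipv6, bool legacy_tpa_start,
			      unsigned int hash_type)
{
	if (is_ipv6)
		return GSO_TCPV6;
	if (legacy_tpa_start && hash_type == 3)
		return GSO_TCPV6;
	return GSO_TCPV4;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_gso_type(true, false, 0),	/* IPv6 flag set      -> GSO_TCPV6 */
	       pick_gso_type(false, true, 3),	/* legacy, profile 3  -> GSO_TCPV6 */
	       pick_gso_type(false, false, 3));	/* V3, no IPv6 flag   -> GSO_TCPV4 */
	return 0;
}
```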
@@ -1394,7 +1412,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	}
 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
-	bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+	else
+		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
 	tpa_info->agg_count = 0;
 
 	rxr->rx_prod = NEXT_RX(prod);
@@ -1816,13 +1837,43 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
 		else
 			goto vlan_err;
+	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+		if (RX_CMP_VLAN_VALID(rxcmp)) {
+			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+				vlan_proto = htons(ETH_P_8021Q);
+			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+				vlan_proto = htons(ETH_P_8021AD);
+			else
+				goto vlan_err;
+			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+		}
 	}
 	return skb;
vlan_err:
 	dev_kfree_skb(skb);
 	return NULL;
 }
 
+static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
+					   struct rx_cmp *rxcmp)
+{
+	u8 ext_op;
+
+	ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
+	switch (ext_op) {
+	case EXT_OP_INNER_4:
+	case EXT_OP_OUTER_4:
+	case EXT_OP_INNFL_3:
+	case EXT_OP_OUTFL_3:
+		return PKT_HASH_TYPE_L4;
+	default:
+		return PKT_HASH_TYPE_L3;
+	}
+}
+
 /* returns the following:
  * 1 - 1 packet successfully received
  * 0 - successful TPA_START, packet not completed yet
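bnxt_rss_ext_op() above classifies the RSS extraction opcode reported in the V3 completion: 4-tuple (and 3-tuple-plus-flow-label) extractions mean the hash covered L4 information, everything else is treated as an L3-only hash. A user-space sketch of that mapping, with placeholder opcode values (the real EXT_OP_* encodings come from the driver's completion-ring definitions):

```c
#include <stdio.h>

/* Placeholder opcode values for illustration only. */
enum rss_ext_op {
	EXT_OP_INNER_4 = 0,	/* inner 4-tuple */
	EXT_OP_OUTER_4 = 2,	/* outer 4-tuple */
	EXT_OP_INNFL_3 = 8,	/* inner 3-tuple + flow label */
	EXT_OP_OUTFL_3 = 10,	/* outer 3-tuple + flow label */
};

enum hash_level { HASH_L3, HASH_L4 };

/* Same shape as bnxt_rss_ext_op(): opcodes that fold in ports or the flow
 * label are reported as an L4-quality hash, the rest as L3.
 */
static enum hash_level rss_ext_op_to_hash(enum rss_ext_op op)
{
	switch (op) {
	case EXT_OP_INNER_4:
	case EXT_OP_OUTER_4:
	case EXT_OP_INNFL_3:
	case EXT_OP_OUTFL_3:
		return HASH_L4;
	default:
		return HASH_L3;
	}
}

int main(void)
{
	printf("%d %d\n", rss_ext_op_to_hash(EXT_OP_INNER_4),
	       rss_ext_op_to_hash((enum rss_ext_op)5));
	return 0;
}
```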
@@ -1839,7 +1890,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct rx_cmp *rxcmp;
 	struct rx_cmp_ext *rxcmp1;
 	u32 tmp_raw_cons = *raw_cons;
-	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
 	struct bnxt_sw_rx_bd *rx_buf;
 	unsigned int len;
 	u8 *data_ptr, agg_bufs, cmp_type;
@@ -1875,7 +1926,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	dma_rmb();
 	prod = rxr->rx_prod;
 
-	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
 		bnxt_tpa_start(bp, rxr, cmp_type,
 			       (struct rx_tpa_start_cmp *)rxcmp,
 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
@@ -2030,17 +2082,27 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	}
 
 	if (RX_CMP_HASH_VALID(rxcmp)) {
-		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
-		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+		enum pkt_hash_types type;
 
-		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
-		if (hash_type != 1 && hash_type != 3)
-			type = PKT_HASH_TYPE_L3;
+		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+			type = bnxt_rss_ext_op(bp, rxcmp);
+		} else {
+			u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+
+			/* RSS profiles 1 and 3 with extract code 0 for inner
+			 * 4-tuple
+			 */
+			if (hash_type != 1 && hash_type != 3)
+				type = PKT_HASH_TYPE_L3;
+			else
+				type = PKT_HASH_TYPE_L4;
+		}
 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
 	}
 
-	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
-	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
+	if (cmp_type == CMP_TYPE_RX_L2_CMP)
+		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+	skb->protocol = eth_type_trans(skb, dev);
 
 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
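The else branch above keeps the pre-existing rule for the legacy completion format: RSS hash profiles 1 and 3 (extract code 0, inner 4-tuple) are reported to the stack as an L4 hash, any other profile as L3. A minimal sketch of that legacy rule:

```c
#include <stdio.h>

enum hash_level { HASH_L3, HASH_L4 };

/* Legacy-path rule from bnxt_rx_pkt(): only RSS profiles 1 and 3 carry an
 * inner 4-tuple hash, so only those are reported as L4.
 */
static enum hash_level legacy_rss_hash_level(unsigned int hash_type)
{
	return (hash_type == 1 || hash_type == 3) ? HASH_L4 : HASH_L3;
}

int main(void)
{
	for (unsigned int t = 0; t < 4; t++)
		printf("profile %u -> %s\n", t,
		       legacy_rss_hash_level(t) == HASH_L4 ? "L4" : "L3");
	return 0;
}
```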
@@ -2127,7 +2189,8 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
 	 */
 	dma_rmb();
 	cmp_type = RX_CMP_TYPE(rxcmp);
-	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
@@ -2651,6 +2714,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	cpr->has_more_work = 0;
 	cpr->had_work_done = 1;
 	while (1) {
+		u8 cmp_type;
 		int rc;
 
 		cons = RING_CMP(raw_cons);
@@ -2663,7 +2727,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		 * reading any further.
 		 */
 		dma_rmb();
-		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+		cmp_type = TX_CMP_TYPE(txcmp);
+		if (cmp_type == CMP_TYPE_TX_L2_CMP) {
 			u32 opaque = txcmp->tx_cmp_opaque;
 			struct bnxt_tx_ring_info *txr;
 			u16 tx_freed;
@@ -2681,7 +2746,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				cpr->has_more_work = 1;
 				break;
 			}
-		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
 			if (likely(budget))
 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
 			else
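The old test `(TX_CMP_TYPE(txcmp) & 0x30) == 0x10` matched RX completions by a bit pattern in the type field; the replacement checks an explicit range whose upper bound is the new CMP_TYPE_RX_L2_TPA_START_V3_CMP, so the V3 RX and TPA-start completions are also routed to bnxt_rx_pkt(). A small sketch comparing the two predicates, using illustrative numeric values for the CMP_TYPE_* constants (the real encodings live in the driver headers):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; not the driver's actual definitions. */
#define CMP_TYPE_RX_L2_CMP		0x11
#define CMP_TYPE_RX_L2_TPA_START_V3_CMP	0x19

/* Old predicate: match by bit pattern in the completion type field. */
static bool is_rx_cmp_old(unsigned int cmp_type)
{
	return (cmp_type & 0x30) == 0x10;
}

/* New predicate: an explicit range covering the known RX completion types. */
static bool is_rx_cmp_new(unsigned int cmp_type)
{
	return cmp_type >= CMP_TYPE_RX_L2_CMP &&
	       cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP;
}

int main(void)
{
	/* Print the types (in a small window) where the two predicates differ. */
	for (unsigned int t = 0x0f; t <= 0x20; t++)
		if (is_rx_cmp_old(t) != is_rx_cmp_new(t))
			printf("type 0x%02x: old=%d new=%d\n", t,
			       is_rx_cmp_old(t), is_rx_cmp_new(t));
	return 0;
}
```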
@@ -2698,12 +2764,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				rx_pkts++;
 			else if (rc == -EBUSY)	/* partial completion */
 				break;
-		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
-				     CMPL_BASE_TYPE_HWRM_DONE) ||
-				    (TX_CMP_TYPE(txcmp) ==
-				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
-				    (TX_CMP_TYPE(txcmp) ==
-				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
 			bnxt_hwrm_handler(bp, txcmp);
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -5826,6 +5889,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
+		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
 		if (bp->max_tpa_v2) {
 			if (BNXT_CHIP_P5(bp))
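The qcaps change follows the existing pattern in bnxt_hwrm_vnic_qcaps(): each firmware capability flag in the response is translated into a driver-side capability bit. A hedged sketch of that flag-to-cap translation, with made-up bit values standing in for the real VNIC_QCAPS_RESP_FLAGS_* and BNXT_RSS_CAP_* definitions:

```c
#include <stdio.h>

/* Made-up bit positions for illustration; the real flag and cap values are
 * defined by the firmware interface and the driver respectively.
 */
#define QCAPS_FLAG_RSS_HASH_TYPE_DELTA	(1u << 0)
#define QCAPS_FLAG_RSS_PROF_TCAM_MODE	(1u << 1)

#define RSS_CAP_HASH_TYPE_DELTA		(1u << 0)
#define RSS_CAP_RSS_TCAM		(1u << 1)

/* Translate firmware qcaps flags into driver rss_cap bits, mirroring the
 * accumulation style used in bnxt_hwrm_vnic_qcaps().
 */
static unsigned int rss_caps_from_qcaps(unsigned int flags)
{
	unsigned int rss_cap = 0;

	if (flags & QCAPS_FLAG_RSS_HASH_TYPE_DELTA)
		rss_cap |= RSS_CAP_HASH_TYPE_DELTA;
	if (flags & QCAPS_FLAG_RSS_PROF_TCAM_MODE)
		rss_cap |= RSS_CAP_RSS_TCAM;
	return rss_cap;
}

int main(void)
{
	printf("caps=0x%x\n", rss_caps_from_qcaps(QCAPS_FLAG_RSS_PROF_TCAM_MODE));
	return 0;
}
```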