@@ -789,24 +789,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 					      adapter->num_io_queues);
 }
 
-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
-{
-	if (likely(req_id < rx_ring->ring_size))
-		return 0;
-
-	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-		  "Invalid rx req_id: %hu\n", req_id);
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_req_id++;
-	u64_stats_update_end(&rx_ring->syncp);
-
-	/* Trigger device reset */
-	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
-	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
-	return -EFAULT;
-}
-
 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
  * @adapter: network interface device structure
  * @qid: queue index
@@ -926,10 +908,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 			     struct ena_rx_buffer *rx_info, gfp_t gfp)
 {
+	int headroom = rx_ring->rx_headroom;
 	struct ena_com_buf *ena_buf;
 	struct page *page;
 	dma_addr_t dma;
 
+	/* restore page offset value in case it has been changed by device */
+	rx_info->page_offset = headroom;
+
 	/* if previous allocated page is not used */
 	if (unlikely(rx_info->page))
 		return 0;
@@ -959,10 +945,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		  "Allocate page %p, rx_info %p\n", page, rx_info);
 
 	rx_info->page = page;
-	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
-	ena_buf->paddr = dma + rx_ring->rx_headroom;
-	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
+	ena_buf->paddr = dma + headroom;
+	ena_buf->len = ENA_PAGE_SIZE - headroom;
 
 	return 0;
 }
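With these two hunks, the static offset bookkeeping lives entirely in the refill path: page_offset is reset to the ring's headroom every time a buffer is (re)posted, and the DMA address and length handed to the device account for that same headroom. A minimal sketch of the resulting page layout, assuming rx_headroom and ENA_PAGE_SIZE as used elsewhere in this driver:

	/* refill time (ena_alloc_rx_page): the page is split into
	 * [headroom][device-writable area]
	 */
	rx_info->page_offset = headroom;            /* where the driver will start reading */
	ena_buf->paddr = dma + headroom;            /* where the device starts writing */
	ena_buf->len   = ENA_PAGE_SIZE - headroom;  /* bytes the device may write */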
@@ -1356,15 +1341,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	struct ena_rx_buffer *rx_info;
 	u16 len, req_id, buf = 0;
 	void *va;
-	int rc;
 
 	len = ena_bufs[buf].len;
 	req_id = ena_bufs[buf].req_id;
 
-	rc = validate_rx_req_id(rx_ring, req_id);
-	if (unlikely(rc < 0))
-		return NULL;
-
 	rx_info = &rx_ring->rx_buffer_info[req_id];
 
 	if (unlikely(!rx_info->page)) {
@@ -1379,7 +1359,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	/* save virt address of first buffer */
 	va = page_address(rx_info->page) + rx_info->page_offset;
-	prefetch(va + NET_IP_ALIGN);
+
+	prefetch(va);
 
 	if (len <= rx_ring->rx_copybreak) {
 		skb = ena_alloc_skb(rx_ring, false);
@@ -1420,8 +1401,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
 			rx_info->page_offset, len, ENA_PAGE_SIZE);
-	/* The offset is non zero only for the first buffer */
-	rx_info->page_offset = 0;
 
 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 		  "RX skb updated. len %d. data_len %d\n",
@@ -1440,10 +1419,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		len = ena_bufs[buf].len;
 		req_id = ena_bufs[buf].req_id;
 
-		rc = validate_rx_req_id(rx_ring, req_id);
-		if (unlikely(rc < 0))
-			return NULL;
-
 		rx_info = &rx_ring->rx_buffer_info[req_id];
 	} while (1);
 
@@ -1544,8 +1519,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	int ret;
 
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-	xdp->data = page_address(rx_info->page) +
-		rx_info->page_offset + rx_ring->rx_headroom;
+	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_hard_start = page_address(rx_info->page);
 	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
@@ -1612,8 +1586,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		if (unlikely(ena_rx_ctx.descs == 0))
 			break;
 
+		/* First descriptor might have an offset set by the device */
 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-		rx_info->page_offset = ena_rx_ctx.pkt_offset;
+		rx_info->page_offset += ena_rx_ctx.pkt_offset;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
@@ -1697,12 +1672,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 error:
 	adapter = netdev_priv(rx_ring->netdev);
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_desc_num++;
-	u64_stats_update_end(&rx_ring->syncp);
+	if (rc == -ENOSPC) {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_desc_num++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+	} else {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_req_id++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	}
 
-	/* Too many desc from the device. Trigger reset */
-	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
 	return 0;
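This rework is what lets validate_rx_req_id() (removed in the first hunk) go away: the request id is now validated when the completion is parsed, and the error code simply propagates to this label, where -ENOSPC (more descriptors than the driver can handle) and any other failure (such as an invalid req_id) are accounted separately and mapped to different reset reasons. A hedged sketch of the flow feeding rc, assuming it comes from ena_com_rx_pkt() as in the surrounding ena_clean_rx_irq():

	rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
			    rx_ring->ena_com_io_sq,
			    &ena_rx_ctx);
	if (unlikely(rc))
		goto error;	/* classified into bad_desc_num vs. bad_req_id above */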
@@ -3388,16 +3369,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 		goto err_mmio_read_less;
 	}
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
 	if (rc) {
-		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
-		goto err_mmio_read_less;
-	}
-
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
-	if (rc) {
-		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
-			rc);
+		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
 		goto err_mmio_read_less;
 	}
 
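dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, so the separate pci_set_dma_mask()/pci_set_consistent_dma_mask() pair collapses into a single check. A generic usage sketch (illustrative probe-time pattern, not this driver's exact flow):

	/* try the device's reported addressing width, fall back to 32-bit DMA */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;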
@@ -4167,6 +4141,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
+	if (rc) {
+		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
+		goto err_disable_device;
+	}
+
 	pci_set_master(pdev);
 
 	ena_dev = vzalloc(sizeof(*ena_dev));