@@ -1561,7 +1561,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 
 #ifdef ENA_XDP_SUPPORT
 	if (xdp_flags & ENA_XDP_REDIRECT)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_flags & ENA_XDP_TX)
 		ena_ring_tx_doorbell(rx_ring->xdp_ring);
 #endif
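
Note: xdp_do_flush() is the current upstream name for this flush; xdp_do_flush_map() was a deprecated alias that has since been removed from mainline. Out-of-tree builds that must still compile against older kernels commonly carry a compat shim along these lines; the HAVE_XDP_DO_FLUSH guard below is illustrative, not taken from this driver:

/* On kernels that predate xdp_do_flush(), fall back to the old
 * xdp_do_flush_map() name; newer kernels provide it natively.
 */
#ifndef HAVE_XDP_DO_FLUSH
#define xdp_do_flush() xdp_do_flush_map()
#endif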
@@ -3462,15 +3462,21 @@ static int ena_calc_io_queue_size(struct ena_adapter *adapter,
 {
 	struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
 	struct ena_com_dev *ena_dev = adapter->ena_dev;
-	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
 	u32 max_tx_queue_size;
 	u32 max_rx_queue_size;
+	u32 tx_queue_size;
 
 	/* If this function is called after driver load, the ring sizes have already
 	 * been configured. Take it into account when recalculating ring size.
 	 */
-	if (adapter->tx_ring->ring_size)
+	if (adapter->tx_ring->ring_size) {
 		tx_queue_size = adapter->tx_ring->ring_size;
+	} else if (adapter->llq_policy == ENA_LLQ_HEADER_SIZE_POLICY_LARGE &&
+		   ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+		tx_queue_size = ENA_DEFAULT_WIDE_LLQ_RING_SIZE;
+	} else {
+		tx_queue_size = ENA_DEFAULT_RING_SIZE;
+	}
 
 	if (adapter->rx_ring->ring_size)
 		rx_queue_size = adapter->rx_ring->ring_size;
@@ -3513,6 +3519,33 @@ static int ena_calc_io_queue_size(struct ena_adapter *adapter,
 					  max_queues->max_packet_rx_descs);
 	}
 
+	if (adapter->llq_policy == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) {
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+			u32 max_wide_llq_size = max_tx_queue_size;
+
+			if (llq->max_wide_llq_depth == 0) {
+				/* if there is no large llq max depth from device, we divide
+				 * the queue size by 2, leaving the amount of memory
+				 * used by the queues unchanged.
+				 */
+				max_wide_llq_size /= 2;
+			} else if (llq->max_wide_llq_depth < max_wide_llq_size) {
+				max_wide_llq_size = llq->max_wide_llq_depth;
+			}
+			if (max_wide_llq_size != max_tx_queue_size) {
+				max_tx_queue_size = max_wide_llq_size;
+				dev_info(&adapter->pdev->dev,
+					 "Forcing large headers and decreasing maximum TX queue size to %d\n",
+					 max_tx_queue_size);
+			}
+		} else {
+			dev_err(&adapter->pdev->dev,
+				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+			adapter->llq_policy = ENA_LLQ_HEADER_SIZE_POLICY_NORMAL;
+		}
+	}
+
 	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
 	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
 
@@ -3528,23 +3561,6 @@ static int ena_calc_io_queue_size(struct ena_adapter *adapter,
 		return -EFAULT;
 	}
 
-	/* When forcing large headers, we multiply the entry size by 2, and therefore divide
-	 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
-	 */
-	if (adapter->llq_policy == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) {
-		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-			max_tx_queue_size /= 2;
-			dev_info(&adapter->pdev->dev,
-				 "Forcing large headers and decreasing maximum TX queue size to %d\n",
-				 max_tx_queue_size);
-		} else {
-			dev_err(&adapter->pdev->dev,
-				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
-
-			adapter->llq_policy = ENA_LLQ_HEADER_SIZE_POLICY_NORMAL;
-		}
-	}
-
 	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
 				  max_tx_queue_size);
 	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
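
Taken together, the two hunks above move the large-headers adjustment from after the queue-size validation to before the power-of-two rounddown, and make it respect a device-reported limit: when the device advertises max_wide_llq_depth, that value caps the TX queue size, and only when it reports 0 does the driver fall back to halving, since doubling the LLQ entry size for large headers halves how many entries fit in the same device memory. A minimal sketch of the resulting rule as a standalone function (the helper name is illustrative, not from this driver):

/* Cap the TX queue size for wide (large-header) LLQ mode. */
static u32 wide_llq_max_size(u32 max_tx_queue_size, u32 max_wide_llq_depth)
{
	if (max_wide_llq_depth == 0)
		return max_tx_queue_size / 2;

	return min(max_wide_llq_depth, max_tx_queue_size);
}

This also explains the new ENA_DEFAULT_WIDE_LLQ_RING_SIZE default earlier in the function: rings created in wide-LLQ mode now start from a default sized for that reduced capacity instead of ENA_DEFAULT_RING_SIZE.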
@@ -3808,6 +3824,11 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
 	if (unlikely(rc))
 		goto err_admin_init;
 
+	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		dev_info(&pdev->dev, "ENA Large LLQ is %s\n",
+			 adapter->llq_policy == ENA_LLQ_HEADER_SIZE_POLICY_LARGE ?
+			 "enabled" : "disabled");
+
 	/* Turned on features shouldn't change due to reset. */
 	prev_netdev_features = adapter->netdev->features;
 	ena_set_dev_offloads(get_feat_ctx, adapter->netdev);
@@ -4070,11 +4091,11 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct en
 	struct net_device *netdev = adapter->netdev;
 	unsigned long jiffies_since_last_napi;
 	unsigned long jiffies_since_last_intr;
+	u32 missed_tx = 0, new_missed_tx = 0;
 	unsigned long graceful_timeout;
 	struct ena_tx_buffer *tx_buf;
 	unsigned long timeout;
 	int napi_scheduled;
-	u32 missed_tx = 0;
 	bool is_expired;
 	int i, rc = 0;
 
@@ -4117,20 +4138,24 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct en
 				reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
 			}
 
+			missed_tx++;
+
 			if (tx_buf->print_once)
 				continue;
 
+			/* Add new TX completions which are missed */
+			new_missed_tx++;
+
 			netif_notice(adapter, tx_err, netdev,
 				     "TX hasn't completed, qid %d, index %d. %u msecs since last interrupt, %u msecs since last napi execution, napi scheduled: %d\n",
 				     tx_ring->qid, i, jiffies_to_msecs(jiffies_since_last_intr),
 				     jiffies_to_msecs(jiffies_since_last_napi), napi_scheduled);
 
-			missed_tx++;
 			tx_buf->print_once = 1;
 		}
 	}
 
-	/* Checking if this TX ring got to max missing TX completes */
+	/* Check if missing TX completions on this ring have passed the threshold */
 	if (unlikely(missed_tx > missed_tx_thresh)) {
 		jiffies_since_last_intr = jiffies - READ_ONCE(ena_napi->last_intr_jiffies);
 		jiffies_since_last_napi = jiffies - READ_ONCE(tx_ring->tx_stats.last_napi_jiffies);
@@ -4156,7 +4181,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct en
 		rc = -EIO;
 	}
 
-	ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, &tx_ring->syncp);
+	/* Add the newly discovered missing TX completions */
+	ena_increase_stat(&tx_ring->tx_stats.missed_tx, new_missed_tx, &tx_ring->syncp);
 
 	return rc;
 }
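
The counter split above separates the reset threshold from the statistic: missed_tx counts every completion still missing on the current watchdog pass, so the threshold check reflects the ring's present state, while new_missed_tx counts only completions observed missing for the first time (print_once not yet set), so the missed_tx statistic is no longer re-incremented for the same stuck packet on every pass. A condensed sketch of the counting logic; the completion_expired() helper is illustrative:

	u32 missed_tx = 0, new_missed_tx = 0;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_buf = &tx_ring->tx_buffer_info[i];

		if (!completion_expired(tx_buf))
			continue;

		/* Every pass: feeds the reset-threshold comparison */
		missed_tx++;

		if (tx_buf->print_once)
			continue;

		/* First sighting only: feeds the missed_tx statistic */
		new_missed_tx++;
		tx_buf->print_once = 1;
	}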
@@ -4288,7 +4314,12 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
 		netif_err(adapter, drv, adapter->netdev,
 			  "ENA admin queue is not in running state!\n");
-		ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
+		ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
+				  &adapter->syncp);
+		if (ena_com_get_missing_admin_interrupt(adapter->ena_dev))
+			ena_reset_device(adapter, ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT);
+		else
+			ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
 	}
 }
 
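
The new branch distinguishes two failure modes when the admin queue is no longer running, and bumps the admin_q_pause statistic in both cases: if ena_com reports that an admin completion was produced but its interrupt was never delivered, the reset reason becomes ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; otherwise it stays the generic admin timeout. A hypothetical sketch of the predicate; the is_missing_admin_interrupt field is an assumption, and the real implementation lives in ena_com outside this diff:

/* Hypothetical: returns true when an admin completion was observed
 * without a corresponding interrupt.
 */
bool ena_com_get_missing_admin_interrupt(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.is_missing_admin_interrupt;
}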