@@ -2449,7 +2449,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		}
 	}

-	if (list_empty(&tp->rx_done))
+	if (list_empty(&tp->rx_done) || work_done >= budget)
 		goto out1;

 	clear_bit(RX_EPROTO, &tp->flags);
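With this hunk, rx_bottom() also bails out early when the budget is already spent, leaving anything still on rx_done for the next NAPI poll. A rough sketch of the contract this relies on follows (poll_sketch() is a hypothetical name; the real r8152_poll() does more, e.g. TX handling):

/* Sketch only: how a NAPI poll callback consumes rx_bottom()'s budget
 * accounting.  Returning `budget` asks the core to poll again soon;
 * returning less lets the poll complete.
 */
static int poll_sketch(struct napi_struct *napi, int budget)
{
	struct r8152 *tp = container_of(napi, struct r8152, napi);
	int work_done = rx_bottom(tp, budget);	/* never exceeds budget */

	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    !list_empty(&tp->rx_done))
		napi_schedule(napi);	/* work arrived while completing */

	return work_done;
}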
@@ -2465,6 +2465,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		struct urb *urb;
 		u8 *rx_data;

+		/* A bulk transfer of USB may contain many packets, so the
+		 * total number of packets may exceed the budget. Deal with
+		 * all packets in the current bulk transfer, and do not
+		 * handle the next bulk transfer until the next schedule,
+		 * if the budget is exhausted.
+		 */
+		if (work_done >= budget)
+			break;
+
 		list_del_init(cursor);

 		agg = list_entry(cursor, struct rx_agg, list);
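The added check matters because each list entry is one completed USB bulk transfer (an rx_agg) that can carry several Ethernet frames. Stripped of the driver details, the loop now has roughly this shape (illustrative skeleton, not the full code):

list_for_each_safe(cursor, next, &rx_queue) {
	/* Stop before starting another aggregate once the budget is
	 * gone; the current aggregate is still drained completely, and
	 * its over-budget packets are parked on tp->rx_queue below.
	 */
	if (work_done >= budget)
		break;

	/* ... dequeue the aggregate and parse every packet in it ... */
}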
@@ -2481,12 +2490,11 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		while (urb->actual_length > len_used) {
 			struct net_device *netdev = tp->netdev;
 			struct net_device_stats *stats = &netdev->stats;
-			unsigned int pkt_len, rx_frag_head_sz;
+			unsigned int pkt_len, rx_frag_head_sz, len;
 			struct sk_buff *skb;
+			bool use_frags;

-			/* limit the skb numbers for rx_queue */
-			if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
-				break;
+			WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);

 			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
 			if (pkt_len < ETH_ZLEN)
@@ -2497,45 +2505,77 @@ static int rx_bottom(struct r8152 *tp, int budget)
 				break;

 			pkt_len -= ETH_FCS_LEN;
+			len = pkt_len;
 			rx_data += sizeof(struct rx_desc);

-			if (!agg_free || tp->rx_copybreak > pkt_len)
-				rx_frag_head_sz = pkt_len;
+			if (!agg_free || tp->rx_copybreak > len)
+				use_frags = false;
 			else
-				rx_frag_head_sz = tp->rx_copybreak;
+				use_frags = true;
+
+			if (use_frags) {
+				/* If the budget is exhausted, the packet
+				 * would be queued in the driver. That is,
+				 * napi_gro_frags() wouldn't be called, so
+				 * we couldn't use napi_get_frags().
+				 */
+				if (work_done >= budget) {
+					rx_frag_head_sz = tp->rx_copybreak;
+					skb = napi_alloc_skb(napi,
+							     rx_frag_head_sz);
+				} else {
+					rx_frag_head_sz = 0;
+					skb = napi_get_frags(napi);
+				}
+			} else {
+				rx_frag_head_sz = 0;
+				skb = napi_alloc_skb(napi, len);
+			}

-			skb = napi_alloc_skb(napi, rx_frag_head_sz);
 			if (!skb) {
 				stats->rx_dropped++;
 				goto find_next_rx;
 			}

 			skb->ip_summed = r8152_rx_csum(tp, rx_desc);
-			memcpy(skb->data, rx_data, rx_frag_head_sz);
-			skb_put(skb, rx_frag_head_sz);
-			pkt_len -= rx_frag_head_sz;
-			rx_data += rx_frag_head_sz;
-			if (pkt_len) {
+			rtl_rx_vlan_tag(rx_desc, skb);
+
+			if (use_frags) {
+				if (rx_frag_head_sz) {
+					memcpy(skb->data, rx_data,
+					       rx_frag_head_sz);
+					skb_put(skb, rx_frag_head_sz);
+					len -= rx_frag_head_sz;
+					rx_data += rx_frag_head_sz;
+					skb->protocol = eth_type_trans(skb,
+								       netdev);
+				}
+
 				skb_add_rx_frag(skb, 0, agg->page,
 						agg_offset(agg, rx_data),
-						pkt_len,
-						SKB_DATA_ALIGN(pkt_len));
+						len, SKB_DATA_ALIGN(len));
 				get_page(agg->page);
+			} else {
+				memcpy(skb->data, rx_data, len);
+				skb_put(skb, len);
+				skb->protocol = eth_type_trans(skb, netdev);
 			}

-			skb->protocol = eth_type_trans(skb, netdev);
-			rtl_rx_vlan_tag(rx_desc, skb);
 			if (work_done < budget) {
+				if (use_frags)
+					napi_gro_frags(napi);
+				else
+					napi_gro_receive(napi, skb);
+
 				work_done++;
 				stats->rx_packets++;
-				stats->rx_bytes += skb->len;
-				napi_gro_receive(napi, skb);
+				stats->rx_bytes += pkt_len;
 			} else {
 				__skb_queue_tail(&tp->rx_queue, skb);
 			}

 find_next_rx:
-			rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
+			rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN);
 			rx_desc = (struct rx_desc *)rx_data;
 			len_used = agg_offset(agg, rx_data);
 			len_used += sizeof(struct rx_desc);
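The allocation policy introduced above can be factored out for clarity. The helper below is hypothetical (rx_alloc_sketch() is not in the driver) and only restates the three cases; the key constraint is that an skb from napi_get_frags() can only be handed back via napi_gro_frags(), so it cannot be parked on rx_queue for a later poll:

/* Hypothetical helper, not in the driver: restates the skb allocation
 * policy of the hunk above.
 */
static struct sk_buff *rx_alloc_sketch(struct napi_struct *napi,
				       struct r8152 *tp, bool agg_free,
				       unsigned int len, int work_done,
				       int budget, bool *use_frags,
				       unsigned int *head_sz)
{
	*head_sz = 0;

	/* Small frame or no spare aggregate: copy the whole frame. */
	if (!agg_free || tp->rx_copybreak > len) {
		*use_frags = false;
		return napi_alloc_skb(napi, len);
	}

	*use_frags = true;

	/* Over budget: the skb must survive on tp->rx_queue until the
	 * next poll, and a napi_get_frags() skb may only be passed to
	 * napi_gro_frags(), so allocate a normal skb and copy just a
	 * copybreak-sized head; the payload is attached as a page frag.
	 */
	if (work_done >= budget) {
		*head_sz = tp->rx_copybreak;
		return napi_alloc_skb(napi, *head_sz);
	}

	/* Fast path within budget: zero-copy payload for GRO. */
	return napi_get_frags(napi);
}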
@@ -2564,9 +2604,10 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		}
 	}

+	/* Splice the remaining list back to rx_done for the next schedule */
 	if (!list_empty(&rx_queue)) {
 		spin_lock_irqsave(&tp->rx_lock, flags);
-		list_splice_tail(&rx_queue, &tp->rx_done);
+		list_splice(&rx_queue, &tp->rx_done);
 		spin_unlock_irqrestore(&tp->rx_lock, flags);
 	}
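Switching list_splice_tail() to list_splice() is an ordering fix: leftovers that were pulled off rx_done but not processed must go back to the head, ahead of any transfers that completed in the meantime. A standalone illustration (example entry names assumed):

/* Illustration only.  Suppose the poll took { old1, old2 } off rx_done
 * and ran out of budget, while { new1, new2 } completed meanwhile:
 *
 *   list_splice(&rx_queue, &tp->rx_done);
 *       -> rx_done = old1, old2, new1, new2   (arrival order kept)
 *
 *   list_splice_tail(&rx_queue, &tp->rx_done);
 *       -> rx_done = new1, new2, old1, old2   (older data reordered)
 */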