@@ -47,7 +47,8 @@ module_param(napi_tx, bool, 0644);
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
 
-#define VIRTIO_XDP_FLAG	BIT(0)
+#define VIRTIO_XDP_FLAG		BIT(0)
+#define VIRTIO_ORPHAN_FLAG	BIT(1)
 
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
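
Aside: both flags ride in the low bits of the cookie handed to the virtqueue, which works because skb and xdp_frame pointers are at least 4-byte aligned, so bits 0 and 1 are always zero. A minimal user-space sketch of the tagging idea (hypothetical names, not driver API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define XDP_FLAG	(1UL << 0)
#define ORPHAN_FLAG	(1UL << 1)

static void *tag_ptr(void *p, unsigned long flags)
{
	/* low bits are free because malloc'd objects are word-aligned */
	return (void *)((uintptr_t)p | flags);
}

static void *untag_ptr(void *p)
{
	return (void *)((uintptr_t)p & ~(XDP_FLAG | ORPHAN_FLAG));
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));
	void *cookie = tag_ptr(obj, ORPHAN_FLAG);

	printf("orphan=%lu untagged-matches=%d\n",
	       (unsigned long)((uintptr_t)cookie & ORPHAN_FLAG),
	       untag_ptr(cookie) == (void *)obj);
	free(obj);
	return 0;
}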
@@ -85,6 +86,8 @@ struct virtnet_stat_desc {
 struct virtnet_sq_free_stats {
 	u64 packets;
 	u64 bytes;
+	u64 napi_packets;
+	u64 napi_bytes;
 };
 
 struct virtnet_sq_stats {
@@ -506,29 +509,50 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
 	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 }
 
-static void __free_old_xmit(struct send_queue *sq, bool in_napi,
-			    struct virtnet_sq_free_stats *stats)
+static bool is_orphan_skb(void *ptr)
+{
+	return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG;
+}
+
+static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
+{
+	return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0));
+}
+
+static struct sk_buff *ptr_to_skb(void *ptr)
+{
+	return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
+}
+
+static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+			    bool in_napi, struct virtnet_sq_free_stats *stats)
 {
 	unsigned int len;
 	void *ptr;
 
 	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		++stats->packets;
-
 		if (!is_xdp_frame(ptr)) {
-			struct sk_buff *skb = ptr;
+			struct sk_buff *skb = ptr_to_skb(ptr);
 
 			pr_debug("Sent skb %p\n", skb);
 
-			stats->bytes += skb->len;
+			if (is_orphan_skb(ptr)) {
+				stats->packets++;
+				stats->bytes += skb->len;
+			} else {
+				stats->napi_packets++;
+				stats->napi_bytes += skb->len;
+			}
 			napi_consume_skb(skb, in_napi);
 		} else {
 			struct xdp_frame *frame = ptr_to_xdp(ptr);
 
+			stats->packets++;
 			stats->bytes += xdp_get_frame_len(frame);
 			xdp_return_frame(frame);
 		}
 	}
+	netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
 }
 
 /* Converting between virtqueue no. and kernel tx/rx queue no.
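
The napi_packets/napi_bytes split is what makes Byte Queue Limits work here: only skbs that were charged to BQL on the transmit side (the non-orphaned, napi-mode ones) may be reported back through netdev_tx_completed_queue(), so orphaned skbs and XDP frames are counted separately. A toy model of that invariant, with hypothetical bql_sent()/bql_completed() helpers standing in for the kernel's dql machinery:

#include <assert.h>
#include <stdio.h>

static long bql_inflight;	/* bytes "sent" to BQL, not yet completed */

static void bql_sent(long bytes)
{
	bql_inflight += bytes;
}

static void bql_completed(long bytes)
{
	bql_inflight -= bytes;
	assert(bql_inflight >= 0);	/* completing unsent bytes breaks BQL */
}

int main(void)
{
	bql_sent(1500);		/* napi skb: charged on the send side... */
	bql_completed(1500);	/* ...so it must be reported on completion */
				/* orphan skbs / XDP frames: on neither side */
	printf("inflight=%ld\n", bql_inflight);
	return 0;
}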
@@ -955,21 +979,22 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 	virtnet_rq_free_buf(vi, rq, buf);
 }
 
-static void free_old_xmit(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+			  bool in_napi)
 {
 	struct virtnet_sq_free_stats stats = {0};
 
-	__free_old_xmit(sq, in_napi, &stats);
+	__free_old_xmit(sq, txq, in_napi, &stats);
 
 	/* Avoid overhead when no packets have been processed
 	 * happens when called speculatively from start_xmit.
 	 */
-	if (!stats.packets)
+	if (!stats.packets && !stats.napi_packets)
 		return;
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	u64_stats_add(&sq->stats.bytes, stats.bytes);
-	u64_stats_add(&sq->stats.packets, stats.packets);
+	u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
+	u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
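The u64_stats_update_begin()/u64_stats_update_end() pair above is the kernel's seqcount scheme that lets readers on 32-bit systems fetch a consistent 64-bit snapshot. A loose single-writer sketch of the retry loop (hypothetical names; the real primitives also issue memory barriers that this toy omits):

#include <stdint.h>
#include <stdio.h>

struct toy_stats {
	unsigned int seq;	/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_add(struct toy_stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;		/* like u64_stats_update_begin() */
	s->packets += pkts;
	s->bytes += bytes;
	s->seq++;		/* like u64_stats_update_end() */
}

static void stats_read(const struct toy_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {			/* retry until a stable, even sequence is seen */
		start = s->seq;
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct toy_stats s = { 0 };
	uint64_t p, b;

	stats_add(&s, 3, 4500);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}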
@@ -1003,7 +1028,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	 * early means 16 slots are typically wasted.
 	 */
 	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
-		netif_stop_subqueue(dev, qnum);
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
+		netif_tx_stop_queue(txq);
 		u64_stats_update_begin(&sq->stats.syncp);
 		u64_stats_inc(&sq->stats.stop);
 		u64_stats_update_end(&sq->stats.syncp);
@@ -1012,7 +1039,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 		virtqueue_napi_schedule(&sq->napi, sq->vq);
 	} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
-		free_old_xmit(sq, false);
+		free_old_xmit(sq, txq, false);
 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
 			netif_start_subqueue(dev, qnum);
 			u64_stats_update_begin(&sq->stats.syncp);
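
The stop/restart pair in these two hunks forms a hysteresis band around the worst-case packet: the queue stops once fewer than 2 + MAX_SKB_FRAGS descriptors remain (one per fragment plus header and linear part) and restarts only when that headroom is back. A toy model under those assumptions (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS	17
#define RING_SIZE	64

static int num_free = RING_SIZE;
static bool stopped;

static void try_xmit(int descs)
{
	if (stopped)
		return;
	num_free -= descs;
	if (num_free < 2 + MAX_SKB_FRAGS)
		stopped = true;		/* netif_tx_stop_queue() */
}

static void complete(int descs)
{
	num_free += descs;
	if (stopped && num_free >= 2 + MAX_SKB_FRAGS)
		stopped = false;	/* netif_start_subqueue() */
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		try_xmit(5);
	printf("free=%d stopped=%d\n", num_free, stopped);
	complete(20);
	printf("free=%d stopped=%d\n", num_free, stopped);
	return 0;
}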
@@ -1138,7 +1165,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 
 	/* Free up any pending old buffers before queueing new ones. */
-	__free_old_xmit(sq, false, &stats);
+	__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+			false, &stats);
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
@@ -2313,7 +2341,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 
 	do {
 		virtqueue_disable_cb(sq->vq);
-		free_old_xmit(sq, true);
+		free_old_xmit(sq, txq, true);
 	} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
@@ -2412,6 +2440,7 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
 		goto err_xdp_reg_mem_model;
 
 	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
 	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
 
 	return 0;
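
netdev_tx_reset_queue() is needed here because BQL tracks sent-but-not-completed bytes per queue; buffers dropped by a device reset never complete, and a stale in-flight count would otherwise stall the re-enabled queue. A trivial sketch of that failure mode and the fix (hypothetical names):

#include <stdio.h>

static long bql_inflight;	/* bytes sent but not yet completed */

static void queue_reset(void)
{
	/* analogous to netdev_tx_reset_queue(): forget stale in-flight
	 * bytes whose completions were lost with the old virtqueue */
	bql_inflight = 0;
}

int main(void)
{
	bql_inflight = 70000;	/* queued before the device was reset */
	queue_reset();		/* without this, BQL would stall the queue */
	printf("inflight after re-enable: %ld\n", bql_inflight);
	return 0;
}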
@@ -2471,7 +2500,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	virtqueue_disable_cb(sq->vq);
-	free_old_xmit(sq, true);
+	free_old_xmit(sq, txq, true);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
 		if (netif_tx_queue_stopped(txq)) {
@@ -2505,7 +2534,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	return 0;
 }
 
-static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
@@ -2549,7 +2578,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 			return num_sg;
 		num_sg++;
 	}
-	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
+	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+				    skb_to_ptr(skb, orphan), GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -2559,24 +2589,25 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct send_queue *sq = &vi->sq[qnum];
 	int err;
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
-	bool kick = !netdev_xmit_more();
+	bool xmit_more = netdev_xmit_more();
 	bool use_napi = sq->napi.weight;
+	bool kick;
 
 	/* Free up any pending old buffers before queueing new ones. */
 	do {
 		if (use_napi)
 			virtqueue_disable_cb(sq->vq);
 
-		free_old_xmit(sq, false);
+		free_old_xmit(sq, txq, false);
 
-	} while (use_napi && kick &&
+	} while (use_napi && !xmit_more &&
 		 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
 	/* Try to transmit */
-	err = xmit_skb(sq, skb);
+	err = xmit_skb(sq, skb, !use_napi);
 
 	/* This should not happen! */
 	if (unlikely(err)) {
@@ -2598,7 +2629,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	check_sq_full_and_disable(vi, dev, sq);
 
-	if (kick || netif_xmit_stopped(txq)) {
+	kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
+			  !xmit_more || netif_xmit_stopped(txq);
+	if (kick) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.kicks);
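
In napi mode the kick decision is now delegated to __netdev_tx_sent_queue(), which charges skb->len to BQL and requests a doorbell when this is the last skb of an xmit_more batch or when the queue was just stopped; the orphan path keeps the old !xmit_more || netif_xmit_stopped() test. A toy model of that decision (hypothetical names, not the kernel's implementation):

#include <stdbool.h>
#include <stdio.h>

static long inflight, limit = 64 * 1024;
static bool queue_stopped;

static bool toy_tx_sent_queue(unsigned int bytes, bool xmit_more)
{
	inflight += bytes;
	if (inflight >= limit)
		queue_stopped = true;
	/* kick on the last packet of a batch, or if we just filled up */
	return !xmit_more || queue_stopped;
}

int main(void)
{
	printf("kick=%d\n", toy_tx_sent_queue(1500, true));	/* 0: defer */
	printf("kick=%d\n", toy_tx_sent_queue(1500, false));	/* 1: flush */
	return 0;
}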