@@ -553,34 +553,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
  * @xdp: xdp_buff used as input to the XDP program
  * @xdp_prog: XDP program to run
  * @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
  *
  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
  */
-static int
+static void
 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+	    struct ice_rx_buf *rx_buf)
 {
-	int err;
+	unsigned int ret = ICE_XDP_PASS;
 	u32 act;
 
+	if (!xdp_prog)
+		goto exit;
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
-		return ICE_XDP_PASS;
+		break;
 	case XDP_TX:
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_lock(&xdp_ring->tx_lock);
-		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+		ret = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_unlock(&xdp_ring->tx_lock);
-		if (err == ICE_XDP_CONSUMED)
+		if (ret == ICE_XDP_CONSUMED)
 			goto out_failure;
-		return err;
+		break;
 	case XDP_REDIRECT:
-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
+		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
 			goto out_failure;
-		return ICE_XDP_REDIR;
+		ret = ICE_XDP_REDIR;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
@@ -589,8 +594,10 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
-		return ICE_XDP_CONSUMED;
+		ret = ICE_XDP_CONSUMED;
 	}
+exit:
+	rx_buf->act = ret;
 }
 
 /**
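
The two hunks above rework ice_run_xdp()'s contract: it now returns void, records its verdict in rx_buf->act, and absorbs the !xdp_prog check that the caller used to perform before invoking it. A minimal, self-contained sketch of that shape follows; the model_* names are made up for illustration and are not the driver's types, the point is only that the caller no longer branches on a return value but on the action stored in the buffer.

#include <stdio.h>

/* Illustrative stand-ins for ICE_XDP_{PASS, CONSUMED, TX, REDIR}. */
enum model_verdict { MODEL_PASS = 0, MODEL_CONSUMED = 1, MODEL_TX = 2, MODEL_REDIR = 4 };

struct model_rx_buf {
	enum model_verdict act;	/* per-buffer verdict, like rx_buf->act in the patch */
};

/* Mirrors the reworked helper's shape: void return, verdict stored in the
 * buffer, and the "no XDP program attached" case folded into the helper so
 * the Rx loop can call it unconditionally.
 */
static void model_run_xdp(const int *xdp_prog, struct model_rx_buf *rx_buf)
{
	enum model_verdict ret = MODEL_PASS;

	if (!xdp_prog)
		goto exit;
	ret = (enum model_verdict)*xdp_prog;	/* pretend this is the program's verdict */
exit:
	rx_buf->act = ret;
}

int main(void)
{
	struct model_rx_buf buf = { MODEL_PASS };
	int verdict = MODEL_TX;

	model_run_xdp(NULL, &buf);	/* no program attached: buffer ends up marked PASS */
	printf("no prog -> act=%d\n", buf.act);

	model_run_xdp(&verdict, &buf);	/* program ran: its verdict lands in buf.act */
	if (buf.act == MODEL_PASS)
		printf("build an skb\n");
	else
		printf("defer cleanup, act=%d\n", buf.act);
	return 0;
}

The real helper additionally takes the Rx ring and the XDP Tx ring and handles the TX/REDIRECT/DROP paths shown in the hunks above.
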
@@ -855,9 +862,6 @@ ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		return;
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 			rx_buf->page_offset, size, truesize);
-
-	/* page is being used so we must update the page offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 }
 
 /**
@@ -970,9 +974,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
-	/* buffer is used by skb, update page_offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
-
 	return skb;
 }
 
@@ -1023,14 +1024,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 #endif
 		skb_add_rx_frag(skb, 0, rx_buf->page,
 				rx_buf->page_offset + headlen, size, truesize);
-		/* buffer is used by skb, update page_offset */
-		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 	} else {
-		/* buffer is unused, reset bias back to rx_buf; data was copied
-		 * onto skb's linear part so there's no need for adjusting
-		 * page offset and we can reuse this buffer as-is
+		/* buffer is unused, change the act that should be taken later
+		 * on; data was copied onto skb's linear part so there's no
+		 * need for adjusting page offset and we can reuse this buffer
+		 * as-is
 		 */
-		rx_buf->pagecnt_bias++;
+		rx_buf->act = ICE_XDP_CONSUMED;
 	}
 
 	return skb;
@@ -1084,11 +1084,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	unsigned int offset = rx_ring->rx_offset;
 	struct xdp_buff *xdp = &rx_ring->xdp;
 	struct ice_tx_ring *xdp_ring = NULL;
-	unsigned int xdp_res, xdp_xmit = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	struct bpf_prog *xdp_prog = NULL;
 	u32 ntc = rx_ring->next_to_clean;
 	u32 cnt = rx_ring->count;
+	u32 cached_ntc = ntc;
+	u32 xdp_xmit = 0;
 	bool failure;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
@@ -1137,7 +1138,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
 			if (++ntc == cnt)
 				ntc = 0;
-			ice_put_rx_buf(rx_ring, NULL);
 			cleaned_count++;
 			continue;
 		}
@@ -1164,25 +1164,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
 #endif
 
-		if (!xdp_prog)
+		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+		if (rx_buf->act == ICE_XDP_PASS)
 			goto construct_skb;
-
-		xdp_res = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring);
-		if (!xdp_res)
-			goto construct_skb;
-		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
-			xdp_xmit |= xdp_res;
-			ice_rx_buf_adjust_pg_offset(rx_buf, xdp->frame_sz);
-		} else {
-			rx_buf->pagecnt_bias++;
-		}
 		total_rx_bytes += size;
 		total_rx_pkts++;
 
 		cleaned_count++;
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		continue;
 construct_skb:
 		if (skb) {
@@ -1203,7 +1193,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		cleaned_count++;
 
 		/* skip if it is NOP desc */
@@ -1243,6 +1232,22 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		total_rx_pkts++;
 	}
 
+	while (cached_ntc != ntc) {
+		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+			xdp_xmit |= buf->act;
+		} else if (buf->act & ICE_XDP_CONSUMED) {
+			buf->pagecnt_bias++;
+		} else if (buf->act == ICE_XDP_PASS) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+		}
+
+		ice_put_rx_buf(rx_ring, buf);
+		if (++cached_ntc >= cnt)
+			cached_ntc = 0;
+	}
 	rx_ring->next_to_clean = ntc;
 	/* return up to cleaned_count buffers to hardware */
 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
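
The new while (cached_ntc != ntc) loop above settles, in a single pass after the Rx loop, the per-buffer bookkeeping that previously happened inline: buffers whose frames were kept by XDP (TX or redirect) or passed up the stack have their page offset advanced, consumed buffers get their pagecnt_bias bumped so the page can be reused as-is, and every buffer is then handed to ice_put_rx_buf(). The sketch below is a self-contained model of that deferred-settlement walk, with hypothetical ACT_* flags and a plain array standing in for the Rx ring; it is not the driver code itself.

#include <stdio.h>

#define RING_CNT	4
/* Hypothetical verdict flags patterned after ICE_XDP_{PASS, CONSUMED, TX, REDIR}. */
#define ACT_PASS	0x0
#define ACT_CONSUMED	0x1
#define ACT_TX		0x2
#define ACT_REDIR	0x4

struct model_buf {
	unsigned int act;		/* verdict recorded during the Rx loop */
	unsigned int page_offset;	/* advanced only when the frame kept the buffer */
	unsigned int pagecnt_bias;	/* bumped when the page can be reused as-is */
};

/* Walk the descriptors handled in this poll, from the cached index up to the
 * current next-to-clean, wrapping at the ring size.  Returns the OR of the
 * TX/REDIR verdicts, playing the role xdp_xmit plays in the patch.
 */
static unsigned int settle_bufs(struct model_buf *ring, unsigned int cached_ntc,
				unsigned int ntc, unsigned int frame_sz)
{
	unsigned int xdp_xmit = 0;

	while (cached_ntc != ntc) {
		struct model_buf *buf = &ring[cached_ntc];

		if (buf->act & (ACT_TX | ACT_REDIR)) {
			buf->page_offset += frame_sz;	/* frame kept by XDP */
			xdp_xmit |= buf->act;
		} else if (buf->act & ACT_CONSUMED) {
			buf->pagecnt_bias++;		/* dropped or copied out: reuse page */
		} else if (buf->act == ACT_PASS) {
			buf->page_offset += frame_sz;	/* frame attached to an skb */
		}

		if (++cached_ntc >= RING_CNT)
			cached_ntc = 0;
	}
	return xdp_xmit;
}

int main(void)
{
	struct model_buf ring[RING_CNT] = {
		{ .act = ACT_TX }, { .act = ACT_CONSUMED },
		{ .act = ACT_PASS }, { .act = ACT_REDIR },
	};
	/* Pretend the Rx loop started at index 2, wrapped past the end and
	 * stopped at index 1, so buffers 2, 3 and 0 need settling.
	 */
	unsigned int xmit = settle_bufs(ring, 2, 1, 2048);
	unsigned int i;

	for (i = 0; i < RING_CNT; i++)
		printf("buf %u: offset=%u bias=%u\n",
		       i, ring[i].page_offset, ring[i].pagecnt_bias);
	printf("xdp_xmit mask: 0x%x\n", xmit);
	return 0;
}

In the patch, the same walk also hands each settled buffer to ice_put_rx_buf() before next_to_clean is published, and the accumulated xdp_xmit mask is used by the rest of ice_clean_rx_irq() as before.
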