@@ -1185,22 +1185,23 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
 }
 
-static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
-				      struct xdp_buff *xdp)
+static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
+				      uint pkt_len, struct xdp_buff *xdp)
 {
-	struct sk_buff *skb = napi_build_skb(buf_va, PAGE_SIZE);
+	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
 
 	if (!skb)
 		return NULL;
 
 	if (xdp->data_hard_start) {
 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
 		skb_put(skb, xdp->data_end - xdp->data);
-	} else {
-		skb_reserve(skb, XDP_PACKET_HEADROOM);
-		skb_put(skb, pkt_len);
+		return skb;
 	}
 
+	skb_reserve(skb, rxq->headroom);
+	skb_put(skb, pkt_len);
+
 	return skb;
 }
 
@@ -1233,7 +1234,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 	if (act != XDP_PASS && act != XDP_TX)
 		goto drop_xdp;
 
-	skb = mana_build_skb(buf_va, pkt_len, &xdp);
+	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
 
 	if (!skb)
 		goto drop;
@@ -1301,6 +1302,14 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
 		rxq->xdp_save_va = NULL;
+	} else if (rxq->alloc_size > PAGE_SIZE) {
+		if (is_napi)
+			va = napi_alloc_frag(rxq->alloc_size);
+		else
+			va = netdev_alloc_frag(rxq->alloc_size);
+
+		if (!va)
+			return NULL;
 	} else {
 		page = dev_alloc_page();
 		if (!page)
@@ -1309,7 +1318,7 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 		va = page_to_virt(page);
 	}
 
-	*da = dma_map_single(dev, va + XDP_PACKET_HEADROOM, rxq->datasize,
+	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
 			     DMA_FROM_DEVICE);
 
 	if (dma_mapping_error(dev, *da)) {
@@ -1732,7 +1741,7 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
 	u32 buf_idx;
 	int ret;
 
-	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
+	WARN_ON(rxq->datasize == 0);
 
 	*rxq_size = 0;
 	*cq_size = 0;
@@ -1788,6 +1797,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_obj_spec wq_spec;
 	struct mana_obj_spec cq_spec;
+	unsigned int mtu = ndev->mtu;
 	struct gdma_queue_spec spec;
 	struct mana_cq *cq = NULL;
 	struct gdma_context *gc;
@@ -1807,7 +1817,15 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	rxq->rxq_idx = rxq_idx;
 	rxq->rxobj = INVALID_MANA_HANDLE;
 
-	rxq->datasize = ALIGN(ETH_FRAME_LEN, 64);
+	rxq->datasize = ALIGN(mtu + ETH_HLEN, 64);
+
+	if (mtu > MANA_XDP_MTU_MAX) {
+		rxq->alloc_size = mtu + MANA_RXBUF_PAD;
+		rxq->headroom = 0;
+	} else {
+		rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM;
+		rxq->headroom = XDP_PACKET_HEADROOM;
+	}
 
 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
 	if (err)
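For reference, a minimal userspace sketch of the RX buffer sizing choice made in the mana_create_rxq() hunk above. The values given for MANA_RXBUF_PAD and MANA_XDP_MTU_MAX are assumptions for illustration only (their real definitions are in the driver headers and are not part of this diff), and pick_rx_sizing() is a hypothetical helper, not driver code.

/* Sketch: mirror the alloc_size/headroom selection from the diff above.
 * Assumed constants; not the authoritative driver definitions.
 */
#include <stdio.h>

#define PAGE_SIZE           4096u
#define ETH_HLEN            14u
#define XDP_PACKET_HEADROOM 256u
/* Assumption: room for skb_shared_info plus the Ethernet header. */
#define MANA_RXBUF_PAD      (320u + ETH_HLEN)
/* Assumption: largest MTU that still fits in one page with XDP headroom. */
#define MANA_XDP_MTU_MAX    (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

struct rx_sizing { unsigned int datasize, alloc_size, headroom; };

/* Hypothetical helper following the logic of the mana_create_rxq() hunk. */
static struct rx_sizing pick_rx_sizing(unsigned int mtu)
{
	struct rx_sizing s;

	s.datasize = (mtu + ETH_HLEN + 63u) & ~63u;	/* ALIGN(mtu + ETH_HLEN, 64) */
	if (mtu > MANA_XDP_MTU_MAX) {
		s.alloc_size = mtu + MANA_RXBUF_PAD;	/* large MTU: no XDP headroom */
		s.headroom = 0;
	} else {
		s.alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM;
		s.headroom = XDP_PACKET_HEADROOM;
	}
	return s;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 4000, 9000 };

	for (unsigned int i = 0; i < 3; i++) {
		struct rx_sizing s = pick_rx_sizing(mtus[i]);

		printf("mtu %-5u datasize %-5u alloc_size %-5u headroom %u\n",
		       mtus[i], s.datasize, s.alloc_size, s.headroom);
	}
	return 0;
}

With these assumed constants, an alloc_size above PAGE_SIZE lands in the new napi_alloc_frag()/netdev_alloc_frag() path in mana_get_rxfrag(), while smaller MTUs keep the single-page allocation with XDP headroom.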