Commit 84a0a23

Maxim Mikityanskiy authored and borkmann committed
net/mlx5e: XDP_TX from UMEM support
When an XDP program returns XDP_TX, and the RQ is XSK-enabled, it requires careful handling, because convert_to_xdp_frame creates a new page and copies the data there, while our driver expects the xdp_frame to point to the same memory as the xdp_buff. Handle this case separately: map the page, and in the end unmap it and call xdp_return_frame.

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
1 parent b9673cf commit 84a0a23
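
For context on the reasoning in the commit message, here is a rough sketch of what the conversion step does for a MEM_TYPE_ZERO_COPY buffer. This is an illustrative simplification, not the actual net/core/xdp.c code (sketch_convert_zc and the exact field handling are assumptions); the key point is that the returned xdp_frame lives in a brand-new page the driver never DMA-mapped for RX, which is why the XDP_TX path in the diff below must dma_map_single it and unmap it again on completion.

#include <net/xdp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative sketch only, NOT the kernel's xdp_convert_zc_to_xdp_frame:
 * converting a zero-copy xdp_buff copies the payload into a freshly
 * allocated page, so the resulting xdp_frame does not alias the UMEM
 * memory that the driver originally DMA-mapped for RX.
 */
static struct xdp_frame *sketch_convert_zc(struct xdp_buff *xdp)
{
	struct page *page = dev_alloc_page();	/* page the driver has never mapped */
	struct xdp_frame *xdpf;

	if (!page)
		return NULL;

	xdpf = page_address(page);		/* frame metadata at the start of the page */
	xdpf->headroom = 0;
	xdpf->metasize = 0;
	xdpf->len = xdp->data_end - xdp->data;
	xdpf->data = (void *)xdpf + sizeof(*xdpf);
	memcpy(xdpf->data, xdp->data, xdpf->len); /* copy the payload out of the UMEM */
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;	/* the new page is plain page memory */

	xdp_return_buff(xdp);			/* UMEM frame is recycled via the ZCA */
	return xdpf;
}

In the regular (non-XSK) RQ, by contrast, convert_to_xdp_frame leaves the data in place, so the driver can keep using its existing bidirectional RX mapping; the diff below branches on xdp->rxq->mem.type to tell the two cases apart.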

File tree

1 file changed: +42 -8

  • drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

Lines changed: 42 additions & 8 deletions
@@ -69,14 +69,48 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	xdptxd.data = xdpf->data;
 	xdptxd.len = xdpf->len;
 
-	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
+		/* The xdp_buff was in the UMEM and was copied into a newly
+		 * allocated page. The UMEM page was returned via the ZCA, and
+		 * this new page has to be mapped at this point and has to be
+		 * unmapped and returned via xdp_return_frame on completion.
+		 */
+
+		/* Prevent double recycling of the UMEM page. Even in case this
+		 * function returns false, the xdp_buff shouldn't be recycled,
+		 * as it was already done in xdp_convert_zc_to_xdp_frame.
+		 */
+		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+
+		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
 
-	dma_addr = di->addr + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+		dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(sq->pdev, dma_addr)) {
+			xdp_return_frame(xdpf);
+			return false;
+		}
 
-	xdptxd.dma_addr = dma_addr;
-	xdpi.page.rq = rq;
-	xdpi.page.di = *di;
+		xdptxd.dma_addr = dma_addr;
+		xdpi.frame.xdpf = xdpf;
+		xdpi.frame.dma_addr = dma_addr;
+	} else {
+		/* Driver assumes that convert_to_xdp_frame returns an xdp_frame
+		 * that points to the same memory region as the original
+		 * xdp_buff. It allows to map the memory only once and to use
+		 * the DMA_BIDIRECTIONAL mode.
+		 */
+
+		xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+
+		dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+		dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
+					   DMA_TO_DEVICE);
+
+		xdptxd.dma_addr = dma_addr;
+		xdpi.page.rq = rq;
+		xdpi.page.di = *di;
+	}
 
 	return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi);
 }
@@ -298,13 +332,13 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 
 	switch (xdpi.mode) {
 	case MLX5E_XDP_XMIT_MODE_FRAME:
-		/* XDP_REDIRECT */
+		/* XDP_TX from the XSK RQ and XDP_REDIRECT */
 		dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
 				 xdpi.frame.xdpf->len, DMA_TO_DEVICE);
 		xdp_return_frame(xdpi.frame.xdpf);
 		break;
 	case MLX5E_XDP_XMIT_MODE_PAGE:
-		/* XDP_TX */
+		/* XDP_TX from the regular RQ */
 		mlx5e_page_release(xdpi.page.rq, &xdpi.page.di, recycle);
 		break;
 	default:
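
A note on the xdpi metadata these hunks populate and consume: the SQ records, per descriptor, how the buffer must be released at completion time. The sketch below is a simplified reading of the driver's mlx5e_xdp_info around this commit; the names used in the diff (mode, frame.xdpf, frame.dma_addr, page.rq, page.di) are real, while the exact layout is an assumption.

/* Simplified sketch, assumed from the driver's en/xdp.h of this era
 * (not verbatim): mlx5e_free_xdpsq_desc() switches on .mode to decide
 * how to release each transmitted buffer.
 */
enum mlx5e_xdp_xmit_mode {
	MLX5E_XDP_XMIT_MODE_FRAME,	/* completion: dma_unmap_single + xdp_return_frame */
	MLX5E_XDP_XMIT_MODE_PAGE,	/* completion: mlx5e_page_release on the RX page */
};

struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;	/* XDP_REDIRECT, and now XDP_TX from the XSK RQ */
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;		/* XDP_TX from the regular RQ */
	};
};

With this patch, FRAME mode covers two producers instead of one: ndo_xdp_xmit (XDP_REDIRECT) and XDP_TX from an XSK RQ. Both own a separately mapped xdp_frame, so the completion path can treat them identically: unmap, then xdp_return_frame.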
