
Commit efabce2

sumang-mrvl authored and Paolo Abeni committed
octeontx2-pf: AF_XDP zero copy receive support
This patch adds AF_XDP zero-copy support for CN10K; specifically, it adds the receive-side support. In this approach, once an XDP program with zero-copy support is enabled on a specific RX queue, that receive queue is disabled/detached from the existing kernel queue and re-assigned to the UMEM memory.

Signed-off-by: Suman Ghosh <sumang@marvell.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent b4164de commit efabce2
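For context, the receive path this patch enables is driven from userspace by binding an AF_XDP socket to the chosen RX queue in zero-copy mode. Below is a minimal, illustrative userspace sketch using libxdp's xsk helpers; it is not part of this patch, and the interface name ("eth0"), queue id (0), and frame/ring sizes are placeholder assumptions.

/* Minimal AF_XDP zero-copy RX bind sketch (illustrative only, not from this patch).
 * Assumes libxdp (header <xdp/xsk.h>); "eth0" and queue 0 are placeholders.
 */
#include <stdlib.h>
#include <unistd.h>
#include <linux/if_xdp.h>
#include <xdp/xsk.h>

#define NUM_FRAMES 4096

int main(void)
{
	struct xsk_ring_prod fq;	/* UMEM fill ring */
	struct xsk_ring_cons cq;	/* UMEM completion ring */
	struct xsk_ring_cons rx;	/* socket RX ring */
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	void *bufs;
	size_t len = (size_t)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;

	/* Page-aligned UMEM backing memory shared with the kernel */
	if (posix_memalign(&bufs, getpagesize(), len))
		return 1;
	if (xsk_umem__create(&umem, bufs, len, &fq, &cq, NULL))
		return 1;

	/* Bind to RX queue 0 with XDP_ZEROCOPY; per this patch, the driver
	 * detaches that queue from its normal buffer pool and refills it
	 * directly from the UMEM.
	 */
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_ZEROCOPY,
	};
	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, NULL, &cfg))
		return 1;

	/* ... post fill-ring descriptors and consume the RX ring here ... */

	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	free(bufs);
	return 0;
}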

11 files changed: +389 −61 lines changed

drivers/net/ethernet/marvell/octeontx2/nic/Makefile

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
 
 rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
 	       otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
-	       otx2_devlink.o qos_sq.o qos.o
+	       otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
 rvu_nicvf-y := otx2_vf.o
 rvu_rep-y := rep.o
 

drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c

Lines changed: 6 additions & 1 deletion
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 	struct otx2_nic *pfvf = dev;
 	int cnt = cq->pool_ptrs;
 	u64 ptrs[NPA_MAX_BURST];
+	struct otx2_pool *pool;
 	dma_addr_t bufptr;
 	int num_ptrs = 1;
 
+	pool = &pfvf->qset.pool[cq->cq_idx];
+
 	/* Refill pool with new buffers */
 	while (cq->pool_ptrs) {
 		if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 			break;
 		}
 		cq->pool_ptrs--;
-		ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+		ptrs[num_ptrs] = pool->xsk_pool ?
+				 (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
 		num_ptrs++;
 		if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
 			__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c

Lines changed: 83 additions & 31 deletions
@@ -17,6 +17,7 @@
 #include "otx2_common.h"
 #include "otx2_struct.h"
 #include "cn10k.h"
+#include "otx2_xsk.h"
 
 static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
 {
@@ -549,10 +550,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 }
 
 static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			     dma_addr_t *dma)
+			     dma_addr_t *dma, int qidx, int idx)
 {
 	u8 *buf;
 
+	if (pool->xsk_pool)
+		return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
 	if (pool->page_pool)
 		return otx2_alloc_pool_buf(pfvf, pool, dma);
 
@@ -571,20 +575,21 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 }
 
 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-		    dma_addr_t *dma)
+		    dma_addr_t *dma, int qidx, int idx)
 {
 	int ret;
 
 	local_bh_disable();
-	ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+	ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
 	local_bh_enable();
 	return ret;
 }
 
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
 		      dma_addr_t *dma)
 {
-	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+				       cq->cq_idx, cq->pool_ptrs - 1)))
 		return -ENOMEM;
 	return 0;
 }
@@ -884,7 +889,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
 #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
 #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
 
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
 {
 	struct otx2_qset *qset = &pfvf->qset;
 	struct nix_aq_enq_req *aq;
@@ -1041,7 +1046,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
 
 }
 
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 {
 	struct otx2_qset *qset = &pfvf->qset;
 	int err, pool_id, non_xdp_queues;
@@ -1057,11 +1062,18 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 		cq->cint_idx = qidx;
 		cq->cqe_cnt = qset->rqe_cnt;
 		if (pfvf->xdp_prog) {
-			pool = &qset->pool[qidx];
 			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
-			xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
-						   MEM_TYPE_PAGE_POOL,
-						   pool->page_pool);
+			pool = &qset->pool[qidx];
+			if (pool->xsk_pool) {
+				xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+							   MEM_TYPE_XSK_BUFF_POOL,
+							   NULL);
+				xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+			} else if (pool->page_pool) {
+				xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+							   MEM_TYPE_PAGE_POOL,
+							   pool->page_pool);
+			}
 		}
 	} else if (qidx < non_xdp_queues) {
 		cq->cq_type = CQ_TX;
@@ -1281,9 +1293,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
 
 	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
 	page = virt_to_head_page(phys_to_virt(pa));
-
 	if (pool->page_pool) {
 		page_pool_put_full_page(pool->page_pool, page, true);
+	} else if (pool->xsk_pool) {
+		/* Note: No way of identifying xdp_buff */
 	} else {
 		dma_unmap_page_attrs(pfvf->dev, iova, size,
 				     DMA_FROM_DEVICE,
@@ -1298,6 +1311,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
 	int pool_id, pool_start = 0, pool_end = 0, size = 0;
 	struct otx2_pool *pool;
 	u64 iova;
+	int idx;
 
 	if (type == AURA_NIX_SQ) {
 		pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1312,16 +1326,21 @@
 
 	/* Free SQB and RQB pointers from the aura pool */
 	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
-		iova = otx2_aura_allocptr(pfvf, pool_id);
 		pool = &pfvf->qset.pool[pool_id];
+		iova = otx2_aura_allocptr(pfvf, pool_id);
 		while (iova) {
 			if (type == AURA_NIX_RQ)
 				iova -= OTX2_HEAD_ROOM;
-
 			otx2_free_bufs(pfvf, pool, iova, size);
-
 			iova = otx2_aura_allocptr(pfvf, pool_id);
 		}
+
+		for (idx = 0 ; idx < pool->xdp_cnt; idx++) {
+			if (!pool->xdp[idx])
+				continue;
+
+			xsk_buff_free(pool->xdp[idx]);
+		}
 	}
 }
 
@@ -1338,7 +1357,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
 		qmem_free(pfvf->dev, pool->stack);
 		qmem_free(pfvf->dev, pool->fc_addr);
 		page_pool_destroy(pool->page_pool);
-		pool->page_pool = NULL;
+		devm_kfree(pfvf->dev, pool->xdp);
+		pool->xsk_pool = NULL;
 	}
 	devm_kfree(pfvf->dev, pfvf->qset.pool);
 	pfvf->qset.pool = NULL;
@@ -1425,6 +1445,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 		   int stack_pages, int numptrs, int buf_size, int type)
 {
 	struct page_pool_params pp_params = { 0 };
+	struct xsk_buff_pool *xsk_pool;
 	struct npa_aq_enq_req *aq;
 	struct otx2_pool *pool;
 	int err;
@@ -1468,21 +1489,35 @@
 	aq->ctype = NPA_AQ_CTYPE_POOL;
 	aq->op = NPA_AQ_INSTOP_INIT;
 
-	if (type != AURA_NIX_RQ) {
-		pool->page_pool = NULL;
+	if (type != AURA_NIX_RQ)
+		return 0;
+
+	if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+		pp_params.order = get_order(buf_size);
+		pp_params.flags = PP_FLAG_DMA_MAP;
+		pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+		pp_params.nid = NUMA_NO_NODE;
+		pp_params.dev = pfvf->dev;
+		pp_params.dma_dir = DMA_FROM_DEVICE;
+		pool->page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(pool->page_pool)) {
+			netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+			return PTR_ERR(pool->page_pool);
+		}
 		return 0;
 	}
 
-	pp_params.order = get_order(buf_size);
-	pp_params.flags = PP_FLAG_DMA_MAP;
-	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
-	pp_params.nid = NUMA_NO_NODE;
-	pp_params.dev = pfvf->dev;
-	pp_params.dma_dir = DMA_FROM_DEVICE;
-	pool->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(pool->page_pool)) {
-		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
-		return PTR_ERR(pool->page_pool);
+	/* Set XSK pool to support AF_XDP zero-copy */
+	xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+	if (xsk_pool) {
+		pool->xsk_pool = xsk_pool;
+		pool->xdp_cnt = numptrs;
+		pool->xdp = devm_kcalloc(pfvf->dev,
+					 numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+		if (IS_ERR(pool->xdp)) {
+			netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
+			return PTR_ERR(pool->xdp);
+		}
 	}
 
 	return 0;
@@ -1543,9 +1578,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 		}
 
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
-			if (err)
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+			if (err) {
+				if (pool->xsk_pool) {
+					ptr--;
+					while (ptr >= 0) {
+						xsk_buff_free(pool->xdp[ptr]);
+						ptr--;
+					}
+				}
 				goto err_mem;
+			}
+
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
@@ -1595,11 +1639,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	/* Allocate pointers and free them to aura/pool */
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
+
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
-			if (err)
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+			if (err) {
+				if (pool->xsk_pool) {
+					while (ptr)
+						xsk_buff_free(pool->xdp[--ptr]);
+				}
 				return -ENOMEM;
+			}
+
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+						   pool->xsk_pool ? bufptr :
 						   bufptr + OTX2_HEAD_ROOM);
 		}
 	}

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h

Lines changed: 5 additions & 1 deletion
@@ -532,6 +532,8 @@ struct otx2_nic {
 
 	/* Inline ipsec */
 	struct cn10k_ipsec ipsec;
+	/* af_xdp zero-copy */
+	unsigned long *af_xdp_zc_qidx;
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -1003,7 +1005,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
 void otx2_free_pending_sqe(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-		    dma_addr_t *dma);
+		    dma_addr_t *dma, int qidx, int idx);
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
@@ -1033,6 +1035,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
 void otx2_disable_mbox_intr(struct otx2_nic *pf);
 void otx2_disable_napi(struct otx2_nic *pf);
 irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
 
 /* RSS configuration APIs*/
 int otx2_rss_init(struct otx2_nic *pfvf);

drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c

Lines changed: 15 additions & 10 deletions
@@ -27,6 +27,7 @@
 #include "qos.h"
 #include <rvu_trace.h>
 #include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
 
 #define DRV_NAME	"rvu_nicpf"
 #define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
@@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
 	struct nix_lf_free_req *free_req;
 	struct mbox *mbox = &pf->mbox;
 	struct otx2_cq_queue *cq;
-	struct otx2_pool *pool;
 	struct msg_req *req;
-	int pool_id;
 	int qidx;
 
 	/* Ensure all SQE are processed */
@@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
 	/* Free RQ buffer pointers*/
 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
 
-	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
-		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
-		pool = &pf->qset.pool[pool_id];
-		page_pool_destroy(pool->page_pool);
-		pool->page_pool = NULL;
-	}
-
 	otx2_free_cq_res(pf);
 
 	/* Free all ingress bandwidth profiles allocated */
@@ -2788,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return otx2_xdp_setup(pf, xdp->prog);
+	case XDP_SETUP_XSK_POOL:
+		return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
 	default:
 		return -EINVAL;
 	}
@@ -2865,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
 	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
 	.ndo_get_vf_config	= otx2_get_vf_config,
 	.ndo_bpf		= otx2_xdp,
+	.ndo_xsk_wakeup		= otx2_xsk_wakeup,
 	.ndo_xdp_xmit		= otx2_xdp_xmit,
 	.ndo_setup_tc		= otx2_setup_tc,
 	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
@@ -3203,16 +3198,26 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Enable link notifications */
 	otx2_cgx_config_linkevents(pf, true);
 
+	pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+	if (!pf->af_xdp_zc_qidx) {
+		err = -ENOMEM;
+		goto err_sriov_cleannup;
+	}
+
 #ifdef CONFIG_DCB
 	err = otx2_dcbnl_set_ops(netdev);
 	if (err)
-		goto err_pf_sriov_init;
+		goto err_free_zc_bmap;
 #endif
 
 	otx2_qos_init(pf, qos_txqs);
 
 	return 0;
 
+err_free_zc_bmap:
+	bitmap_free(pf->af_xdp_zc_qidx);
+err_sriov_cleannup:
+	otx2_sriov_vfcfg_cleanup(pf);
 err_pf_sriov_init:
 	otx2_shutdown_tc(pf);
 err_mcam_flow_del:
