
Commit 1c1efc2

magnus-karlsson authored and borkmann committed
xsk: Create and free buffer pool independently from umem
Create and free the buffer pool independently from the umem. Move these operations, which are performed on the buffer pool, from the umem create and destroy functions to new create and destroy functions just for the buffer pool. This is so that in later commits we can instantiate multiple buffer pools per umem when sharing a umem between HW queues and/or devices. We also eradicate the back pointer from the umem to the buffer pool, as this will not work once there can be multiple buffer pools per umem.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-4-git-send-email-magnus.karlsson@intel.com
1 parent c465576 commit 1c1efc2
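Editor's note: in caller terms, this commit replaces the umem-owned pool (umem->pool, built inside xdp_umem_reg() via xp_create()) with a pool that is created per socket and bound to a device explicitly. The following is only a minimal sketch of what the bind path could look like with the new API declared in include/net/xsk_buff_pool.h below; the xs->pool field, the qid and flags variables and the out_unlock label are illustrative assumptions, since the net/xdp/xsk.c changes are among the files not shown on this page.

	/* Illustrative sketch only, not part of the diff below. Assumes the
	 * socket keeps its own pool pointer (xs->pool) and that the bind
	 * path in net/xdp/xsk.c now drives pool creation and device
	 * assignment.
	 */
	xs->pool = xp_create_and_assign_umem(xs, umem);
	if (!xs->pool) {
		err = -ENOMEM;		/* pool allocation failed */
		goto out_unlock;	/* hypothetical error label */
	}

	/* xp_assign_dev() presumably takes over the queue registration and
	 * zero-copy setup that xdp_umem_assign_dev() loses in this commit.
	 */
	err = xp_assign_dev(xs->pool, dev, qid, flags);
	if (err) {
		xp_destroy(xs->pool);
		xs->pool = NULL;
		goto out_unlock;
	}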

8 files changed (+236 / -187 lines)


include/net/xdp_sock.h

Lines changed: 1 addition & 2 deletions
@@ -20,13 +20,12 @@ struct xdp_buff;
 struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
-	struct xsk_buff_pool *pool;
 	u64 size;
 	u32 headroom;
 	u32 chunk_size;
+	u32 chunks;
 	struct user_struct *user;
 	refcount_t users;
-	struct work_struct work;
 	struct page **pgs;
 	u32 npgs;
 	u16 queue_id;

include/net/xsk_buff_pool.h

Lines changed: 10 additions & 3 deletions
@@ -14,6 +14,7 @@ struct xdp_rxq_info;
 struct xsk_queue;
 struct xdp_desc;
 struct xdp_umem;
+struct xdp_sock;
 struct device;
 struct page;

@@ -46,16 +47,22 @@ struct xsk_buff_pool {
 	struct xdp_umem *umem;
 	void *addrs;
 	struct device *dev;
+	refcount_t users;
+	struct work_struct work;
 	struct xdp_buff_xsk *free_heads[];
 };

 /* AF_XDP core. */
-struct xsk_buff_pool *xp_create(struct xdp_umem *umem, u32 chunks,
-				u32 chunk_size, u32 headroom, u64 size,
-				bool unaligned);
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+						struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+		  u16 queue_id, u16 flags);
 void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_release(struct xdp_buff_xsk *xskb);
+void xp_get_pool(struct xsk_buff_pool *pool);
+void xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);

 /* AF_XDP, and XDP core. */
 void xp_free(struct xdp_buff_xsk *xskb);
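Editor's note: the pool now carries the refcount_t users and struct work_struct work that this commit removes from struct xdp_umem, together with the new xp_get_pool()/xp_put_pool()/xp_clear_dev() declarations. A plausible reading is that the deferred-release pattern dropped from xdp_put_umem() in net/xdp/xdp_umem.c below simply moves to the pool. The sketch that follows is a hedged reconstruction under that assumption; the real definitions live in net/xdp/xsk_buff_pool.c, one of the changed files not shown on this page, and xp_release_deferred() is an assumed helper name.

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

/* Assumed helper: mirrors the removed xdp_umem_release_deferred(), but keyed
 * off the pool's work member instead of the umem's.
 */
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	/* Plausible ordering: detach from the device under RTNL, drop the
	 * pool's reference on the umem, then free the pool itself.
	 */
	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	xdp_put_umem(pool->umem);
	xp_destroy(pool);
}

void xp_put_pool(struct xsk_buff_pool *pool)
{
	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
	}
}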

net/xdp/xdp_umem.c

Lines changed: 14 additions & 150 deletions
@@ -47,160 +47,41 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
 }

-/* The umem is stored both in the _rx struct and the _tx struct as we do
- * not know if the device has more tx queues than rx, or the opposite.
- * This might also change during run time.
- */
-static int xsk_reg_pool_at_qid(struct net_device *dev,
-			       struct xsk_buff_pool *pool,
-			       u16 queue_id)
-{
-	if (queue_id >= max_t(unsigned int,
-			      dev->real_num_rx_queues,
-			      dev->real_num_tx_queues))
-		return -EINVAL;
-
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = pool;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = pool;
-
-	return 0;
-}
-
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
-					    u16 queue_id)
+static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		return dev->_rx[queue_id].pool;
-	if (queue_id < dev->real_num_tx_queues)
-		return dev->_tx[queue_id].pool;
+	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

-	return NULL;
+	kfree(umem->pgs);
+	umem->pgs = NULL;
 }
-EXPORT_SYMBOL(xsk_get_pool_from_qid);

-static void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = NULL;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = NULL;
+	if (umem->user) {
+		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
+		free_uid(umem->user);
+	}
 }

-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
-			u16 queue_id, u16 flags)
+void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
+			 u16 queue_id)
 {
-	bool force_zc, force_copy;
-	struct netdev_bpf bpf;
-	int err = 0;
-
-	ASSERT_RTNL();
-
-	force_zc = flags & XDP_ZEROCOPY;
-	force_copy = flags & XDP_COPY;
-
-	if (force_zc && force_copy)
-		return -EINVAL;
-
-	if (xsk_get_pool_from_qid(dev, queue_id))
-		return -EBUSY;
-
-	err = xsk_reg_pool_at_qid(dev, umem->pool, queue_id);
-	if (err)
-		return err;
-
 	umem->dev = dev;
 	umem->queue_id = queue_id;

-	if (flags & XDP_USE_NEED_WAKEUP) {
-		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
-		/* Tx needs to be explicitly woken up the first time.
-		 * Also for supporting drivers that do not implement this
-		 * feature. They will always have to call sendto().
-		 */
-		xsk_set_tx_need_wakeup(umem->pool);
-	}
-
 	dev_hold(dev);
-
-	if (force_copy)
-		/* For copy-mode, we are done. */
-		return 0;
-
-	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
-		err = -EOPNOTSUPP;
-		goto err_unreg_umem;
-	}
-
-	bpf.command = XDP_SETUP_XSK_POOL;
-	bpf.xsk.pool = umem->pool;
-	bpf.xsk.queue_id = queue_id;
-
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	if (err)
-		goto err_unreg_umem;
-
-	umem->zc = true;
-	return 0;
-
-err_unreg_umem:
-	if (!force_zc)
-		err = 0; /* fallback to copy mode */
-	if (err)
-		xsk_clear_pool_at_qid(dev, queue_id);
-	return err;
 }

 void xdp_umem_clear_dev(struct xdp_umem *umem)
 {
-	struct netdev_bpf bpf;
-	int err;
-
-	ASSERT_RTNL();
-
-	if (!umem->dev)
-		return;
-
-	if (umem->zc) {
-		bpf.command = XDP_SETUP_XSK_POOL;
-		bpf.xsk.pool = NULL;
-		bpf.xsk.queue_id = umem->queue_id;
-
-		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
-
-		if (err)
-			WARN(1, "failed to disable umem!\n");
-	}
-
-	xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
-
 	dev_put(umem->dev);
 	umem->dev = NULL;
 	umem->zc = false;
 }

-static void xdp_umem_unpin_pages(struct xdp_umem *umem)
-{
-	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
-
-	kfree(umem->pgs);
-	umem->pgs = NULL;
-}
-
-static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
-{
-	if (umem->user) {
-		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
-		free_uid(umem->user);
-	}
-}
-
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-	rtnl_lock();
 	xdp_umem_clear_dev(umem);
-	rtnl_unlock();

 	ida_simple_remove(&umem_ida, umem->id);

@@ -214,20 +95,12 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}

-	xp_destroy(umem->pool);
 	xdp_umem_unpin_pages(umem);

 	xdp_umem_unaccount_pages(umem);
 	kfree(umem);
 }

-static void xdp_umem_release_deferred(struct work_struct *work)
-{
-	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
-
-	xdp_umem_release(umem);
-}
-
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);

@@ -238,10 +111,8 @@ void xdp_put_umem(struct xdp_umem *umem)
 	if (!umem)
 		return;

-	if (refcount_dec_and_test(&umem->users)) {
-		INIT_WORK(&umem->work, xdp_umem_release_deferred);
-		schedule_work(&umem->work);
-	}
+	if (refcount_dec_and_test(&umem->users))
+		xdp_umem_release(umem);
 }

 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)

@@ -357,6 +228,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size = chunk_size;
+	umem->chunks = chunks;
 	umem->npgs = (u32)npgs;
 	umem->pgs = NULL;
 	umem->user = NULL;

@@ -374,16 +246,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (err)
 		goto out_account;

-	umem->pool = xp_create(umem, chunks, chunk_size, headroom, size,
-			       unaligned_chunks);
-	if (!umem->pool) {
-		err = -ENOMEM;
-		goto out_pin;
-	}
 	return 0;

-out_pin:
-	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;

net/xdp/xdp_umem.h

Lines changed: 2 additions & 2 deletions
@@ -8,8 +8,8 @@

 #include <net/xdp_sock_drv.h>

-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
-			u16 queue_id, u16 flags);
+void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
+			 u16 queue_id);
 void xdp_umem_clear_dev(struct xdp_umem *umem);
 bool xdp_umem_validate_queues(struct xdp_umem *umem);
 void xdp_get_umem(struct xdp_umem *umem);

0 commit comments