@@ -47,160 +47,41 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
 }
 
-/* The umem is stored both in the _rx struct and the _tx struct as we do
- * not know if the device has more tx queues than rx, or the opposite.
- * This might also change during run time.
- */
-static int xsk_reg_pool_at_qid(struct net_device *dev,
-			       struct xsk_buff_pool *pool,
-			       u16 queue_id)
-{
-	if (queue_id >= max_t(unsigned int,
-			      dev->real_num_rx_queues,
-			      dev->real_num_tx_queues))
-		return -EINVAL;
-
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = pool;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = pool;
-
-	return 0;
-}
-
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
-					    u16 queue_id)
+static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		return dev->_rx[queue_id].pool;
-	if (queue_id < dev->real_num_tx_queues)
-		return dev->_tx[queue_id].pool;
+	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
 
-	return NULL;
+	kfree(umem->pgs);
+	umem->pgs = NULL;
 }
-EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
-static void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 {
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = NULL;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = NULL;
+	if (umem->user) {
+		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
+		free_uid(umem->user);
+	}
 }
 
-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
-			u16 queue_id, u16 flags)
+void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
+			 u16 queue_id)
 {
-	bool force_zc, force_copy;
-	struct netdev_bpf bpf;
-	int err = 0;
-
-	ASSERT_RTNL();
-
-	force_zc = flags & XDP_ZEROCOPY;
-	force_copy = flags & XDP_COPY;
-
-	if (force_zc && force_copy)
-		return -EINVAL;
-
-	if (xsk_get_pool_from_qid(dev, queue_id))
-		return -EBUSY;
-
-	err = xsk_reg_pool_at_qid(dev, umem->pool, queue_id);
-	if (err)
-		return err;
-
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 
-	if (flags & XDP_USE_NEED_WAKEUP) {
-		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
-		/* Tx needs to be explicitly woken up the first time.
-		 * Also for supporting drivers that do not implement this
-		 * feature. They will always have to call sendto().
-		 */
-		xsk_set_tx_need_wakeup(umem->pool);
-	}
-
 	dev_hold(dev);
-
-	if (force_copy)
-		/* For copy-mode, we are done. */
-		return 0;
-
-	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
-		err = -EOPNOTSUPP;
-		goto err_unreg_umem;
-	}
-
-	bpf.command = XDP_SETUP_XSK_POOL;
-	bpf.xsk.pool = umem->pool;
-	bpf.xsk.queue_id = queue_id;
-
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	if (err)
-		goto err_unreg_umem;
-
-	umem->zc = true;
-	return 0;
-
-err_unreg_umem:
-	if (!force_zc)
-		err = 0; /* fallback to copy mode */
-	if (err)
-		xsk_clear_pool_at_qid(dev, queue_id);
-	return err;
 }
 
 void xdp_umem_clear_dev(struct xdp_umem *umem)
 {
-	struct netdev_bpf bpf;
-	int err;
-
-	ASSERT_RTNL();
-
-	if (!umem->dev)
-		return;
-
-	if (umem->zc) {
-		bpf.command = XDP_SETUP_XSK_POOL;
-		bpf.xsk.pool = NULL;
-		bpf.xsk.queue_id = umem->queue_id;
-
-		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
-
-		if (err)
-			WARN(1, "failed to disable umem!\n");
-	}
-
-	xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
-
 	dev_put(umem->dev);
 	umem->dev = NULL;
 	umem->zc = false;
 }
 
-static void xdp_umem_unpin_pages(struct xdp_umem *umem)
-{
-	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
-
-	kfree(umem->pgs);
-	umem->pgs = NULL;
-}
-
-static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
-{
-	if (umem->user) {
-		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
-		free_uid(umem->user);
-	}
-}
-
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-	rtnl_lock();
 	xdp_umem_clear_dev(umem);
-	rtnl_unlock();
 
 	ida_simple_remove(&umem_ida, umem->id);
 
@@ -214,20 +95,12 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
-	xp_destroy(umem->pool);
 	xdp_umem_unpin_pages(umem);
 
 	xdp_umem_unaccount_pages(umem);
 	kfree(umem);
 }
 
-static void xdp_umem_release_deferred(struct work_struct *work)
-{
-	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
-
-	xdp_umem_release(umem);
-}
-
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);
@@ -238,10 +111,8 @@ void xdp_put_umem(struct xdp_umem *umem)
 	if (!umem)
 		return;
 
-	if (refcount_dec_and_test(&umem->users)) {
-		INIT_WORK(&umem->work, xdp_umem_release_deferred);
-		schedule_work(&umem->work);
-	}
+	if (refcount_dec_and_test(&umem->users))
+		xdp_umem_release(umem);
 }
 
 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
@@ -357,6 +228,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size = chunk_size;
+	umem->chunks = chunks;
 	umem->npgs = (u32)npgs;
 	umem->pgs = NULL;
 	umem->user = NULL;
@@ -374,16 +246,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (err)
 		goto out_account;
 
-	umem->pool = xp_create(umem, chunks, chunk_size, headroom, size,
-			       unaligned_chunks);
-	if (!umem->pool) {
-		err = -ENOMEM;
-		goto out_pin;
-	}
 	return 0;
 
-out_pin:
-	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
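
For reference, a minimal sketch of the release path as it reads after this change, assembled only from the new (+) and surrounding context lines above. It is an illustration of the simplified flow, not a verbatim excerpt of net/xdp/xdp_umem.c, and it assumes the usual kernel definitions of struct xdp_umem and refcount_dec_and_test().

/* Sketch of the post-patch teardown flow, pieced together from the diff
 * above. The deferred work item is gone: xdp_umem_clear_dev() no longer
 * takes the rtnl lock or calls ndo_bpf(), so the release can run inline.
 */
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users))
		xdp_umem_release(umem);	/* drops the dev reference, then
					 * unpins and unaccounts the pages
					 * and frees the umem itself */
}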