Skip to content

Commit 687125b

Browse files
committed
devlink: split out core code
Move core code into a separate file. It's spread around the main file, which makes refactoring and figuring out how devlink works harder. Move the xarray and all the most core devlink instance code out — locking, ref counting, alloc, register, etc. Leave the port stuff in leftover.c; if we ever move the port code, it would probably go into its own file. Reviewed-by: Jacob Keller <jacob.e.keller@intel.com> Reviewed-by: Jiri Pirko <jiri@nvidia.com> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent e50ef40 commit 687125b

File tree

4 files changed

+476
-434
lines changed

4 files changed

+476
-434
lines changed

net/devlink/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
# SPDX-License-Identifier: GPL-2.0
22

3-
obj-y := leftover.o
3+
obj-y := leftover.o core.o

net/devlink/core.c

Lines changed: 347 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,347 @@
1+
// SPDX-License-Identifier: GPL-2.0-or-later
2+
/*
3+
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4+
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5+
*/
6+
7+
#include <net/genetlink.h>
8+
9+
#include "devl_internal.h"
10+
11+
/* All devlink instances, indexed by devlink->index. XA_FLAGS_ALLOC lets
 * xa_alloc_cyclic() in devlink_alloc_ns() pick the indices.
 */
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
12+
13+
/**
 * devlink_priv - Return a pointer to the driver private area
 * @devlink: devlink instance
 *
 * The private area is allocated together with the instance
 * (the priv_size bytes passed to devlink_alloc_ns()).
 */
void *devlink_priv(struct devlink *devlink)
{
	return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);
18+
19+
/**
 * priv_to_devlink - Map a driver private area back to its devlink instance
 * @priv: pointer previously obtained from devlink_priv()
 *
 * Inverse of devlink_priv(); works because the private area is embedded
 * in struct devlink.
 */
struct devlink *priv_to_devlink(void *priv)
{
	return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);
24+
25+
/**
 * devlink_to_dev - Return the parent device of a devlink instance
 * @devlink: devlink instance
 *
 * Returns the struct device passed to devlink_alloc_ns().
 */
struct device *devlink_to_dev(const struct devlink *devlink)
{
	return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);
30+
31+
/**
 * devlink_net - Return the network namespace of a devlink instance
 * @devlink: devlink instance
 *
 * Reads the pnet reference stored by devlink_alloc_ns() (and updated
 * on reload into another namespace).
 */
struct net *devlink_net(const struct devlink *devlink)
{
	return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);
36+
37+
/* Assert (via lockdep) that the per-instance devlink lock is held by
 * the current context. No-op when lockdep is disabled.
 */
void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
42+
43+
#ifdef CONFIG_LOCKDEP
44+
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
45+
bool devl_lock_is_held(struct devlink *devlink)
46+
{
47+
return lockdep_is_held(&devlink->lock);
48+
}
49+
EXPORT_SYMBOL_GPL(devl_lock_is_held);
50+
#endif
51+
52+
/* Take the per-instance devlink lock; pairs with devl_unlock(). */
void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
57+
58+
/* Try to take the per-instance devlink lock without blocking.
 * Returns 1 on success, 0 if the lock is contended (mutex_trylock
 * semantics).
 */
int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);
63+
64+
/* Release the per-instance devlink lock taken by devl_lock()/devl_trylock(). */
void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
69+
70+
/* Take a reference on @devlink unless its refcount has already dropped
 * to zero (the instance is going away). Returns the instance on
 * success, NULL otherwise; the caller must balance a successful get
 * with devlink_put().
 */
struct devlink *__must_check devlink_try_get(struct devlink *devlink)
{
	if (!refcount_inc_not_zero(&devlink->refcount))
		return NULL;
	return devlink;
}
76+
77+
/* RCU callback scheduled by the final devlink_put(); wakes up the
 * wait_for_completion() in devlink_unregister() once a grace period
 * has elapsed.
 */
static void __devlink_put_rcu(struct rcu_head *head)
{
	struct devlink *devlink = container_of(head, struct devlink, rcu);

	complete(&devlink->comp);
}
83+
84+
/* Drop a reference on @devlink. The last put signals the completion
 * through an RCU callback rather than directly, so concurrent RCU
 * walkers (see devlinks_xa_find_get()) are guaranteed to be done with
 * the instance before devlink_unregister() proceeds.
 */
void devlink_put(struct devlink *devlink)
{
	if (refcount_dec_and_test(&devlink->refcount))
		/* Make sure unregister operation that may await the completion
		 * is unblocked only after all users are after the end of
		 * RCU grace period.
		 */
		call_rcu(&devlink->rcu, __devlink_put_rcu);
}
93+
94+
static struct devlink *
95+
devlinks_xa_find_get(struct net *net, unsigned long *indexp, xa_mark_t filter,
96+
void * (*xa_find_fn)(struct xarray *, unsigned long *,
97+
unsigned long, xa_mark_t))
98+
{
99+
struct devlink *devlink;
100+
101+
rcu_read_lock();
102+
retry:
103+
devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
104+
if (!devlink)
105+
goto unlock;
106+
107+
/* In case devlink_unregister() was already called and "unregistering"
108+
* mark was set, do not allow to get a devlink reference here.
109+
* This prevents live-lock of devlink_unregister() wait for completion.
110+
*/
111+
if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
112+
goto retry;
113+
114+
/* For a possible retry, the xa_find_after() should be always used */
115+
xa_find_fn = xa_find_after;
116+
if (!devlink_try_get(devlink))
117+
goto retry;
118+
if (!net_eq(devlink_net(devlink), net)) {
119+
devlink_put(devlink);
120+
goto retry;
121+
}
122+
unlock:
123+
rcu_read_unlock();
124+
return devlink;
125+
}
126+
127+
/* Start an iteration over the devlinks xarray: return the first
 * referenced instance at or after *indexp matching @filter and @net.
 */
struct devlink *
devlinks_xa_find_get_first(struct net *net, unsigned long *indexp,
			   xa_mark_t filter)
{
	return devlinks_xa_find_get(net, indexp, filter, xa_find);
}
133+
134+
/* Continue an iteration over the devlinks xarray: return the next
 * referenced instance strictly after *indexp matching @filter and @net.
 */
struct devlink *
devlinks_xa_find_get_next(struct net *net, unsigned long *indexp,
			  xa_mark_t filter)
{
	return devlinks_xa_find_get(net, indexp, filter, xa_find_after);
}
140+
141+
/**
 * devlink_set_features - Set devlink supported features
 *
 * @devlink: devlink
 * @features: devlink support features
 *
 * This interface allows us to set the supported features (such as
 * DEVLINK_F_RELOAD) separately from devlink_alloc. Must be called
 * before devlink_register().
 */
void devlink_set_features(struct devlink *devlink, u64 features)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	/* Reload support may only be advertised when the driver actually
	 * provides valid reload ops.
	 */
	WARN_ON(features & DEVLINK_F_RELOAD &&
		!devlink_reload_supported(devlink->ops));
	devlink->features = features;
}
EXPORT_SYMBOL_GPL(devlink_set_features);
159+
160+
/**
 * devlink_register - Register devlink instance
 *
 * @devlink: devlink
 *
 * Makes the instance visible: sets the DEVLINK_REGISTERED mark in the
 * devlinks xarray (so the RCU iterators start returning it) and then
 * sends the "new" notifications to userspace.
 */
void devlink_register(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
	/* Make sure that we are in .probe() routine */

	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	devlink_notify_register(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
174+
175+
/**
 * devlink_unregister - Unregister devlink instance
 *
 * @devlink: devlink
 *
 * Reverse of devlink_register(). Marks the instance "unregistering" so
 * devlinks_xa_find_get() stops handing out new references, drops the
 * initial reference from devlink_alloc_ns(), and waits (via the
 * RCU-deferred completion, see devlink_put()) until all outstanding
 * references are gone before sending "del" notifications and clearing
 * the xarray marks.
 */
void devlink_unregister(struct devlink *devlink)
{
	ASSERT_DEVLINK_REGISTERED(devlink);
	/* Make sure that we are in .remove() routine */

	xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
	devlink_put(devlink);
	wait_for_completion(&devlink->comp);

	devlink_notify_unregister(devlink);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
	xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
194+
195+
/**
 * devlink_alloc_ns - Allocate new devlink instance resources
 * in specific namespace
 *
 * @ops: ops
 * @priv_size: size of user private data
 * @net: net namespace
 * @dev: parent device
 *
 * Allocate new devlink instance resources, including devlink index
 * and name. Returns NULL on invalid ops or allocation failure. The
 * instance starts unregistered; free with devlink_free().
 */
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
				 size_t priv_size, struct net *net,
				 struct device *dev)
{
	struct devlink *devlink;
	static u32 last_id;	/* cyclic-allocation cursor for instance indices */
	int ret;

	WARN_ON(!ops || !dev);
	if (!devlink_reload_actions_valid(ops))
		return NULL;

	/* Driver private area lives right behind the devlink struct. */
	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
	if (!devlink)
		return NULL;

	/* Reserve a unique instance index; cyclic allocation avoids
	 * immediately reusing the index of a just-freed instance.
	 */
	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret < 0)
		goto err_xa_alloc;

	/* Watch netdev events in @net to keep port<->netdev links in sync. */
	devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
	ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
	if (ret)
		goto err_register_netdevice_notifier;

	devlink->dev = dev;
	devlink->ops = ops;
	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
	write_pnet(&devlink->_net, net);
	INIT_LIST_HEAD(&devlink->rate_list);
	INIT_LIST_HEAD(&devlink->linecard_list);
	INIT_LIST_HEAD(&devlink->sb_list);
	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
	INIT_LIST_HEAD(&devlink->resource_list);
	INIT_LIST_HEAD(&devlink->param_list);
	INIT_LIST_HEAD(&devlink->region_list);
	INIT_LIST_HEAD(&devlink->reporter_list);
	INIT_LIST_HEAD(&devlink->trap_list);
	INIT_LIST_HEAD(&devlink->trap_group_list);
	INIT_LIST_HEAD(&devlink->trap_policer_list);
	/* Per-instance lock class so lockdep can tell instances apart. */
	lockdep_register_key(&devlink->lock_key);
	mutex_init(&devlink->lock);
	lockdep_set_class(&devlink->lock, &devlink->lock_key);
	mutex_init(&devlink->reporters_lock);
	mutex_init(&devlink->linecards_lock);
	/* The initial reference is dropped by devlink_unregister(). */
	refcount_set(&devlink->refcount, 1);
	init_completion(&devlink->comp);

	return devlink;

err_register_netdevice_notifier:
	xa_erase(&devlinks, devlink->index);
err_xa_alloc:
	kfree(devlink);
	return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
266+
267+
/**
 * devlink_free - Free devlink instance resources
 *
 * @devlink: devlink
 *
 * Counterpart of devlink_alloc_ns(); must only be called after
 * devlink_unregister() (or on an instance that was never registered).
 * All sub-object lists are expected to be empty by now - drivers must
 * have torn their objects down first, hence the WARNs.
 */
void devlink_free(struct devlink *devlink)
{
	ASSERT_DEVLINK_NOT_REGISTERED(devlink);

	mutex_destroy(&devlink->linecards_lock);
	mutex_destroy(&devlink->reporters_lock);
	mutex_destroy(&devlink->lock);
	lockdep_unregister_key(&devlink->lock_key);
	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->reporter_list));
	WARN_ON(!list_empty(&devlink->region_list));
	WARN_ON(!list_empty(&devlink->param_list));
	WARN_ON(!list_empty(&devlink->resource_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(!list_empty(&devlink->rate_list));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));

	xa_destroy(&devlink->snapshot_ids);
	xa_destroy(&devlink->ports);

	WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
						       &devlink->netdevice_nb));

	/* Remove from the global xarray last, then the memory can go. */
	xa_erase(&devlinks, devlink->index);

	kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
304+
305+
/* pernet pre_exit hook: evacuate devlink instances from a dying network
 * namespace by reloading each of them into init_net.
 */
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case network namespace is getting destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		/* Instances living in a non-init namespace must support reload. */
		WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
		devl_lock(devlink);
		err = devlink_reload(devlink, &init_net,
				     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
				     DEVLINK_RELOAD_LIMIT_UNSPEC,
				     &actions_performed, NULL);
		devl_unlock(devlink);
		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
		devlink_put(devlink);
	}
}
328+
329+
/* Run devlink_pernet_pre_exit() before any network namespace is destroyed. */
static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};
332+
333+
/* Subsystem init: register the devlink generic netlink family and the
 * pernet ops. Failure is loud (WARN_ON) since devlink is core
 * infrastructure.
 */
static int __init devlink_init(void)
{
	int err;

	err = genl_register_family(&devlink_nl_family);
	if (err)
		goto out;
	err = register_pernet_subsys(&devlink_pernet_ops);
	if (err)
		/* Don't leave a half-initialized subsystem behind; the
		 * original leaked the genl family registration here.
		 */
		genl_unregister_family(&devlink_nl_family);

out:
	WARN_ON(err);
	return err;
}

subsys_initcall(devlink_init);

0 commit comments

Comments
 (0)