Skip to content

Commit

Permalink
net: remove indirect block netdev event registration
Browse files Browse the repository at this point in the history
Drivers no longer register for netdev events to set up indirect blocks.
Remove __flow_indr_block_cb_register() and
__flow_indr_block_cb_unregister().

The frontends set up the callbacks through flow_indr_dev_setup_block().

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
ummakynes authored and davem330 committed Jun 1, 2020
1 parent e445e30 commit 709ffbe
Show file tree
Hide file tree
Showing 5 changed files with 1 addition and 444 deletions.
9 changes: 0 additions & 9 deletions include/net/flow_offload.h
Original file line number Diff line number Diff line change
Expand Up @@ -546,15 +546,6 @@ typedef void flow_indr_block_cmd_t(struct net_device *dev,
flow_indr_block_bind_cb_t *cb, void *cb_priv,
enum flow_block_command command);

/*
 * One frontend entry on the global indirect block command list.
 * @cb:   command handler invoked for each indirect (un)bind request
 * @list: linkage into the global block_cb_list
 */
struct flow_indr_block_entry {
	flow_indr_block_cmd_t *cb;
	struct list_head list;
};

/* Append a frontend command handler to the global list. */
void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);

/* Remove a previously added frontend command handler. */
void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);

/*
 * Register a driver indirect block callback for @dev.
 * NOTE(review): the non-__ wrapper takes rtnl around this, so callers of
 * the __ variant presumably must hold rtnl themselves — confirm.
 */
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident);
Expand Down
238 changes: 0 additions & 238 deletions net/core/flow_offload.c
Original file line number Diff line number Diff line change
Expand Up @@ -473,241 +473,3 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

/* Frontend command handlers (struct flow_indr_block_entry). */
static LIST_HEAD(block_cb_list);

/* Maps a net_device pointer to its struct flow_indr_block_dev. */
static struct rhashtable indr_setup_block_ht;

/*
 * One driver callback registered on a device.
 * @list:     linkage into flow_indr_block_dev::cb_list
 * @cb_priv:  driver-private data passed back to @cb
 * @cb:       driver bind callback
 * @cb_ident: identity cookie distinguishing multiple registrations of @cb
 */
struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

/*
 * Per-device indirect block state, hashed by @dev.
 * @ht_node: linkage into indr_setup_block_ht
 * @dev:     the device callbacks are registered against (hash key)
 * @refcnt:  one reference per flow_indr_block_dev_get()
 * @cb_list: registered struct flow_indr_block_cb entries
 */
struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

/* Hash flow_indr_block_dev entries by the net_device pointer value. */
static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset = offsetof(struct flow_indr_block_dev, dev),
	.head_offset = offsetof(struct flow_indr_block_dev, ht_node),
	.key_len = sizeof(struct net_device *),
};

/* Find the indirect block state attached to @dev, or NULL if none. */
static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	struct flow_indr_block_dev *found;

	found = rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				       flow_indr_setup_block_ht_params);
	return found;
}

/*
 * Look up the per-device indirect block state for @dev, allocating and
 * hashing a new entry on first use.  Each successful call takes one
 * reference; pair with flow_indr_block_dev_put().  Returns NULL on
 * allocation or hashtable-insert failure.
 */
static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *entry;

	entry = flow_indr_block_dev_lookup(dev);
	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return NULL;

		INIT_LIST_HEAD(&entry->cb_list);
		entry->dev = dev;
		if (rhashtable_insert_fast(&indr_setup_block_ht,
					   &entry->ht_node,
					   flow_indr_setup_block_ht_params)) {
			kfree(entry);
			return NULL;
		}
	}

	entry->refcnt++;
	return entry;
}

/* Drop one reference on @indr_dev; unhash and free it on the last put. */
static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	indr_dev->refcnt--;
	if (indr_dev->refcnt == 0) {
		rhashtable_remove_fast(&indr_setup_block_ht,
				       &indr_dev->ht_node,
				       flow_indr_setup_block_ht_params);
		kfree(indr_dev);
	}
}

/*
 * Return the callback entry on @indr_dev matching both @cb and
 * @cb_ident, or NULL when no such registration exists.
 */
static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *pos;

	list_for_each_entry(pos, &indr_dev->cb_list, list) {
		if (pos->cb == cb && pos->cb_ident == cb_ident)
			return pos;
	}

	return NULL;
}

/*
 * Attach a new (@cb, @cb_ident) callback with @cb_priv to @indr_dev.
 * Returns the new entry, ERR_PTR(-EEXIST) when already registered, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *new_cb;

	if (flow_indr_block_cb_lookup(indr_dev, cb, cb_ident))
		return ERR_PTR(-EEXIST);

	new_cb = kzalloc(sizeof(*new_cb), GFP_KERNEL);
	if (!new_cb)
		return ERR_PTR(-ENOMEM);

	new_cb->cb_priv = cb_priv;
	new_cb->cb = cb;
	new_cb->cb_ident = cb_ident;
	list_add(&new_cb->list, &indr_dev->cb_list);

	return new_cb;
}

/* Unlink @indr_block_cb from its device list and free it. */
static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

/* Serializes block_cb_list and the frontend command handlers. */
static DEFINE_MUTEX(flow_indr_block_cb_lock);

/*
 * Replay @command for (@dev, @cb, @cb_priv) through every frontend
 * command handler currently on block_cb_list.
 */
static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}

/*
 * Bind an indirect block callback to @dev, then replay FLOW_BLOCK_BIND
 * through every frontend command handler.  Returns 0 on success, or a
 * negative errno (-ENOMEM, -EEXIST) on failure; the device reference
 * taken here is dropped again on the error path.
 */
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_dev *indr_dev;
	struct flow_indr_block_cb *new_cb;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	new_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	if (IS_ERR(new_cb)) {
		flow_indr_block_dev_put(indr_dev);
		return PTR_ERR(new_cb);
	}

	flow_block_cmd(dev, new_cb->cb, new_cb->cb_priv, FLOW_BLOCK_BIND);

	return 0;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

/* rtnl-locked wrapper around __flow_indr_block_cb_register(). */
int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int ret;

	rtnl_lock();
	ret = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

/*
 * Unbind the (@cb, @cb_ident) indirect callback from @dev: replay
 * FLOW_BLOCK_UNBIND through the frontends, free the callback entry and
 * drop the device reference.  Unknown dev/cb pairs are silently ignored.
 */
void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_dev *indr_dev;
	struct flow_indr_block_cb *block_cb;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!block_cb)
		return;

	flow_block_cmd(dev, block_cb->cb, block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

/* rtnl-locked wrapper around __flow_indr_block_cb_unregister(). */
void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

void flow_indr_block_call(struct net_device *dev,
struct flow_block_offload *bo,
enum flow_block_command command,
enum tc_setup_type type)
{
struct flow_indr_block_cb *indr_block_cb;
struct flow_indr_block_dev *indr_dev;

indr_dev = flow_indr_block_dev_lookup(dev);
if (!indr_dev)
return;

list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
indr_block_cb->cb(dev, indr_block_cb->cb_priv, type, bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);

/* Append frontend command handler @entry to the global list. */
void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

/* Remove frontend command handler @entry from the global list. */
void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);

/* Boot-time setup of the dev -> flow_indr_block_dev hashtable. */
static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);
66 changes: 0 additions & 66 deletions net/netfilter/nf_flow_table_offload.c
Original file line number Diff line number Diff line change
Expand Up @@ -1008,83 +1008,17 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);

/*
 * Build a flow_block_offload request for @flowtable, hand it to the
 * driver callback @cb as TC_SETUP_FT, then commit the resulting
 * callback list.  A NULL @flowtable is a no-op.
 */
static void nf_flow_table_indr_block_ing_cmd(struct net_device *dev,
					     struct nf_flowtable *flowtable,
					     flow_indr_block_bind_cb_t *cb,
					     void *cb_priv,
					     enum flow_block_command cmd)
{
	struct netlink_ext_ack ext = {};
	struct flow_block_offload offload;

	if (flowtable) {
		nf_flow_table_block_offload_init(&offload, dev_net(dev), cmd,
						 flowtable, &ext);
		cb(dev, cb_priv, TC_SETUP_FT, &offload);
		nf_flow_table_block_setup(flowtable, &offload, cmd);
	}
}

/* Propagate @cmd for @flowtable only when hardware offload is enabled. */
static void nf_flow_table_indr_block_cb_cmd(struct nf_flowtable *flowtable,
					    struct net_device *dev,
					    flow_indr_block_bind_cb_t *cb,
					    void *cb_priv,
					    enum flow_block_command cmd)
{
	if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
		nf_flow_table_indr_block_ing_cmd(dev, flowtable, cb, cb_priv,
						 cmd);
}

/*
 * Indirect block command handler: walk every nftables flowtable hook in
 * @dev's netns and propagate @cmd to those attached to @dev.  Holds the
 * nft commit mutex to stabilize the table/flowtable/hook lists.
 */
static void nf_flow_table_indr_block_cb(struct net_device *dev,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv,
					enum flow_block_command cmd)
{
	struct net *net = dev_net(dev);
	struct nft_flowtable *ft;
	struct nft_table *tbl;
	struct nft_hook *hook;

	mutex_lock(&net->nft.commit_mutex);
	list_for_each_entry(tbl, &net->nft.tables, list) {
		list_for_each_entry(ft, &tbl->flowtables, list) {
			list_for_each_entry(hook, &ft->hook_list, list) {
				if (hook->ops.dev == dev)
					nf_flow_table_indr_block_cb_cmd(&ft->data,
									dev, cb,
									cb_priv,
									cmd);
			}
		}
	}
	mutex_unlock(&net->nft.commit_mutex);
}

/* Frontend command handler registered with the flow_offload core. */
static struct flow_indr_block_entry block_ing_entry = {
	.cb = nf_flow_table_indr_block_cb,
	.list = LIST_HEAD_INIT(block_ing_entry.list),
};

/*
 * Allocate the flow table offload workqueue and register the indirect
 * block command handler.  Returns 0 on success, -ENOMEM on failure.
 */
int nf_flow_table_offload_init(void)
{
	nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
					     WQ_UNBOUND, 0);
	if (nf_flow_offload_wq) {
		flow_indr_add_block_cb(&block_ing_entry);
		return 0;
	}

	return -ENOMEM;
}

/*
 * Teardown counterpart of nf_flow_table_offload_init(): unregister the
 * command handler first so no new work targets the workqueue, then
 * destroy the workqueue.
 */
void nf_flow_table_offload_exit(void)
{
	flow_indr_del_block_cb(&block_ing_entry);
	destroy_workqueue(nf_flow_offload_wq);
}
Loading

0 comments on commit 709ffbe

Please sign in to comment.