[SRU][F:linux-bluefield][PATCH 2/2] netfilter: flowtable: Make nf_flow_table_offload_add/del_cb inline
Kleber Souza
kleber.souza at canonical.com
Wed May 19 10:57:15 UTC 2021
Hi Daniel,
On 18.05.21 23:41, Daniel Jurgens wrote:
> From: Alaa Hleihel <alaa at mellanox.com>
>
> BugLink: https://bugs.launchpad.net/bugs/1927246
>
> Currently, nf_flow_table_offload_add/del_cb are exported by nf_flow_table
> module, therefore modules using them will have hard-dependency
> on nf_flow_table and will require loading it all the time.
>
> This can lead to an unnecessary overhead on systems that do not
> use this API.
>
> To relax the hard-dependency between the modules, we unexport these
> functions and make them static inline.
>
> Fixes: 978703f42549 ("netfilter: flowtable: Add API for registering to flow table events")
> Signed-off-by: Alaa Hleihel <alaa at mellanox.com>
> Reviewed-by: Roi Dayan <roid at mellanox.com>
> Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner at gmail.com>
> Signed-off-by: David S. Miller <davem at davemloft.net>
> (backported from commit 505ee3a1cab96785fbc2c7cdb41ab677ec270c3c)
It's always good to add a short note explaining why the backport was needed,
between "[]" below the "(backported from ...)" line, especially when the
change is more than a simple context adjustment.

In this case, your backport adds an extra call to flow_block_cb_free() in
nf_flow_table_offload_del_cb() that is not present in the original function
being removed from nf_flow_table_core.c. That fix appears to come from commit
bc8e71314e84 ("netfilter: flowtable: Free block_cb when being deleted"). I
think it would be good to either backport that fix as well or at least mention
it in the backport notes. The former is probably better for future
maintainability.
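For reference, the delta that fix introduces in nf_flow_table_offload_del_cb()
is roughly the following (a sketch reconstructed from the two versions quoted
below, not the verbatim upstream hunk):

-	if (block_cb)
-		list_del(&block_cb->list);
-	else
+	if (block_cb) {
+		list_del(&block_cb->list);
+		flow_block_cb_free(block_cb);
+	} else {
 		WARN_ON(true);
+	}

If bc8e71314e84 is backported first, the function removed by this patch would
then already match the inline copy added to the header.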
Regards,
Kleber
> Signed-off-by: Daniel Jurgens <danielj at nvidia.com>
>
> ---
>  include/net/netfilter/nf_flow_table.h | 49 ++++++++++++++++++++++++++++++++---
>  net/netfilter/nf_flow_table_core.c    | 44 -------------------------------
>  2 files changed, 45 insertions(+), 48 deletions(-)
>
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..09c5a7f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -166,10 +166,51 @@ struct nf_flow_route {
>  struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
>  void flow_offload_free(struct flow_offload *flow);
>
> -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> -				 flow_setup_cb_t *cb, void *cb_priv);
> -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> -				  flow_setup_cb_t *cb, void *cb_priv);
> +static inline int
> +nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> +			     flow_setup_cb_t *cb, void *cb_priv)
> +{
> +	struct flow_block *block = &flow_table->flow_block;
> +	struct flow_block_cb *block_cb;
> +	int err = 0;
> +
> +	mutex_lock(&flow_table->flow_block_lock);
> +	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> +	if (block_cb) {
> +		err = -EEXIST;
> +		goto unlock;
> +	}
> +
> +	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
> +	if (IS_ERR(block_cb)) {
> +		err = PTR_ERR(block_cb);
> +		goto unlock;
> +	}
> +
> +	list_add_tail(&block_cb->list, &block->cb_list);
> +
> +unlock:
> +	mutex_unlock(&flow_table->flow_block_lock);
> +	return err;
> +}
> +
> +static inline void
> +nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> +			     flow_setup_cb_t *cb, void *cb_priv)
> +{
> +	struct flow_block *block = &flow_table->flow_block;
> +	struct flow_block_cb *block_cb;
> +
> +	mutex_lock(&flow_table->flow_block_lock);
> +	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> +	if (block_cb) {
> +		list_del(&block_cb->list);
> +		flow_block_cb_free(block_cb);
> +	} else {
> +		WARN_ON(true);
> +	}
> +	mutex_unlock(&flow_table->flow_block_lock);
> +}
>
>  int flow_offload_route_init(struct flow_offload *flow,
> 			    const struct nf_flow_route *route);
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..97b1780 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -391,50 +391,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
>  	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
>  }
>
> -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> -				 flow_setup_cb_t *cb, void *cb_priv)
> -{
> -	struct flow_block *block = &flow_table->flow_block;
> -	struct flow_block_cb *block_cb;
> -	int err = 0;
> -
> -	mutex_lock(&flow_table->flow_block_lock);
> -	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> -	if (block_cb) {
> -		err = -EEXIST;
> -		goto unlock;
> -	}
> -
> -	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
> -	if (IS_ERR(block_cb)) {
> -		err = PTR_ERR(block_cb);
> -		goto unlock;
> -	}
> -
> -	list_add_tail(&block_cb->list, &block->cb_list);
> -
> -unlock:
> -	mutex_unlock(&flow_table->flow_block_lock);
> -	return err;
> -}
> -EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> -
> -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> -				  flow_setup_cb_t *cb, void *cb_priv)
> -{
> -	struct flow_block *block = &flow_table->flow_block;
> -	struct flow_block_cb *block_cb;
> -
> -	mutex_lock(&flow_table->flow_block_lock);
> -	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> -	if (block_cb)
> -		list_del(&block_cb->list);
> -	else
> -		WARN_ON(true);
> -	mutex_unlock(&flow_table->flow_block_lock);
> -}
> -EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
> -
>  static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
> 				__be16 port, __be16 new_port)
>  {
>
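One more note on the rationale in the commit message: with the helpers now
static inline, a consumer module compiles these calls into itself instead of
taking a module symbol dependency on nf_flow_table. A minimal sketch of such a
consumer (illustrative only, with hypothetical driver names; it relies just on
the flow_setup_cb_t typedef and struct nf_flowtable from the kernel headers):

#include <linux/netdevice.h>
#include <net/netfilter/nf_flow_table.h>

/* Hypothetical example driver callback; not part of this patch. */
static int my_drv_ft_setup_cb(enum tc_setup_type type, void *type_data,
			      void *cb_priv)
{
	/* a real driver would dispatch FLOW_CLS_REPLACE/DESTROY/STATS here */
	return 0;
}

static int my_drv_ft_bind(struct nf_flowtable *ft, void *priv)
{
	/* inline helper: no symbol dependency on nf_flow_table.ko */
	return nf_flow_table_offload_add_cb(ft, my_drv_ft_setup_cb, priv);
}

static void my_drv_ft_unbind(struct nf_flowtable *ft, void *priv)
{
	nf_flow_table_offload_del_cb(ft, my_drv_ft_setup_cb, priv);
}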