NACK: [SRU][F:linux-bluefield][PATCH] netfilter: flowtable: Use rw sem as flow block lock
Daniel Jurgens
danielj at nvidia.com
Tue Jun 1 21:49:36 UTC 2021
> -----Original Message-----
> From: Daniel Jurgens <danielj at nvidia.com>
> Sent: Tuesday, May 18, 2021 4:43 PM
> Subject: [SRU][F:linux-bluefield][PATCH] netfilter: flowtable: Use rw sem as
> flow block lock
>
> From: Paul Blakey <paulb at mellanox.com>
>
> BugLink: https://bugs.launchpad.net/bugs/1927251
>
> Currently flow offload threads are synchronized by the flow block mutex.
> Use rw lock instead to increase flow insertion (read) concurrency.
>
> Signed-off-by: Paul Blakey <paulb at mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh at mellanox.com>
> Signed-off-by: Pablo Neira Ayuso <pablo at netfilter.org>
> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
Resent with additional info about why it was backported.
> Signed-off-by: Daniel Jurgens <danielj at nvidia.com>
>
> ---
> include/net/netfilter/nf_flow_table.h | 2 +-
> net/netfilter/nf_flow_table_core.c | 11 +++++------
> net/netfilter/nf_flow_table_offload.c | 4 ++--
> 3 files changed, 8 insertions(+), 9 deletions(-)
>
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..e27b73f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -73,7 +73,7 @@ struct nf_flowtable {
> struct delayed_work gc_work;
> unsigned int flags;
> struct flow_block flow_block;
> -	struct mutex			flow_block_lock; /* Guards flow_block */
> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
> u32 flow_timeout;
> possible_net_t net;
> };
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..0c409aa 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> struct flow_block_cb *block_cb;
> int err = 0;
>
> - mutex_lock(&flow_table->flow_block_lock);
> + down_write(&flow_table->flow_block_lock);
> block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> if (block_cb) {
> err = -EEXIST;
> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> list_add_tail(&block_cb->list, &block->cb_list);
>
> unlock:
> - mutex_unlock(&flow_table->flow_block_lock);
> + up_write(&flow_table->flow_block_lock);
> return err;
> }
> EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> struct flow_block *block = &flow_table->flow_block;
> struct flow_block_cb *block_cb;
>
> - mutex_lock(&flow_table->flow_block_lock);
> + down_write(&flow_table->flow_block_lock);
> block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> if (block_cb)
> list_del(&block_cb->list);
> else
> WARN_ON(true);
> - mutex_unlock(&flow_table->flow_block_lock);
> + up_write(&flow_table->flow_block_lock);
> }
> EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>
> @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
>
> INIT_DEFERRABLE_WORK(&flowtable->gc_work,
> nf_flow_offload_work_gc);
> flow_block_init(&flowtable->flow_block);
> - mutex_init(&flowtable->flow_block_lock);
> + init_rwsem(&flowtable->flow_block_lock);
>
> 	err = rhashtable_init(&flowtable->rhashtable,
> 			      &nf_flow_offload_rhash_params);
> @@ -627,7 +627,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
> nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
> flow_table);
> rhashtable_destroy(&flow_table->rhashtable);
> - mutex_destroy(&flow_table->flow_block_lock);
> }
> EXPORT_SYMBOL_GPL(nf_flow_table_free);
>
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 26a950d..46bffaa 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
> if (cmd == FLOW_CLS_REPLACE)
> cls_flow.rule = flow_rule->rule;
>
> - mutex_lock(&flowtable->flow_block_lock);
> + down_read(&flowtable->flow_block_lock);
> list_for_each_entry(block_cb, block_cb_list, list) {
> err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
> block_cb->cb_priv);
> @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>
> i++;
> }
> - mutex_unlock(&flowtable->flow_block_lock);
> + up_read(&flowtable->flow_block_lock);
>
> if (cmd == FLOW_CLS_STATS)
> memcpy(stats, &cls_flow.stats, sizeof(*stats));
> --
> 1.8.3.1
More information about the kernel-team mailing list