Message-ID: <8cf44ce9-e117-46fe-8bef-21200db97d0f@fiberby.net>
Date: Mon, 13 Jan 2025 21:41:19 +0000
From: Asbjørn Sloth Tønnesen <ast@...erby.net>
To: Xin Long <lucien.xin@...il.com>
Cc: davem@...emloft.net, kuba@...nel.org, Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>, Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>, Jiri Pirko <jiri@...nulli.us>,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>,
Shuang Li <shuali@...hat.com>, network dev <netdev@...r.kernel.org>
Subject: Re: [PATCHv2 net] net: sched: refine software bypass handling in
tc_run
Hi Xin,
With the concept turned on its head, we probably shouldn't call it a bypass
anymore, now that software processing is only enabled if there are any rules
that need it.
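
To make the new counting scheme concrete, here is a tiny standalone sketch of
how I read your v2 (userspace C, with a plain atomic counter standing in for
the static-key machinery; the names mirror the kernel ones, but this is only
an illustration, not kernel code):

/*
 * Standalone sketch (not kernel code): a global "sw enabled" state that is
 * only set while at least one filter actually needs software processing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tcf_sw_enabled_key;	/* stand-in for the static key */

struct tcf_block {
	atomic_int useswcnt;		/* filters in this block needing sw */
};

/* A filter that needs software processing was added to the block. */
static void block_sw_filter_added(struct tcf_block *block)
{
	if (atomic_fetch_add(&block->useswcnt, 1) == 0)
		atomic_fetch_add(&tcf_sw_enabled_key, 1);  /* ~static_branch_inc() */
}

/* Such a filter was removed from the block. */
static void block_sw_filter_removed(struct tcf_block *block)
{
	if (atomic_fetch_sub(&block->useswcnt, 1) == 1)
		atomic_fetch_sub(&tcf_sw_enabled_key, 1);  /* ~static_branch_dec() */
}

/* Fast-path check, analogous to the early return in tc_run(). */
static bool tc_sw_processing_needed(void)
{
	return atomic_load(&tcf_sw_enabled_key) > 0;
}

int main(void)
{
	struct tcf_block block = { .useswcnt = 0 };

	printf("no filters:     sw needed = %d\n", tc_sw_processing_needed());
	block_sw_filter_added(&block);
	printf("one sw filter:  sw needed = %d\n", tc_sw_processing_needed());
	block_sw_filter_removed(&block);
	printf("filter removed: sw needed = %d\n", tc_sw_processing_needed());
	return 0;
}

In other words, the key now means "software processing is in use somewhere",
so the check in tc_run() has to be inverted, which is what my patch below does.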
s/PATCHv2 net/PATCH v2 net/g, but I think my patch below pushes it
firmly into net-next territory, unless you can convince the maintainers that
usesw is always set correctly.
I will run it through some tests tomorrow with my patch applied.
On 1/13/25 6:42 PM, Xin Long wrote:
> [...]
> @@ -410,48 +411,17 @@ static void tcf_proto_get(struct tcf_proto *tp)
>  	refcount_inc(&tp->refcnt);
>  }
>
> -static void tcf_maintain_bypass(struct tcf_block *block)
> -{
> -	int filtercnt = atomic_read(&block->filtercnt);
> -	int skipswcnt = atomic_read(&block->skipswcnt);
> -	bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
> -
> -	if (bypass_wanted != block->bypass_wanted) {
> -#ifdef CONFIG_NET_CLS_ACT
> -		if (bypass_wanted)
> -			static_branch_inc(&tcf_bypass_check_needed_key);
This enabled the global sw bypass checking static key when sw was NOT used.
> [...]
> @@ -2409,7 +2379,13 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
>  		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
>  			       RTM_NEWTFILTER, false, rtnl_held, extack);
>  		tfilter_put(tp, fh);
> -		tcf_block_filter_cnt_update(block, &tp->counted, true);
> +		spin_lock(&tp->lock);
> +		if (tp->usesw && !tp->counted) {
> +			if (atomic_inc_return(&block->useswcnt) == 1)
> +				static_branch_inc(&tcf_bypass_check_needed_key);
This enables the global sw bypass checking static key when sw IS used.
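
To spell out the flip between the two hunks (a condensed standalone
paraphrase, not kernel code):

#include <stdbool.h>

/* Old scheme: the key was set when a block could bypass software entirely,
 * i.e. there were filters and every one of them was skip_sw. */
static bool old_key_wanted(int filtercnt, int skipswcnt)
{
	return filtercnt > 0 && filtercnt == skipswcnt;
}

/* v2 scheme: the key is set while at least one filter uses software,
 * so "bypass_check_needed" no longer describes what it guards. */
static bool new_key_wanted(int useswcnt)
{
	return useswcnt > 0;
}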
I think you are missing the below patch (not tested in any way, yet):
This patch:
- Renames the static key, as its use has changed.
- Fixes tc_run() to use the static key in the new way.
---
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e4fea1decca1..4eb0ebb9e76c 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -75,7 +75,7 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
+DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
 
 static inline bool tcf_block_bypass_sw(struct tcf_block *block)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index a9f62f5aeb84..3ec89165296f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2134,8 +2134,8 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 #endif
 
 #ifdef CONFIG_NET_CLS_ACT
-DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
-EXPORT_SYMBOL(tcf_bypass_check_needed_key);
+DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
+EXPORT_SYMBOL(tcf_sw_enabled_key);
 #endif
 
 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4030,10 +4030,13 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
 	if (!miniq)
 		return ret;
 
-	if (static_branch_unlikely(&tcf_bypass_check_needed_key)) {
-		if (tcf_block_bypass_sw(miniq->block))
-			return ret;
-	}
+	/* Global bypass */
+	if (!static_branch_likely(&tcf_sw_enabled_key))
+		return ret;
+
+	/* Block-wise bypass */
+	if (tcf_block_bypass_sw(miniq->block))
+		return ret;
 
 	tc_skb_cb(skb)->mru = 0;
 	tc_skb_cb(skb)->post_ct = false;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 358b66dfdc83..617fcb682209 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -419,7 +419,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
 	tp->ops->destroy(tp, rtnl_held, extack);
 	if (tp->usesw && tp->counted) {
 		if (!atomic_dec_return(&tp->chain->block->useswcnt))
-			static_branch_dec(&tcf_bypass_check_needed_key);
+			static_branch_dec(&tcf_sw_enabled_key);
 		tp->counted = false;
 	}
 	if (sig_destroy)
@@ -2382,7 +2382,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 		spin_lock(&tp->lock);
 		if (tp->usesw && !tp->counted) {
 			if (atomic_inc_return(&block->useswcnt) == 1)
-				static_branch_inc(&tcf_bypass_check_needed_key);
+				static_branch_inc(&tcf_sw_enabled_key);
 			tp->counted = true;
 		}
 		spin_unlock(&tp->lock);