Message-ID: <583cd753-395a-5109-03a6-382195c4dbf0@solarflare.com>
Date:   Wed, 29 May 2019 21:11:03 +0100
From:   Edward Cree <ecree@...arflare.com>
To:     Jamal Hadi Salim <jhs@...atatu.com>, Jiri Pirko <jiri@...nulli.us>,
        "Pablo Neira Ayuso" <pablo@...filter.org>,
        David Miller <davem@...emloft.net>
CC:     netdev <netdev@...r.kernel.org>,
        Cong Wang <xiyou.wangcong@...il.com>,
        Andy Gospodarek <andy@...yhouse.net>,
        Jakub Kicinski <jakub.kicinski@...ronome.com>,
        Michael Chan <michael.chan@...adcom.com>,
        Vishal Kulkarni <vishal@...lsio.com>
Subject: [RFC PATCH net-next 2/2] net/sched: add action block binding to other
 classifiers

cls_matchall, cls_u32 and cls_bpf support hardware offload as well, so they
also need to bind actions to blocks for RTM_GETACTION stats collection.
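
All three classifiers get the same transition logic; for clarity, here is a
condensed, annotated restatement of the pattern (shown with the cls_matchall
names, using the tc_bind_action_blocks()/tc_unbind_action_blocks() helpers
added in patch 1/2; error paths omitted):

	/* install: on the first successful hardware offload
	 * (in_hw_count going 0 -> nonzero), bind the filter's actions
	 * to the block so RTM_GETACTION can collect their stats
	 */
	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
	if (err > 0) {
		head->in_hw_count = err;
		tc_bind_action_blocks(&head->exts, block);
		tcf_block_offload_inc(block, &head->flags);
	}

	/* destroy: unbind before dropping the block's offload count */
	if (head->in_hw_count)
		tc_unbind_action_blocks(&head->exts, block);
	tcf_block_offload_dec(block, &head->flags);

	/* reoffload: only (un)bind when in_hw_count crosses zero, so
	 * repeated callbacks never bind the same actions twice
	 */
	if (add && !head->in_hw_count)
		tc_bind_action_blocks(&head->exts, block);
	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);
	if (!add && !head->in_hw_count)
		tc_unbind_action_blocks(&head->exts, block);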

Signed-off-by: Edward Cree <ecree@...arflare.com>
---
 net/sched/cls_bpf.c      | 10 +++++++++-
 net/sched/cls_matchall.c |  7 +++++++
 net/sched/cls_u32.c      |  7 +++++++
 3 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 27365ed3fe0b..c99e53cbf83d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -165,8 +165,11 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.name = obj->bpf_name;
 	cls_bpf.exts_integrated = obj->exts_integrated;
 
-	if (oldprog)
+	if (oldprog) {
+		if (oldprog->in_hw_count)
+			tc_unbind_action_blocks(&oldprog->exts, block);
 		tcf_block_offload_dec(block, &oldprog->gen_flags);
+	}
 
 	err = tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
@@ -175,6 +178,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 			return err;
 		} else if (err > 0) {
 			prog->in_hw_count = err;
+			tc_bind_action_blocks(&prog->exts, block);
 			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
@@ -683,8 +687,12 @@ static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 			continue;
 		}
 
+		if (add && !prog->in_hw_count)
+			tc_bind_action_blocks(&prog->exts, block);
 		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
 					  &prog->gen_flags, add);
+		if (!add && !prog->in_hw_count)
+			tc_unbind_action_blocks(&prog->exts, block);
 	}
 
 	return 0;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index b6b7b041fd6a..c65782cf2924 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -79,6 +79,8 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	cls_mall.cookie = cookie;
 
 	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+	if (head->in_hw_count)
+		tc_unbind_action_blocks(&head->exts, block);
 	tcf_block_offload_dec(block, &head->flags);
 }
 
@@ -120,6 +122,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 		return err;
 	} else if (err > 0) {
 		head->in_hw_count = err;
+		tc_bind_action_blocks(&head->exts, block);
 		tcf_block_offload_inc(block, &head->flags);
 	}
 
@@ -320,7 +323,11 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 		return 0;
 	}
 
+	if (add && !head->in_hw_count)
+		tc_bind_action_blocks(&head->exts, block);
 	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);
+	if (!add && !head->in_hw_count)
+		tc_unbind_action_blocks(&head->exts, block);
 
 	return 0;
 }
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4b8710a266cc..84f067d9b4a4 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -534,6 +534,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	cls_u32.knode.handle = n->handle;
 
 	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false);
+	if (n->in_hw_count)
+		tc_unbind_action_blocks(&n->exts, block);
 	tcf_block_offload_dec(block, &n->flags);
 }
 
@@ -569,6 +571,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 		return err;
 	} else if (err > 0) {
 		n->in_hw_count = err;
+		tc_bind_action_blocks(&n->exts, block);
 		tcf_block_offload_inc(block, &n->flags);
 	}
 
@@ -1223,7 +1226,11 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 		return 0;
 	}
 
+	if (add && !n->in_hw_count)
+		tc_bind_action_blocks(&n->exts, block);
 	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);
+	if (!add && !n->in_hw_count)
+		tc_unbind_action_blocks(&n->exts, block);
 
 	return 0;
 }
