lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20221127014749.38849-1-kuniyu@amazon.com>
Date:   Sun, 27 Nov 2022 10:47:49 +0900
From:   Kuniyuki Iwashima <kuniyu@...zon.com>
To:     <pctammela@...il.com>
CC:     <davem@...emloft.net>, <edumazet@...gle.com>, <jhs@...atatu.com>,
        <jiri@...nulli.us>, <kuba@...nel.org>, <netdev@...r.kernel.org>,
        <pabeni@...hat.com>, <pctammela@...atatu.com>,
        <xiyou.wangcong@...il.com>, <kuniyu@...zon.com>
Subject: Re: [PATCH RFC net-next 1/3] net/sched: add retpoline wrapper for tc

From:   Pedro Tammela <pctammela@...il.com>
Date:   Fri, 25 Nov 2022 14:52:05 -0300
> On kernels compiled with CONFIG_RETPOLINE and CONFIG_NET_TC_INDIRECT_WRAPPER,
> optimize actions and filters that are compiled as built-ins into a direct call.
> The calls are ordered alphabetically, but new ones should be ideally
> added last.
> 
> On subsequent patches we expose the classifiers and actions functions
> and wire up the wrapper into tc.
> 
> Signed-off-by: Pedro Tammela <pctammela@...atatu.com>
> Reviewed-by: Jamal Hadi Salim <jhs@...atatu.com>
> ---
>  include/net/tc_wrapper.h | 274 +++++++++++++++++++++++++++++++++++++++
>  net/sched/Kconfig        |  13 ++
>  2 files changed, 287 insertions(+)
>  create mode 100644 include/net/tc_wrapper.h
> 
> diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
> new file mode 100644
> index 000000000000..7890d2810148
> --- /dev/null
> +++ b/include/net/tc_wrapper.h
> @@ -0,0 +1,274 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __NET_TC_WRAPPER_H
> +#define __NET_TC_WRAPPER_H
> +
> +#include <linux/indirect_call_wrapper.h>
> +#include <net/pkt_cls.h>
> +
> +#if IS_ENABLED(CONFIG_RETPOLINE) && IS_ENABLED(CONFIG_NET_TC_INDIRECT_WRAPPER)
> +
> +#define TC_INDIRECT_SCOPE
> +
> +/* TC Actions */
> +INDIRECT_CALLABLE_DECLARE(int tcf_bpf_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));

I would prefer writing this as below, rather than repeating INDIRECT_CALLABLE_DECLARE(),
since all actions have the same args.

  #define TC_INDIRECT_ACTION_DECLARE(func) \
  	INDIRECT_CALLABLE_DECLARE(int func(...))

  TC_INDIRECT_ACTION_DECLARE(tcf_bpf_act);
  TC_INDIRECT_ACTION_DECLARE(tcf_csum_act);
  ...


> +INDIRECT_CALLABLE_DECLARE(int tcf_connmark_act(struct sk_buff *skb,
> +					       const struct tc_action *a,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_csum_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ct_act(struct sk_buff *skb,
> +					 const struct tc_action *a,
> +					 struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ctinfo_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_gact_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_gate_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ife_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ipt_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_mirred_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_mpls_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_nat_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_pedit_act(struct sk_buff *skb,
> +					    const struct tc_action *a,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_police_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_sample_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_simp_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_skbedit_act(struct sk_buff *skb,
> +					      const struct tc_action *a,
> +					      struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_skbmod_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_vlan_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tunnel_key_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +
> +/* TC Filters */
> +INDIRECT_CALLABLE_DECLARE(int basic_classify(struct sk_buff *skb,
> +					     const struct tcf_proto *tp,
> +					     struct tcf_result *res));

Same here.


> +INDIRECT_CALLABLE_DECLARE(int cls_bpf_classify(struct sk_buff *skb,
> +					       const struct tcf_proto *tp,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int cls_cgroup_classify(struct sk_buff *skb,
> +						  const struct tcf_proto *tp,
> +						  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int fl_classify(struct sk_buff *skb,
> +					  const struct tcf_proto *tp,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int flow_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int fw_classify(struct sk_buff *skb,
> +					  const struct tcf_proto *tp,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int mall_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int route4_classify(struct sk_buff *skb,
> +					      const struct tcf_proto *tp,
> +					      struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int rsvp_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int rsvp6_classify(struct sk_buff *skb,
> +					     const struct tcf_proto *tp,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcindex_classify(struct sk_buff *skb,
> +					       const struct tcf_proto *tp,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int u32_classify(struct sk_buff *skb,
> +					   const struct tcf_proto *tp,
> +					   struct tcf_result *res));
> +
> +static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
> +			   struct tcf_result *res)
> +{
> +	if (0) { /* noop */ }
> +#if IS_BUILTIN(CONFIG_NET_ACT_BPF)
> +	else if (a->ops->act == tcf_bpf_act)
> +		return tcf_bpf_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CONNMARK)
> +	else if (a->ops->act == tcf_connmark_act)
> +		return tcf_connmark_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CSUM)
> +	else if (a->ops->act == tcf_csum_act)
> +		return tcf_csum_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CT)
> +	else if (a->ops->act == tcf_ct_act)
> +		return tcf_ct_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CTINFO)
> +	else if (a->ops->act == tcf_ctinfo_act)
> +		return tcf_ctinfo_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
> +	else if (a->ops->act == tcf_gact_act)
> +		return tcf_gact_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_GATE)
> +	else if (a->ops->act == tcf_gate_act)
> +		return tcf_gate_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_IFE)
> +	else if (a->ops->act == tcf_ife_act)
> +		return tcf_ife_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
> +	else if (a->ops->act == tcf_ipt_act)
> +		return tcf_ipt_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_MIRRED)
> +	else if (a->ops->act == tcf_mirred_act)
> +		return tcf_mirred_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_MPLS)
> +	else if (a->ops->act == tcf_mpls_act)
> +		return tcf_mpls_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_NAT)
> +	else if (a->ops->act == tcf_nat_act)
> +		return tcf_nat_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_PEDIT)
> +	else if (a->ops->act == tcf_pedit_act)
> +		return tcf_pedit_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_POLICE)
> +	else if (a->ops->act == tcf_police_act)
> +		return tcf_police_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SAMPLE)
> +	else if (a->ops->act == tcf_sample_act)
> +		return tcf_sample_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
> +	else if (a->ops->act == tcf_simp_act)
> +		return tcf_simp_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT)
> +	else if (a->ops->act == tcf_skbedit_act)
> +		return tcf_skbedit_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SKBMOD)
> +	else if (a->ops->act == tcf_skbmod_act)
> +		return tcf_skbmod_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY)
> +	else if (a->ops->act == tunnel_key_act)
> +		return tunnel_key_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_VLAN)
> +	else if (a->ops->act == tcf_vlan_act)
> +		return tcf_vlan_act(skb, a, res);
> +#endif
> +	else
> +		return a->ops->act(skb, a, res);
> +}
> +
> +static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
> +				struct tcf_result *res)
> +{
> +	if (0) { /* noop */ }
> +#if IS_BUILTIN(CONFIG_NET_CLS_BASIC)
> +	else if (tp->classify == basic_classify)
> +		return basic_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
> +	else if (tp->classify == cls_bpf_classify)
> +		return cls_bpf_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
> +	else if (tp->classify == cls_cgroup_classify)
> +		return cls_cgroup_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FLOW)
> +	else if (tp->classify == flow_classify)
> +		return flow_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FLOWER)
> +	else if (tp->classify == fl_classify)
> +		return fl_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FW)
> +	else if (tp->classify == fw_classify)
> +		return fw_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_MATCHALL)
> +	else if (tp->classify == mall_classify)
> +		return mall_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_ROUTE4)
> +	else if (tp->classify == route4_classify)
> +		return route4_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
> +	else if (tp->classify == rsvp_classify)
> +		return rsvp_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
> +	else if (tp->classify == rsvp6_classify)
> +		return rsvp6_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
> +	else if (tp->classify == tcindex_classify)
> +		return tcindex_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_U32)
> +	else if (tp->classify == u32_classify)
> +		return u32_classify(skb, tp, res);
> +#endif
> +	else
> +		return tp->classify(skb, tp, res);
> +}
> +
> +#else
> +
> +#define TC_INDIRECT_SCOPE static
> +
> +static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
> +			   struct tcf_result *res)
> +{
> +	return a->ops->act(skb, a, res);
> +}
> +
> +static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
> +				struct tcf_result *res)
> +{
> +	return tp->classify(skb, tp, res);
> +}
> +
> +#endif
> +
> +#endif /* __NET_TC_WRAPPER_H */
> diff --git a/net/sched/Kconfig b/net/sched/Kconfig
> index 1e8ab4749c6c..9bc055f8013e 100644
> --- a/net/sched/Kconfig
> +++ b/net/sched/Kconfig
> @@ -1021,6 +1021,19 @@ config NET_TC_SKB_EXT
>  
>  	  Say N here if you won't be using tc<->ovs offload or tc chains offload.
>  
> +config NET_TC_INDIRECT_WRAPPER
> +	bool "TC indirect call wrapper"
> +	depends on NET_SCHED
> +	depends on RETPOLINE
> +
> +	help
> +	  Say Y here to skip indirect calls in the TC datapath for known
> +	  builtin classifiers/actions under CONFIG_RETPOLINE kernels.
> +
> +	  TC may run slower on CPUs with hardware based mitigations.
> +
> +	  If unsure, say N.
> +
>  endif # NET_SCHED
>  
>  config NET_SCH_FIFO
> -- 
> 2.34.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ