Message-ID: <CAMDZJNUO9k8xmrJwrXnj+LVG=bEv5Zwe=YkjOqSBrDS348OQfA@mail.gmail.com>
Date:   Tue, 15 Mar 2022 20:48:47 +0800
From:   Tonghao Zhang <xiangxia.m.yue@...il.com>
To:     Daniel Borkmann <daniel@...earbox.net>
Cc:     Linux Kernel Network Developers <netdev@...r.kernel.org>,
        Jamal Hadi Salim <jhs@...atatu.com>,
        Cong Wang <xiyou.wangcong@...il.com>,
        Jiri Pirko <jiri@...nulli.us>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Jonathan Lemon <jonathan.lemon@...il.com>,
        Eric Dumazet <edumazet@...gle.com>,
        Alexander Lobakin <alobakin@...me>,
        Paolo Abeni <pabeni@...hat.com>,
        Talal Ahmad <talalahmad@...gle.com>,
        Kevin Hao <haokexin@...il.com>,
        Alexei Starovoitov <ast@...nel.org>, bpf@...r.kernel.org
Subject: Re: [net-next v10 1/2] net: sched: use queue_mapping to pick tx queue

On Tue, Mar 15, 2022 at 5:59 AM Daniel Borkmann <daniel@...earbox.net> wrote:
>
> On 3/14/22 3:15 PM, xiangxia.m.yue@...il.com wrote:
> [...]
> >   include/linux/netdevice.h |  3 +++
> >   include/linux/rtnetlink.h |  1 +
> >   net/core/dev.c            | 31 +++++++++++++++++++++++++++++--
> >   net/sched/act_skbedit.c   |  6 +++++-
> >   4 files changed, 38 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> > index 0d994710b335..f33fb2d6712a 100644
> > --- a/include/linux/netdevice.h
> > +++ b/include/linux/netdevice.h
> > @@ -3065,6 +3065,9 @@ struct softnet_data {
> >       struct {
> >               u16 recursion;
> >               u8  more;
> > +#ifdef CONFIG_NET_EGRESS
> > +             u8  skip_txqueue;
> > +#endif
> >       } xmit;
> >   #ifdef CONFIG_RPS
> >       /* input_queue_head should be written by cpu owning this struct,
> > diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
> > index 7f970b16da3a..ae2c6a3cec5d 100644
> > --- a/include/linux/rtnetlink.h
> > +++ b/include/linux/rtnetlink.h
> > @@ -100,6 +100,7 @@ void net_dec_ingress_queue(void);
> >   #ifdef CONFIG_NET_EGRESS
> >   void net_inc_egress_queue(void);
> >   void net_dec_egress_queue(void);
> > +void netdev_xmit_skip_txqueue(bool skip);
> >   #endif
> >
> >   void rtnetlink_init(void);
> > diff --git a/net/core/dev.c b/net/core/dev.c
> > index 75bab5b0dbae..8e83b7099977 100644
> > --- a/net/core/dev.c
> > +++ b/net/core/dev.c
> > @@ -3908,6 +3908,25 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
> >
> >       return skb;
> >   }
> > +
> > +static struct netdev_queue *
> > +netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
> > +{
> > +     int qm = skb_get_queue_mapping(skb);
> > +
> > +     return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
> > +}
> > +
> > +static bool netdev_xmit_txqueue_skipped(void)
> > +{
> > +     return __this_cpu_read(softnet_data.xmit.skip_txqueue);
> > +}
> > +
> > +void netdev_xmit_skip_txqueue(bool skip)
> > +{
> > +     __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
> > +}
> > +EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
> >   #endif /* CONFIG_NET_EGRESS */
> >
> >   #ifdef CONFIG_XPS
> > @@ -4078,7 +4097,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
> >   static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
> >   {
> >       struct net_device *dev = skb->dev;
> > -     struct netdev_queue *txq;
> > +     struct netdev_queue *txq = NULL;
> >       struct Qdisc *q;
> >       int rc = -ENOMEM;
> >       bool again = false;
> > @@ -4106,11 +4125,17 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
> >                       if (!skb)
> >                               goto out;
> >               }
> > +
> > +             netdev_xmit_skip_txqueue(false);
> > +
> >               nf_skip_egress(skb, true);
> >               skb = sch_handle_egress(skb, &rc, dev);
> >               if (!skb)
> >                       goto out;
> >               nf_skip_egress(skb, false);
> > +
> > +             if (netdev_xmit_txqueue_skipped())
> > +                     txq = netdev_tx_queue_mapping(dev, skb);
> >       }
> >   #endif
> >       /* If device/qdisc don't need skb->dst, release it right now while
> > @@ -4121,7 +4146,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
> >       else
> >               skb_dst_force(skb);
> >
> > -     txq = netdev_core_pick_tx(dev, skb, sb_dev);
> > +     if (likely(!txq))
>
> nit: Drop likely(). If the feature is used from sch_handle_egress(), then this would always be the case.
Hi Daniel
I think in most cases we don't use skbedit queue_mapping in
sch_handle_egress(), so I added likely() for the fast path.
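
For example, skbedit queue_mapping would typically only be installed for
selected traffic on the clsact egress hook, something like (illustrative
only, device and match values made up):

    tc qdisc add dev eth0 clsact
    tc filter add dev eth0 egress protocol ip prio 1 \
        flower ip_proto tcp dst_port 80 \
        action skbedit queue_mapping 2

so the common case still takes the netdev_core_pick_tx() path.
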
> > +             txq = netdev_core_pick_tx(dev, skb, sb_dev);
> > +
> >       q = rcu_dereference_bh(txq->qdisc);
>
> How will the `netdev_xmit_skip_txqueue(true)` be usable from BPF side (see bpf_convert_ctx_access() ->
> queue_mapping)?
Good question. In another patch I introduce bpf_netdev_skip_txqueue(), so
netdev_xmit_skip_txqueue() can be used from the BPF side.

Not an official patch (I will post it once it is ready):
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4eebea830613..ef147a1a2d62 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5117,6 +5117,21 @@ union bpf_attr {
  *		0 on success.
  *		**-EINVAL** for invalid input
  *		**-EOPNOTSUPP** for unsupported delivery_time_type and protocol
+ *
+ * void bpf_netdev_skip_txqueue(u32 skip)
+ *	Description
+ *		If *skip* is non-zero, instruct the stack to honor the
+ *		skb's *queue_mapping* when selecting the TX queue in
+ *		__dev_queue_xmit(), instead of going through
+ *		netdev_core_pick_tx(). If *skip* is 0, the default TX
+ *		queue selection is restored.
+ *
+ *		This helper is meant to be used together with a write to
+ *		skb->queue_mapping from a tc BPF program attached at the
+ *		egress hook, so that the queue chosen by the program is
+ *		not overwritten at transmit time.
+ *	Return
+ *		Nothing. Always succeeds.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -5312,6 +5327,7 @@ union bpf_attr {
 	FN(xdp_store_bytes),		\
 	FN(copy_from_user_task),	\
 	FN(skb_set_delivery_time),	\
+	FN(netdev_skip_txqueue),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/net/core/filter.c b/net/core/filter.c
index 88767f7da150..5845b4632b6b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2517,6 +2517,19 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = {
 	.arg2_type      = ARG_ANYTHING,
 };

+BPF_CALL_1(bpf_netdev_skip_txqueue, u32, skip)
+{
+	netdev_xmit_skip_txqueue(!!skip);
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_netdev_skip_txqueue_proto = {
+	.func           = bpf_netdev_skip_txqueue,
+	.gpl_only       = false,
+	.ret_type       = RET_VOID,
+	.arg1_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
 	   int, plen, u64, flags)
 {
@@ -7721,6 +7734,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_redirect_proto;
 	case BPF_FUNC_redirect_neigh:
 		return &bpf_redirect_neigh_proto;
+	case BPF_FUNC_netdev_skip_txqueue:
+		return &bpf_netdev_skip_txqueue_proto;
 	case BPF_FUNC_redirect_peer:
 		return &bpf_redirect_peer_proto;
 	case BPF_FUNC_get_route_realm:
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4eebea830613..ef147a1a2d62 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5117,6 +5117,21 @@ union bpf_attr {
  *		0 on success.
  *		**-EINVAL** for invalid input
  *		**-EOPNOTSUPP** for unsupported delivery_time_type and protocol
+ *
+ * void bpf_netdev_skip_txqueue(u32 skip)
+ *	Description
+ *		If *skip* is non-zero, instruct the stack to honor the
+ *		skb's *queue_mapping* when selecting the TX queue in
+ *		__dev_queue_xmit(), instead of going through
+ *		netdev_core_pick_tx(). If *skip* is 0, the default TX
+ *		queue selection is restored.
+ *
+ *		This helper is meant to be used together with a write to
+ *		skb->queue_mapping from a tc BPF program attached at the
+ *		egress hook, so that the queue chosen by the program is
+ *		not overwritten at transmit time.
+ *	Return
+ *		Nothing. Always succeeds.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -5312,6 +5327,7 @@ union bpf_attr {
 	FN(xdp_store_bytes),		\
 	FN(copy_from_user_task),	\
 	FN(skb_set_delivery_time),	\
+	FN(netdev_skip_txqueue),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
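
For example, with the above applied, a tc egress BPF program could pick the
tx queue roughly like this (untested sketch: it assumes the uapi change above
so that BPF_FUNC_netdev_skip_txqueue exists, and queue index 2 is arbitrary):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* new helper, not yet in bpf_helper_defs.h, so declare it by id */
static void (*bpf_netdev_skip_txqueue)(__u32 skip) =
	(void *)BPF_FUNC_netdev_skip_txqueue;

SEC("tc")
int select_txq(struct __sk_buff *skb)
{
	/* queue_mapping is writable from tc via ctx access */
	skb->queue_mapping = 2;

	/* ask __dev_queue_xmit() to honor it instead of calling
	 * netdev_core_pick_tx()
	 */
	bpf_netdev_skip_txqueue(1);

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";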

>
> >       trace_net_dev_queue(skb);
> > diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
> > index ceba11b198bb..d5799b4fc499 100644
> > --- a/net/sched/act_skbedit.c
> > +++ b/net/sched/act_skbedit.c
> > @@ -58,8 +58,12 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
> >               }
> >       }
> >       if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
> > -         skb->dev->real_num_tx_queues > params->queue_mapping)
> > +         skb->dev->real_num_tx_queues > params->queue_mapping) {
> > +#ifdef CONFIG_NET_EGRESS
> > +             netdev_xmit_skip_txqueue(true);
> > +#endif
> >               skb_set_queue_mapping(skb, params->queue_mapping);
> > +     }
> >       if (params->flags & SKBEDIT_F_MARK) {
> >               skb->mark &= ~params->mask;
> >               skb->mark |= params->mark & params->mask;
> >
>


-- 
Best regards, Tonghao
