Date:   Thu, 12 Oct 2017 23:45:43 +0200
From:   Daniel Borkmann <daniel@...earbox.net>
To:     Jiri Pirko <jiri@...nulli.us>, netdev@...r.kernel.org
CC:     davem@...emloft.net, jhs@...atatu.com, xiyou.wangcong@...il.com,
        mlxsw@...lanox.com, andrew@...n.ch,
        vivien.didelot@...oirfairelinux.com, f.fainelli@...il.com,
        michael.chan@...adcom.com, ganeshgr@...lsio.com,
        jeffrey.t.kirsher@...el.com, saeedm@...lanox.com,
        matanb@...lanox.com, leonro@...lanox.com, idosch@...lanox.com,
        jakub.kicinski@...ronome.com, ast@...nel.org,
        simon.horman@...ronome.com, pieter.jansenvanvuuren@...ronome.com,
        john.hurley@...ronome.com, edumazet@...gle.com, dsahern@...il.com,
        alexander.h.duyck@...el.com, john.fastabend@...il.com,
        willemb@...gle.com
Subject: Re: [patch net-next 06/34] net: core: use dev->ingress_queue instead
 of tp->q

On 10/12/2017 07:17 PM, Jiri Pirko wrote:
> From: Jiri Pirko <jiri@...lanox.com>
>
> In sch_handle_egress and sch_handle_ingress, use dev->ingress_queue,
> which stores the same pointer, instead of tp->q.
>
> Signed-off-by: Jiri Pirko <jiri@...lanox.com>
> ---
>   net/core/dev.c | 21 +++++++++++++++------
>   1 file changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index fcddccb..cb9e5e5 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -3273,14 +3273,18 @@ EXPORT_SYMBOL(dev_loopback_xmit);
>   static struct sk_buff *
>   sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
>   {
> +	struct netdev_queue *netdev_queue =
> +				rcu_dereference_bh(dev->ingress_queue);
>   	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
>   	struct tcf_result cl_res;
> +	struct Qdisc *q;
>
> -	if (!cl)
> +	if (!cl || !netdev_queue)
>   		return skb;
> +	q = netdev_queue->qdisc;

NAK, we can't add overhead like this in the software fast path of
sch_handle_{ingress,egress}(). There are users out there that use tc
in software only, so performance is critical here.
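
To spell out what that means per packet (condensed from the hunks in
this patch, egress side shown):

	/* today: one rcu_dereference_bh() and one branch per packet */
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);

	if (!cl)
		return skb;
	/* stats are accounted via cl->q */

	/* with this patch: a second rcu_dereference_bh(), a second NULL
	 * check and an extra pointer chase, all in the per-packet path
	 */
	struct netdev_queue *netdev_queue =
				rcu_dereference_bh(dev->ingress_queue);
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct Qdisc *q;

	if (!cl || !netdev_queue)
		return skb;
	q = netdev_queue->qdisc;
	/* stats are accounted via q */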

>   	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
> -	qdisc_bstats_cpu_update(cl->q, skb);
> +	qdisc_bstats_cpu_update(q, skb);
>
>   	switch (tcf_classify(skb, cl, &cl_res, false)) {
>   	case TC_ACT_OK:
> @@ -3288,7 +3292,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
>   		skb->tc_index = TC_H_MIN(cl_res.classid);
>   		break;
>   	case TC_ACT_SHOT:
> -		qdisc_qstats_cpu_drop(cl->q);
> +		qdisc_qstats_cpu_drop(q);
>   		*ret = NET_XMIT_DROP;
>   		kfree_skb(skb);
>   		return NULL;
> @@ -4188,16 +4192,21 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
>   		   struct net_device *orig_dev)
>   {
>   #ifdef CONFIG_NET_CLS_ACT
> +	struct netdev_queue *netdev_queue =
> +				rcu_dereference_bh(skb->dev->ingress_queue);
>   	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
>   	struct tcf_result cl_res;
> +	struct Qdisc *q;
>
>   	/* If there's at least one ingress present somewhere (so
>   	 * we get here via enabled static key), remaining devices
>   	 * that are not configured with an ingress qdisc will bail
>   	 * out here.
>   	 */
> -	if (!cl)
> +	if (!cl || !netdev_queue)
>   		return skb;
> +	q = netdev_queue->qdisc;
> +
>   	if (*pt_prev) {
>   		*ret = deliver_skb(skb, *pt_prev, orig_dev);
>   		*pt_prev = NULL;
> @@ -4205,7 +4214,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
>
>   	qdisc_skb_cb(skb)->pkt_len = skb->len;
>   	skb->tc_at_ingress = 1;
> -	qdisc_bstats_cpu_update(cl->q, skb);
> +	qdisc_bstats_cpu_update(q, skb);
>
>   	switch (tcf_classify(skb, cl, &cl_res, false)) {
>   	case TC_ACT_OK:
> @@ -4213,7 +4222,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
>   		skb->tc_index = TC_H_MIN(cl_res.classid);
>   		break;
>   	case TC_ACT_SHOT:
> -		qdisc_qstats_cpu_drop(cl->q);
> +		qdisc_qstats_cpu_drop(q);
>   		kfree_skb(skb);
>   		return NULL;
>   	case TC_ACT_STOLEN:
>
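
If the goal is to get tp->q out of here without paying for it on every
packet, one direction would be to publish everything the fast path
needs behind a single RCU pointer. Rough sketch only -- the names
clsact_fastpath and ingress_fastpath below are invented for
illustration and are not part of this patch or the tree:

struct clsact_fastpath {
	struct tcf_proto *filter_list;	/* classifier chain to run */
	struct gnet_stats_basic_cpu __percpu *cpu_bstats; /* per-CPU byte/pkt counters */
	struct gnet_stats_queue __percpu *cpu_qstats;	   /* per-CPU drop counter */
	struct rcu_head rcu;		/* writers swap the struct and free via call_rcu() */
};

static struct sk_buff *
sch_handle_ingress_sketch(struct sk_buff *skb)
{
	/* one dereference and one branch; NULL means no ingress configured */
	struct clsact_fastpath *fp = rcu_dereference_bh(skb->dev->ingress_fastpath);
	struct tcf_result cl_res;

	if (!fp)
		return skb;

	/* pt_prev / deliver_skb() handling omitted for brevity */
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	bstats_cpu_update(this_cpu_ptr(fp->cpu_bstats), skb);

	switch (tcf_classify(skb, fp->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		this_cpu_inc(fp->cpu_qstats->drops);
		kfree_skb(skb);
		return NULL;
	/* remaining TC_ACT_* cases trimmed */
	}
	return skb;
}

The control path then pays the cost of allocating and swapping the
struct when filters or the ingress qdisc change, which is not per
packet.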
