lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <2AD939572F25A448A3AE3CAEA61328C237845184@BC-MAIL-M28.internal.baidu.com>
Date:   Tue, 10 Jul 2018 10:59:41 +0000
From:   "Li,Rongqing" <lirongqing@...du.com>
To:     Eric Dumazet <eric.dumazet@...il.com>,
        David Miller <davem@...emloft.net>
CC:     "netdev@...r.kernel.org" <netdev@...r.kernel.org>
Subject: 答复: [PATCH][net-next][v2] net: limit each hash list length to MAX_GRO_SKBS



> -----邮件原件-----
> 发件人: Eric Dumazet [mailto:eric.dumazet@...il.com]
> 发送时间: 2018年7月8日 8:22
> 收件人: David Miller <davem@...emloft.net>; Li,Rongqing
> <lirongqing@...du.com>
> 抄送: netdev@...r.kernel.org
> 主题: Re: [PATCH][net-next][v2] net: limit each hash list length to
> MAX_GRO_SKBS
> 
> 
> 
> On 07/05/2018 03:20 AM, David Miller wrote:
> > From: Li RongQing <lirongqing@...du.com>
> > Date: Thu,  5 Jul 2018 14:34:32 +0800
> >
> >> After commit 07d78363dcff ("net: Convert NAPI gro list into a small
> >> hash table.")' there is 8 hash buckets, which allows more flows to be
> >> held for merging.  but MAX_GRO_SKBS, the total held skb for merging,
> >> is 8 skb still, limit the hash table performance.
> >>
> >> keep MAX_GRO_SKBS as 8 skb, but limit each hash list length to 8 skb,
> >> not the total 8 skb
> >>
> >> Signed-off-by: Li RongQing <lirongqing@...du.com>
> >
> > Applied, thanks.
> >
> 
> Maybe gro_count should be replaced by a bitmask, so that we can speed up
> napi_gro_flush(), since it now has to use 3 cache lines (gro_hash[] size is 192
> bytes)

Do you mean something like this?

Subject: [PATCH][RFC][net-next] net: convert gro_count to bitmask

Convert gro_count to a bitmask and rename it to gro_bitmask, to speed
up napi_gro_flush(), since gro_hash[] now spans 3 cache lines

---
 include/linux/netdevice.h |  2 +-
 net/core/dev.c            | 36 ++++++++++++++++++++++++------------
 2 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b683971e500d..df49b36ef378 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -322,7 +322,7 @@ struct napi_struct {
 
        unsigned long           state;
        int                     weight;
-       unsigned int            gro_count;
+       unsigned long           gro_bitmask;
        int                     (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
        int                     poll_owner;
diff --git a/net/core/dev.c b/net/core/dev.c
index 89825c1eccdc..da2d1185eb82 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5161,9 +5161,11 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                        return;
                list_del_init(&skb->list);
                napi_gro_complete(skb);
-               napi->gro_count--;
                napi->gro_hash[index].count--;
        }
+
+       if (!napi->gro_hash[index].count)
+               clear_bit(index, &napi->gro_bitmask);
 }
 
 /* napi->gro_hash[].list contains packets ordered by age.
@@ -5174,8 +5176,10 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
        u32 i;
 
-       for (i = 0; i < GRO_HASH_BUCKETS; i++)
-               __napi_gro_flush_chain(napi, i, flush_old);
+       for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+               if (test_bit(i, &napi->gro_bitmask))
+                       __napi_gro_flush_chain(napi, i, flush_old);
+       }
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
@@ -5267,8 +5271,8 @@ static void gro_flush_oldest(struct list_head *head)
        if (WARN_ON_ONCE(!oldest))
                return;
 
-       /* Do not adjust napi->gro_count, caller is adding a new SKB to
-        * the chain.
+       /* Do not adjust napi->gro_hash[].count, caller is adding a new
+        * SKB to the chain.
         */
        list_del(&oldest->list);
        napi_gro_complete(oldest);
@@ -5342,7 +5346,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (pp) {
                list_del_init(&pp->list);
                napi_gro_complete(pp);
-               napi->gro_count--;
                napi->gro_hash[hash].count--;
        }
 
@@ -5355,7 +5358,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
                gro_flush_oldest(gro_head);
        } else {
-               napi->gro_count++;
                napi->gro_hash[hash].count++;
        }
        NAPI_GRO_CB(skb)->count = 1;
@@ -5370,6 +5372,14 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
ok:
+
+       /* Braces are required here: without them the "else" would bind to the
+        * inner if (dangling else), so the bit would never be cleared when the
+        * bucket empties.
+        */
+       if (napi->gro_hash[hash].count) {
+               if (!test_bit(hash, &napi->gro_bitmask))
+                       set_bit(hash, &napi->gro_bitmask);
+       } else if (test_bit(hash, &napi->gro_bitmask)) {
+               clear_bit(hash, &napi->gro_bitmask);
+       }
+
        return ret;
 
 normal:
@@ -5768,7 +5777,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
 
-       if (n->gro_count) {
+       if (n->gro_bitmask) {
                unsigned long timeout = 0;
 
                if (work_done)
@@ -5977,7 +5986,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
        /* Note : we use a relaxed variant of napi_schedule_prep() not setting
         * NAPI_STATE_MISSED, since we do not react to a device IRQ.
         */
-       if (napi->gro_count && !napi_disable_pending(napi) &&
+       if (napi->gro_bitmask && !napi_disable_pending(napi) &&
            !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
                __napi_schedule_irqoff(napi);
 
@@ -5992,7 +6001,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        INIT_LIST_HEAD(&napi->poll_list);
        hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
        napi->timer.function = napi_watchdog;
-       napi->gro_count = 0;
+       napi->gro_bitmask = 0;
        for (i = 0; i < GRO_HASH_BUCKETS; i++) {
                INIT_LIST_HEAD(&napi->gro_hash[i].list);
                napi->gro_hash[i].count = 0;
@@ -6052,7 +6061,7 @@ void netif_napi_del(struct napi_struct *napi)
        napi_free_frags(napi);
 
        flush_gro_hash(napi);
-       napi->gro_count = 0;
+       napi->gro_bitmask = 0;
 }
 EXPORT_SYMBOL(netif_napi_del);
 
@@ -6094,7 +6103,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                goto out_unlock;
        }
 
-       if (n->gro_count) {
+       if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
                 */
@@ -9141,6 +9150,9 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
+       BUILD_BUG_ON(GRO_HASH_BUCKETS >
+                       8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);
 
-- 
2.16.2

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ