Message-Id: <20180410170812.18905-1-saeedm@mellanox.com>
Date: Tue, 10 Apr 2018 10:08:11 -0700
From: Saeed Mahameed <saeedm@...lanox.com>
To: netdev@...r.kernel.org
Cc: Saeed Mahameed <saeedm@...lanox.com>
Subject: [RFC net-next 1/2] net: net-procfs: Reduce rcu lock critical section
The current net procfs sequence file implementation, which shows the
current namespace's netdev statistics and mc lists, holds the rcu lock
throughout the whole walk, from dev seq start up to dev seq stop.
This is really greedy and demanding from device drivers, since
ndo_get_stats64 is called from dev_seq_show while the rcu lock is held.
The rcu lock is needed to guarantee that the device chain is not
modified while the dev sequence file is walking through it and handling
a netdev at the same time.
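For reference, this is the shape of the pattern being removed
(condensed from the pre-patch net/core/net-procfs.c, body abbreviated):
the rcu read lock is taken in the start callback and only dropped in
stop, so every show invocation, including the driver's ndo_get_stats64,
runs inside the read-side critical section:

/* Pre-patch (condensed): rcu held across the whole seq_file walk. */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();	/* taken at the very start of the walk... */
	if (!*pos)
		return SEQ_START_TOKEN;
	/* ...bucket bound check elided... */
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();	/* ...and only released when the walk ends */
}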
To minimize this critical section and drastically reduce the time the
rcu lock is held, all we need is to grab the rcu lock only for the
brief moment where we look up the next netdev to handle; if one is
found, dev_hold it to guarantee it stays alive while it is accessed
later in the seq show callback, and release the rcu lock immediately.
The netdev currently being handled is then released (dev_put) when the
seq next callback or the dev seq stop callback is called. As a result,
ndo_get_stats64 is now invoked from dev_seq_show without the rcu lock
held.
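Stitching the hunks below together, the resulting lifetime rule looks
like this (condensed, only the relevant lines shown): the rcu lock now
covers just the hash-bucket lookup, and the reference taken under it
carries the netdev between seq_file callbacks until next/stop drops it:

/* Post-patch (condensed): rcu covers only the lookup; a reference
 * keeps the netdev alive between seq_file callbacks.
 */
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev = NULL;
	unsigned int bucket;

	rcu_read_lock();
	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev) {
			dev_hold(dev);	/* keeps dev alive past rcu_read_unlock() */
			goto unlock;
		}
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);
unlock:
	rcu_read_unlock();
	return dev;
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (v && v != SEQ_START_TOKEN)
		dev_put(v);	/* drop the previously handled netdev */
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		dev_put(v);	/* drop the last netdev if the walk stops early */
}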
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
net/core/net-procfs.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 9737302907b1..9d5ce6a203d2 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -31,19 +31,24 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
unsigned int bucket;
+ rcu_read_lock();
do {
dev = dev_from_same_bucket(seq, pos);
- if (dev)
- return dev;
+ if (dev) {
+ dev_hold(dev);
+ goto unlock;
+ }
bucket = get_bucket(*pos) + 1;
*pos = set_bucket_offset(bucket, 1);
} while (bucket < NETDEV_HASHENTRIES);
- return NULL;
+unlock:
+ rcu_read_unlock();
+ return dev;
}
/*
@@ -51,9 +56,7 @@ static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *p
* in detail.
*/
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
{
- rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
@@ -66,13 +69,15 @@ static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
+ if (v && v != SEQ_START_TOKEN)
+ dev_put(v);
return dev_from_bucket(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
{
- rcu_read_unlock();
+ if (v && v != SEQ_START_TOKEN)
+ dev_put(v);
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
--
2.14.3