Message-Id: <1642511447-8998-1-git-send-email-praveen.kannoju@oracle.com>
Date:   Tue, 18 Jan 2022 13:10:47 +0000
From:   Praveen Kumar Kannoju <praveen.kannoju@...cle.com>
To:     santosh.shilimkar@...cle.com, davem@...emloft.net, kuba@...nel.org,
        netdev@...r.kernel.org, linux-rdma@...r.kernel.org,
        rds-devel@....oracle.com, linux-kernel@...r.kernel.org
Cc:     rama.nichanamatlu@...cle.com,
        rajesh.sivaramasubramaniom@...cle.com,
        Praveen Kumar Kannoju <praveen.kannoju@...cle.com>
Subject: [PATCH RFC] rds: ib: Reduce contention caused by asynchronous workers flushing the mr pool

This patch reduces the number of asynchronous workers spawned to execute
"rds_ib_flush_mr_pool" during high I/O load. Synchronous call paths into
"rds_ib_flush_mr_pool" are left undisturbed. With fewer processes
contending to flush the MR pool, the number of D-state processes blocked
on the pool's mutex lock drops significantly. Previously, those blocked
processes made no forward progress while waiting for the mutex, which
caused DB instance crashes.
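
To illustrate the intent of the gating logic, below is a minimal
user-space sketch (illustration only, not kernel code): a C11 atomic
flag stands in for pool->flush_ongoing, and schedule_flush(),
flush_mr_pool() and maybe_queue_flush() are made-up stand-ins for
queue_delayed_work(), rds_ib_flush_mr_pool() and the rds_ib_free_mr()
hunk of this patch.

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_bool flush_ongoing;     /* stands in for pool->flush_ongoing */
  static unsigned long flushes_avoided; /* stands in for the new statistic */

  static void schedule_flush(void)      /* stand-in for queue_delayed_work() */
  {
          printf("flush worker queued\n");
  }

  static void flush_mr_pool(void)       /* models rds_ib_flush_mr_pool() */
  {
          atomic_store(&flush_ongoing, true);
          /* ... drain the drop/free lists and unmap the MRs ... */
          atomic_store(&flush_ongoing, false);
  }

  static void maybe_queue_flush(void)   /* models the rds_ib_free_mr() check */
  {
          if (!atomic_load(&flush_ongoing))
                  schedule_flush();
          else
                  flushes_avoided++;    /* ib_rdma_flush_mr_pool_avoided */
  }

  int main(void)
  {
          maybe_queue_flush();          /* no flush in flight: worker queued */
          flush_mr_pool();
          maybe_queue_flush();          /* flush finished: queued again */
          printf("redundant flushes avoided: %lu\n", flushes_avoided);
          return 0;
  }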

Signed-off-by: Praveen Kumar Kannoju <praveen.kannoju@...cle.com>
---
 net/rds/ib.h       |  1 +
 net/rds/ib_mr.h    |  2 ++
 net/rds/ib_rdma.c  | 19 +++++++++++++++++--
 net/rds/ib_stats.c |  1 +
 4 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/net/rds/ib.h b/net/rds/ib.h
index 2ba7110..d881e3f 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -308,6 +308,7 @@ struct rds_ib_statistics {
 	uint64_t	s_ib_rdma_mr_1m_pool_flush;
 	uint64_t	s_ib_rdma_mr_1m_pool_wait;
 	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
+	uint64_t	s_ib_rdma_flush_mr_pool_avoided;
 	uint64_t	s_ib_rdma_mr_8k_reused;
 	uint64_t	s_ib_rdma_mr_1m_reused;
 	uint64_t	s_ib_atomic_cswp;
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index ea5e9ae..9cbec6e 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -105,6 +105,8 @@ struct rds_ib_mr_pool {
 	unsigned long		max_items_soft;
 	unsigned long		max_free_pinned;
 	unsigned int		max_pages;
+
+	bool                    flush_ongoing;	/* To avoid redundant flushes */
 };
 
 extern struct workqueue_struct *rds_ib_mr_wq;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 8f070ee..6b640b5 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -393,6 +393,8 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 	 */
 	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
 	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
+	WRITE_ONCE(pool->flush_ongoing, true);
+	smp_wmb();
 	if (free_all) {
 		unsigned long flags;
 
@@ -430,6 +432,8 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 	atomic_sub(nfreed, &pool->item_count);
 
 out:
+	WRITE_ONCE(pool->flush_ongoing, false);
+	smp_wmb();
 	mutex_unlock(&pool->flush_lock);
 	if (waitqueue_active(&pool->flush_wait))
 		wake_up(&pool->flush_wait);
@@ -507,8 +511,18 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
-	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
-		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 5) {
+		smp_rmb();
+		if (!READ_ONCE(pool->flush_ongoing)) {
+			queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+		} else {
+			/* This counter indicates the number of redundant
+			 * flush calls avoided, and provides an indication
+			 * of the load pattern imposed on the kernel.
+			 */
+			rds_ib_stats_inc(s_ib_rdma_flush_mr_pool_avoided);
+		}
+	}
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -670,6 +683,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
 
 	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
 	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
+	pool->flush_ongoing = false;
 
 	return pool;
 }
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index ac46d89..29ae5cb 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -75,6 +75,7 @@
 	"ib_rdma_mr_1m_pool_flush",
 	"ib_rdma_mr_1m_pool_wait",
 	"ib_rdma_mr_1m_pool_depleted",
+	"ib_rdma_flush_mr_pool_avoided",
 	"ib_rdma_mr_8k_reused",
 	"ib_rdma_mr_1m_reused",
 	"ib_atomic_cswp",
-- 
1.8.3.1
