lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Mon, 19 Oct 2020 14:48:07 -0700
From:   Manjunath Patil <manjunath.b.patil@...cle.com>
To:     santosh.shilimkar@...cle.com
Cc:     netdev@...r.kernel.org, linux-rdma@...r.kernel.org,
        rama.nichanamatlu@...cle.com, manjunath.b.patil@...cle.com
Subject: [PATCH 1/2] rds: track memory region (MR) usage in kernel

Excessive MR utilization by certain RDS applications can starve other
RDS applications of MRs. Therefore, tracking MR usage by RDS
applications is beneficial.

The collected data is intended to be exported to userspace using the
rds-info interface.

Signed-off-by: Manjunath Patil <manjunath.b.patil@...cle.com>
Reviewed-by: Alan Maguire <alan.maguire@...cle.com>
---
 net/rds/af_rds.c |  4 ++++
 net/rds/rdma.c   | 29 ++++++++++++++++++++++-------
 net/rds/rds.h    |  6 ++++++
 3 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 1a5bf3fa4578..e291095e5224 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -688,6 +688,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 	rs->rs_rx_traces = 0;
 	rs->rs_tos = 0;
 	rs->rs_conn = NULL;
+	rs->rs_pid = current->pid;
+	get_task_comm(rs->rs_comm, current);
+	atomic64_set(&rs->rs_mr_gets, 0);
+	atomic64_set(&rs->rs_mr_puts, 0);
 
 	spin_lock_bh(&rds_sock_lock);
 	list_add_tail(&rs->rs_item, &rds_sock_list);
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 585e6b3b69ce..a1ae7b5ea3b2 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -44,6 +44,23 @@
  *  - an rdma is an mlock, apply rlimit?
  */
 
+static inline void mr_stats_update_gets(struct rds_sock *rs)
+{
+	atomic64_inc(&rs->rs_mr_gets);
+}
+
+static inline void mr_stats_update_puts(struct rds_sock *rs)
+{
+	atomic64_inc(&rs->rs_mr_puts);
+}
+
+static inline void rds_rb_erase(struct rds_sock *rs, struct rds_mr *mr)
+{
+	rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
+	RB_CLEAR_NODE(&mr->r_rb_node);
+	mr_stats_update_puts(rs);
+}
+
 /*
  * get the number of pages by looking at the page indices that the start and
  * end addresses fall in.
@@ -106,7 +123,7 @@ static void rds_destroy_mr(struct rds_mr *mr)
 
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	if (!RB_EMPTY_NODE(&mr->r_rb_node))
-		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
+		rds_rb_erase(rs, mr);
 	trans_private = mr->r_trans_private;
 	mr->r_trans_private = NULL;
 	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
@@ -137,8 +154,7 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
 		mr = rb_entry(node, struct rds_mr, r_rb_node);
 		if (mr->r_trans == rs->rs_transport)
 			mr->r_invalidate = 0;
-		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
-		RB_CLEAR_NODE(&mr->r_rb_node);
+		rds_rb_erase(rs, mr);
 		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 		rds_destroy_mr(mr);
 		rds_mr_put(mr);
@@ -337,6 +353,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 	 * reference count. */
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
+	mr_stats_update_gets(rs);
 	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 
 	BUG_ON(found && found != mr);
@@ -424,8 +441,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
 	if (mr) {
-		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
-		RB_CLEAR_NODE(&mr->r_rb_node);
+		rds_rb_erase(rs, mr);
 		if (args.flags & RDS_RDMA_INVALIDATE)
 			mr->r_invalidate = 1;
 	}
@@ -465,8 +481,7 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 	}
 
 	if (mr->r_use_once || force) {
-		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
-		RB_CLEAR_NODE(&mr->r_rb_node);
+		rds_rb_erase(rs, mr);
 		zot_me = 1;
 	}
 	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index e4a603523083..5e61868e1799 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -654,6 +654,12 @@ struct rds_sock {
 	spinlock_t		rs_rdma_lock;
 	struct rb_root		rs_rdma_keys;
 
+	/* per rds_sock MR stats */
+	pid_t                   rs_pid;
+	char                    rs_comm[TASK_COMM_LEN];
+	atomic64_t              rs_mr_gets;
+	atomic64_t              rs_mr_puts;
+
 	/* Socket options - in case there will be more */
 	unsigned char		rs_recverr,
 				rs_cong_monitor;
-- 
2.27.0.112.g101b320

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ