Date:   Fri, 10 Mar 2017 10:08:09 +0100
From:   Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To:     linux-kernel@...r.kernel.org
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org, Mike Snitzer <snitzer@...hat.com>
Subject: [PATCH 4.9 056/153] dm round robin: revert "use percpu repeat_count and current_path"

4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Mike Snitzer <snitzer@...hat.com>

commit 37a098e9d10db6e2efc05fe61e3a6ff2e9802c53 upstream.

The sloppy nature of lockless access to percpu pointers
(s->current_path) in rr_select_path(), from multiple threads, is
causing some paths to be used more than others -- which results in
reduced IO performance.

Revert these upstream commits to restore truly symmetric round-robin
IO submission in DM multipath:

b0b477c dm round robin: use percpu 'repeat_count' and 'current_path'
802934b dm round robin: do not use this_cpu_ptr() without having preemption disabled

There is no benefit to all this complexity if repeat_count = 1 (which is
the recommended default).
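
For context, the pre-revert fast path that causes the imbalance looked
roughly like the sketch below (simplified from the code this patch
removes; not itself part of the patch).  Each CPU reads its cached
current_path pointer and the shared repeat_count without holding
s->lock, so concurrent submitters can keep reusing whichever path
their CPU last cached:

	/* Simplified sketch of the removed lockless fast path */
	struct dm_path *path = *this_cpu_ptr(s->current_path);

	if (path) {
		percpu_counter_dec(&s->repeat_count);
		/*
		 * Racy: another CPU may already have rotated the path
		 * list and refilled repeat_count, so this CPU keeps
		 * returning its stale cached path instead of the next
		 * path in round-robin order.
		 */
		if (percpu_counter_read_positive(&s->repeat_count) > 0)
			return path;
	}
	/* otherwise fall through to the locked slow path */

With the revert applied, every call to rr_select_path() takes s->lock
and rotates valid_paths, restoring strictly symmetric round-robin
selection.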

Signed-off-by: Mike Snitzer <snitzer@...hat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>

---
 drivers/md/dm-round-robin.c |   67 +++++++++-----------------------------------
 1 file changed, 14 insertions(+), 53 deletions(-)

--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,8 +17,8 @@
 #include <linux/module.h>
 
 #define DM_MSG_PREFIX "multipath round-robin"
-#define RR_MIN_IO     1000
-#define RR_VERSION    "1.1.0"
+#define RR_MIN_IO     1
+#define RR_VERSION    "1.2.0"
 
 /*-----------------------------------------------------------------
  * Path-handling code, paths are held in lists
@@ -47,44 +47,19 @@ struct selector {
 	struct list_head valid_paths;
 	struct list_head invalid_paths;
 	spinlock_t lock;
-	struct dm_path * __percpu *current_path;
-	struct percpu_counter repeat_count;
 };
 
-static void set_percpu_current_path(struct selector *s, struct dm_path *path)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(s->current_path, cpu) = path;
-}
-
 static struct selector *alloc_selector(void)
 {
 	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
 
-	if (!s)
-		return NULL;
-
-	INIT_LIST_HEAD(&s->valid_paths);
-	INIT_LIST_HEAD(&s->invalid_paths);
-	spin_lock_init(&s->lock);
-
-	s->current_path = alloc_percpu(struct dm_path *);
-	if (!s->current_path)
-		goto out_current_path;
-	set_percpu_current_path(s, NULL);
-
-	if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
-		goto out_repeat_count;
+	if (s) {
+		INIT_LIST_HEAD(&s->valid_paths);
+		INIT_LIST_HEAD(&s->invalid_paths);
+		spin_lock_init(&s->lock);
+	}
 
 	return s;
-
-out_repeat_count:
-	free_percpu(s->current_path);
-out_current_path:
-	kfree(s);
-	return NULL;;
 }
 
 static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -105,8 +80,6 @@ static void rr_destroy(struct path_selec
 
 	free_paths(&s->valid_paths);
 	free_paths(&s->invalid_paths);
-	free_percpu(s->current_path);
-	percpu_counter_destroy(&s->repeat_count);
 	kfree(s);
 	ps->context = NULL;
 }
@@ -157,6 +130,11 @@ static int rr_add_path(struct path_selec
 		return -EINVAL;
 	}
 
+	if (repeat_count > 1) {
+		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+		repeat_count = 1;
+	}
+
 	/* allocate the path */
 	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
 	if (!pi) {
@@ -183,9 +161,6 @@ static void rr_fail_path(struct path_sel
 	struct path_info *pi = p->pscontext;
 
 	spin_lock_irqsave(&s->lock, flags);
-	if (p == *this_cpu_ptr(s->current_path))
-		set_percpu_current_path(s, NULL);
-
 	list_move(&pi->list, &s->invalid_paths);
 	spin_unlock_irqrestore(&s->lock, flags);
 }
@@ -208,29 +183,15 @@ static struct dm_path *rr_select_path(st
 	unsigned long flags;
 	struct selector *s = ps->context;
 	struct path_info *pi = NULL;
-	struct dm_path *current_path = NULL;
 
-	local_irq_save(flags);
-	current_path = *this_cpu_ptr(s->current_path);
-	if (current_path) {
-		percpu_counter_dec(&s->repeat_count);
-		if (percpu_counter_read_positive(&s->repeat_count) > 0) {
-			local_irq_restore(flags);
-			return current_path;
-		}
-	}
-
-	spin_lock(&s->lock);
+	spin_lock_irqsave(&s->lock, flags);
 	if (!list_empty(&s->valid_paths)) {
 		pi = list_entry(s->valid_paths.next, struct path_info, list);
 		list_move_tail(&pi->list, &s->valid_paths);
-		percpu_counter_set(&s->repeat_count, pi->repeat_count);
-		set_percpu_current_path(s, pi->path);
-		current_path = pi->path;
 	}
 	spin_unlock_irqrestore(&s->lock, flags);
 
-	return current_path;
+	return pi ? pi->path : NULL;
 }
 
 static struct path_selector_type rr_ps = {

