Date:	Fri, 29 Jan 2016 14:30:44 -0500
From:	Waiman Long <Waiman.Long@....com>
To:	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Alexander Viro <viro@...iv.linux.org.uk>
Cc:	linux-fsdevel@...r.kernel.org, x86@...nel.org,
	linux-kernel@...r.kernel.org,
	Peter Zijlstra <peterz@...radead.org>,
	Andi Kleen <andi@...stfloor.org>,
	Scott J Norton <scott.norton@...com>,
	Douglas Hatch <doug.hatch@...com>,
	Waiman Long <Waiman.Long@....com>
Subject: [PATCH v2 1/3] lib/list_batch: A simple list insertion/deletion batching facility

Linked list insertion or deletion under a lock is a very common activity
in the Linux kernel. If this is the only activity under the lock, the
locking overhead can be quite large compared with the time spent on the
insertion or deletion operation itself, especially on a large system
with many CPUs.

This patch introduces a simple list insertion/deletion batching
facility where multiple list insertion and deletion operations are
performed together in a single batch under one lock/unlock critical
section. This reduces the locking overhead and improves overall system
performance.

The fast path of this batching facility should perform similarly to
the "lock; listop; unlock;" sequence of the existing code. If the lock
is not available, it enters the slowpath where the batching happens.
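
As an illustration only (the "foo" names below are made up and not part
of this patch), a caller that currently protects its list with

	spin_lock(&foo_lock);
	list_add(&item->link, &foo_list);
	spin_unlock(&foo_lock);

would add a struct list_batch alongside the existing list head and lock,
e.g.

	static struct list_batch foo_batch = LIST_BATCH_INIT(&foo_list);

(or initialize it at run time with list_batch_init()), and replace the
locked sequence with a single call:

	do_list_batch(&foo_lock, lb_cmd_add, &foo_batch, &item->link);

When the trylock in do_list_batch() succeeds, this behaves like the
original sequence; under contention the operation is queued and carried
out in a batch by the current lock holder.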

A new config option LIST_BATCHING is added so that we can control on
which architectures this facility is enabled.
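
For illustration (not part of this patch), an architecture would opt in
from its arch Kconfig with something like

	config X86
		...
		select ARCH_USE_LIST_BATCHING

which, combined with the LIST_BATCHING entry added to lib/Kconfig below,
enables the facility on SMP builds of that architecture.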

Signed-off-by: Waiman Long <Waiman.Long@....com>
---
 include/linux/list_batch.h |  133 ++++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig                |    7 ++
 lib/Makefile               |    1 +
 lib/list_batch.c           |  125 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 266 insertions(+), 0 deletions(-)
 create mode 100644 include/linux/list_batch.h
 create mode 100644 lib/list_batch.c

diff --git a/include/linux/list_batch.h b/include/linux/list_batch.h
new file mode 100644
index 0000000..a445a2e
--- /dev/null
+++ b/include/linux/list_batch.h
@@ -0,0 +1,133 @@
+/*
+ * List insertion/deletion batching facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@....com>
+ */
+#ifndef __LINUX_LIST_BATCH_H
+#define __LINUX_LIST_BATCH_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * include/linux/list_batch.h
+ *
+ * Inserting or deleting an entry from a linked list under a spinlock is a
+ * very common operation in the Linux kernel. If many CPUs are trying to
+ * grab the lock and manipulate the linked list, it can lead to significant
+ * lock contention and slow operation.
+ *
+ * This list operation batching facility is used to batch multiple list
+ * operations under one lock/unlock critical section, thus reducing the
+ * locking and cacheline bouncing overhead and improving overall performance.
+ */
+enum list_batch_cmd {
+	lb_cmd_add,
+	lb_cmd_del,
+	lb_cmd_del_init
+};
+
+enum list_batch_state {
+	lb_state_waiting,	/* Node is waiting */
+	lb_state_batch,		/* Queue head to perform batch processing */
+	lb_state_done		/* Job is done */
+};
+
+struct list_batch_qnode {
+	struct list_batch_qnode	*next;
+	struct list_head	*entry;
+	enum list_batch_cmd	cmd;
+	enum list_batch_state	state;
+};
+
+struct list_batch {
+	struct list_head	*list;
+	struct list_batch_qnode *tail;
+};
+
+#define LIST_BATCH_INIT(_list)	\
+	{			\
+		.list = _list,	\
+		.tail = NULL	\
+	}
+
+static inline void list_batch_init(struct list_batch *batch,
+				   struct list_head *list)
+{
+	batch->list = list;
+	batch->tail = NULL;
+}
+
+static __always_inline void _list_batch_cmd(enum list_batch_cmd cmd,
+					    struct list_head *head,
+					    struct list_head *entry)
+{
+	switch (cmd) {
+	case lb_cmd_add:
+		list_add(entry, head);
+		break;
+
+	case lb_cmd_del:
+		list_del(entry);
+		break;
+
+	case lb_cmd_del_init:
+		list_del_init(entry);
+		break;
+	}
+}
+
+#ifdef CONFIG_LIST_BATCHING
+
+extern void do_list_batch_slowpath(spinlock_t *lock, enum list_batch_cmd cmd,
+				   struct list_batch *batch,
+				   struct list_head *entry);
+
+/*
+ * The caller is expected to pass in a constant cmd parameter. As a
+ * result, most of the unneeded code in the switch statement of
+ * _list_batch_cmd() will be optimized away. This should make the fast
+ * path almost as fast as the "lock; listop; unlock;" sequence it replaces.
+ */
+static inline void do_list_batch(spinlock_t *lock, enum list_batch_cmd cmd,
+				   struct list_batch *batch,
+				   struct list_head *entry)
+{
+	/*
+	 * Fast path
+	 */
+	if (likely(spin_trylock(lock))) {
+		_list_batch_cmd(cmd, batch->list, entry);
+		spin_unlock(lock);
+		return;
+	}
+	do_list_batch_slowpath(lock, cmd, batch, entry);
+}
+
+
+#else /* CONFIG_LIST_BATCHING */
+
+static inline void do_list_batch(spinlock_t *lock, enum list_batch_cmd cmd,
+				   struct list_batch *batch,
+				   struct list_head *entry)
+{
+	spin_lock(lock);
+	_list_batch_cmd(cmd, batch->list, entry);
+	spin_unlock(lock);
+}
+
+#endif /* CONFIG_LIST_BATCHING */
+
+#endif /* __LINUX_LIST_BATCH_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 133ebc0..d75ce19 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -514,6 +514,13 @@ config OID_REGISTRY
 config UCS2_STRING
         tristate
 
+config LIST_BATCHING
+	def_bool y if ARCH_USE_LIST_BATCHING
+	depends on SMP
+
+config ARCH_USE_LIST_BATCHING
+	bool
+
 source "lib/fonts/Kconfig"
 
 config SG_SPLIT
diff --git a/lib/Makefile b/lib/Makefile
index a7c26a4..2791262 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -210,6 +210,7 @@ quiet_cmd_build_OID_registry = GEN     $@
 clean-files	+= oid_registry_data.c
 
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+obj-$(CONFIG_LIST_BATCHING) += list_batch.o
 obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
diff --git a/lib/list_batch.c b/lib/list_batch.c
new file mode 100644
index 0000000..174f4ba
--- /dev/null
+++ b/lib/list_batch.c
@@ -0,0 +1,125 @@
+/*
+ * List insertion/deletion batching facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@....com>
+ */
+#include <linux/list_batch.h>
+
+/*
+ * List processing batch size = 128
+ *
+ * The batch size shouldn't be too large. Otherwise, it will be too unfair
+ * to the task doing the batch processing. It shouldn't be too small either,
+ * as the performance benefit will be reduced.
+ */
+#define LB_BATCH_SIZE	(1 << 7)
+
+/*
+ * Inserting or deleting an entry from a linked list under a spinlock is a
+ * very common operation in the Linux kernel. If many CPUs are trying to
+ * grab the lock and manipulate the linked list, it can lead to significant
+ * lock contention and slow operation.
+ *
+ * This list operation batching facility is used to batch multiple list
+ * operations under one lock/unlock critical section, thus reducing the
+ * locking overhead and improving overall performance.
+ */
+void do_list_batch_slowpath(spinlock_t *lock, enum list_batch_cmd cmd,
+			    struct list_batch *batch, struct list_head *entry)
+{
+	struct list_batch_qnode node, *prev, *next, *nptr;
+	int loop;
+
+	/*
+	 * Put itself into the list_batch queue
+	 */
+	node.next  = NULL;
+	node.entry = entry;
+	node.cmd   = cmd;
+	node.state = lb_state_waiting;
+
+	/*
+	 * We rely on the implicit memory barrier of xchg() to make sure
+	 * that node initialization will be done before its content is being
+	 * accessed by other CPUs.
+	 */
+	prev = xchg(&batch->tail, &node);
+
+	if (prev) {
+		WRITE_ONCE(prev->next, &node);
+		while (READ_ONCE(node.state) == lb_state_waiting)
+			cpu_relax();
+		if (node.state == lb_state_done)
+			return;
+		WARN_ON(node.state != lb_state_batch);
+	}
+
+	/*
+	 * We are now the queue head, we should acquire the lock and
+	 * process a batch of qnodes.
+	 */
+	loop = LB_BATCH_SIZE;
+	next = &node;
+	spin_lock(lock);
+
+do_list_again:
+	do {
+		nptr = next;
+		_list_batch_cmd(nptr->cmd, batch->list, nptr->entry);
+		next = READ_ONCE(nptr->next);
+		/*
+		 * As soon as the state is marked lb_state_done, we
+		 * can no longer assume the content of *nptr as valid.
+		 * So we have to hold off marking it done until we no
+		 * longer need its content.
+		 *
+		 * The release barrier here is to make sure that we
+		 * won't access its content after marking it done.
+		 */
+		if (next)
+			smp_store_release(&nptr->state, lb_state_done);
+	} while (--loop && next);
+	if (!next) {
+		/*
+		 * The queue tail should be equal to nptr, so clear it to
+		 * mark the queue as empty.
+		 */
+		if (cmpxchg_relaxed(&batch->tail, nptr, NULL) != nptr) {
+			/*
+			 * Queue not empty, wait until the next pointer is
+			 * initialized.
+			 */
+			while (!(next = READ_ONCE(nptr->next)))
+				cpu_relax();
+		}
+		/*
+		 * The release barrier is required to make sure that
+		 * setting the done state is the last operation.
+		 */
+		smp_store_release(&nptr->state, lb_state_done);
+	}
+	if (next) {
+		if (loop)
+			goto do_list_again;	/* More qnodes to process */
+		/*
+		 * Mark the next qnode as head to process the next batch
+		 * of qnodes. The new queue head cannot proceed until we
+		 * release the lock.
+		 */
+		WRITE_ONCE(next->state, lb_state_batch);
+	}
+	spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(do_list_batch_slowpath);
-- 
1.7.1
