Message-Id: <1455672680-7153-2-git-send-email-Waiman.Long@hpe.com>
Date:	Tue, 16 Feb 2016 20:31:19 -0500
From:	Waiman Long <Waiman.Long@....com>
To:	Alexander Viro <viro@...iv.linux.org.uk>, Jan Kara <jack@...e.com>,
	Jeff Layton <jlayton@...chiereds.net>,
	"J. Bruce Fields" <bfields@...ldses.org>,
	Tejun Heo <tj@...nel.org>,
	Christoph Lameter <cl@...ux-foundation.org>
Cc:	linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Andi Kleen <andi@...stfloor.org>,
	Dave Chinner <dchinner@...hat.com>,
	Scott J Norton <scott.norton@...com>,
	Douglas Hatch <doug.hatch@...com>,
	Waiman Long <Waiman.Long@....com>
Subject: [RFC PATCH 1/2] lib/percpu-list: Per-cpu list with associated per-cpu locks

Linked lists are used everywhere in the Linux kernel. However, when
many threads try to add or delete entries in the same linked list
concurrently, the list can become a performance bottleneck.

This patch introduces a new per-cpu list subsystem with associated
per-cpu locks protecting each of the lists individually. This
allows list entry insertion and deletion to proceed in parallel
instead of being serialized by a single global list and lock.

List entry insertion is strictly per-CPU. List deletion, however, can
happen on a CPU other than the one that did the insertion, so we still
need a lock to protect each list. Because of that, there may still be
a small amount of contention when deletions are being done.
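
For example, an object to be kept on such a list embeds a percpu_list
entry in its own structure; the entry records, via its lockptr field,
which per-cpu lock currently protects it. A minimal sketch (struct foo
and its "link" member are hypothetical, not part of this patch):

	struct foo {
		int data;
		struct percpu_list link;	/* list entry; link.lockptr
						 * names the per-cpu lock
						 * that protects this entry
						 */
	};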

A new header file, include/linux/percpu-list.h, will be added with the
associated percpu_list structure. The following functions are provided
to manage the per-cpu list (a usage sketch follows the list):

 1. int init_percpu_list_head(struct percpu_list **pclist_handle)
 2. void percpu_list_add(struct percpu_list *new,
			 struct percpu_list *head)
 3. void percpu_list_del(struct percpu_list *entry)
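
A minimal usage sketch, assuming the hypothetical struct foo above and
treating do_something() as a placeholder:

	static struct percpu_list *foo_list;	/* per-cpu list heads */

	/* allocate and initialize one list head per possible CPU */
	int ret = init_percpu_list_head(&foo_list);
	if (ret)
		return ret;

	/* given an initialized struct foo *f, insert it into the
	 * sub-list of the current CPU */
	percpu_list_add(&f->link, foo_list);

	/* iterate over all entries on all CPUs, taking each per-cpu
	 * lock in turn */
	for_all_percpu_list_entries_simple(f, pclock, foo_list, link)
		do_something(f);
	end_all_percpu_list_entries(pclock);

	/* deletion may run on any CPU */
	percpu_list_del(&f->link);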

Signed-off-by: Waiman Long <Waiman.Long@....com>
---
 include/linux/percpu-list.h |  117 +++++++++++++++++++++++++++++++++++++++++++
 lib/Makefile                |    2 +-
 lib/percpu-list.c           |   80 +++++++++++++++++++++++++++++
 3 files changed, 198 insertions(+), 1 deletions(-)
 create mode 100644 include/linux/percpu-list.h
 create mode 100644 lib/percpu-list.c

diff --git a/include/linux/percpu-list.h b/include/linux/percpu-list.h
new file mode 100644
index 0000000..94be520
--- /dev/null
+++ b/include/linux/percpu-list.h
@@ -0,0 +1,117 @@
+/*
+ * Per-cpu list
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@....com>
+ */
+#ifndef __LINUX_PERCPU_LIST_H
+#define __LINUX_PERCPU_LIST_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+
+/*
+ * include/linux/percpu-list.h
+ *
+ * A per-cpu list protected by a per-cpu spinlock.
+ *
+ * The list head percpu_list structure contains the spinlock, the other
+ * entries in the list contain the spinlock pointer.
+ */
+struct percpu_list {
+	struct list_head list;
+	union {
+		spinlock_t lock;	/* For list head */
+		spinlock_t *lockptr;	/* For other entries */
+	};
+};
+
+/*
+ * A simplified for_all_percpu_list_entries macro without the next and pchead
+ * parameters.
+ */
+#define for_all_percpu_list_entries_simple(pos, pclock, head, member)	\
+	for_all_percpu_list_entries(pos, next, pchead, pclock, head, member)
+
+#define PERCPU_LIST_HEAD_INIT(name)				\
+	{							\
+		.list.prev = &name.list,			\
+		.list.next = &name.list,			\
+		.lock = __SPIN_LOCK_UNLOCKED(name),		\
+	}
+
+#define PERCPU_LIST_ENTRY_INIT(name)		\
+	{					\
+		.list.prev = &name.list,	\
+		.list.next = &name.list,	\
+		.lockptr = NULL			\
+	}
+
+static inline void INIT_PERCPU_LIST_HEAD(struct percpu_list *pcpu_list)
+{
+	INIT_LIST_HEAD(&pcpu_list->list);
+	spin_lock_init(&pcpu_list->lock);
+}
+
+static inline void INIT_PERCPU_LIST_ENTRY(struct percpu_list *pcpu_list)
+{
+	INIT_LIST_HEAD(&pcpu_list->list);
+	pcpu_list->lockptr = NULL;
+}
+
+#define PERCPU_LIST_HEAD(name)	struct percpu_list __percpu *name
+
+static inline void free_percpu_list_head(struct percpu_list **pclist_handle)
+{
+	free_percpu(*pclist_handle);
+	*pclist_handle = NULL;
+}
+
+static inline bool percpu_list_empty(struct percpu_list *pcpu_list)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		if (!list_empty(&per_cpu_ptr(pcpu_list, cpu)->list))
+			return false;
+	return true;
+}
+
+/**
+ * for_all_percpu_list_entries - iterate over all the per-cpu list with locking
+ * @pos:	the type * to use as a loop cursor for the current entry
+ * @next:	an internal type * variable pointing to the next entry
+ * @pchead:	an internal struct list * of percpu list head
+ * @pclock:	an internal variable for the current per-cpu spinlock
+ * @head:	the head of the per-cpu list
+ * @member:	the name of the per-cpu list within the struct
+ */
+#define for_all_percpu_list_entries(pos, next, pchead, pclock, head, member)\
+	{								 \
+	int cpu;							 \
+	for_each_possible_cpu(cpu) {					 \
+		typeof(*pos) *next;					 \
+		spinlock_t *pclock = per_cpu_ptr(&(head)->lock, cpu);	 \
+		struct list_head *pchead = &per_cpu_ptr(head, cpu)->list;\
+		spin_lock(pclock);					 \
+		list_for_each_entry_safe(pos, next, pchead, member.list)
+
+#define end_all_percpu_list_entries(pclock)	spin_unlock(pclock); } }
+
+extern int init_percpu_list_head(struct percpu_list **pclist_handle);
+extern void percpu_list_add(struct percpu_list *new, struct percpu_list *head);
+extern void percpu_list_del(struct percpu_list *entry);
+
+#endif /* __LINUX_PERCPU_LIST_H */
diff --git a/lib/Makefile b/lib/Makefile
index a7c26a4..71a25d4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,7 +27,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o
+	 once.o percpu-list.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/percpu-list.c b/lib/percpu-list.c
new file mode 100644
index 0000000..e5c04bf
--- /dev/null
+++ b/lib/percpu-list.c
@@ -0,0 +1,80 @@
+/*
+ * Per-cpu list
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2016 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@....com>
+ */
+#include <linux/percpu-list.h>
+
+/*
+ * Initialize the per-cpu list
+ */
+int init_percpu_list_head(struct percpu_list **pclist_handle)
+{
+	struct percpu_list *pclist = alloc_percpu(struct percpu_list);
+	int cpu;
+
+	if (!pclist)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		INIT_PERCPU_LIST_HEAD(per_cpu_ptr(pclist, cpu));
+
+	*pclist_handle = pclist;
+	return 0;
+}
+
+/*
+ * List selection is based on the CPU being used when the percpu_list_add()
+ * function is called. However, deletion may be done by a different CPU.
+ * So we still need to use a lock to protect the content of the list.
+ */
+void percpu_list_add(struct percpu_list *new, struct percpu_list *head)
+{
+	spinlock_t *lock;
+
+	/*
+	 * We need to disable preemption before accessing the per-cpu data
+	 * to make sure that the cpu won't be changed because of preemption.
+	 */
+	preempt_disable();
+	lock = this_cpu_ptr(&head->lock);
+	spin_lock(lock);
+	new->lockptr = lock;
+	list_add(&new->list, this_cpu_ptr(&head->list));
+	spin_unlock(lock);
+	preempt_enable();
+}
+
+/*
+ * Delete an entry from a percpu list
+ *
+ * We need to check the lock pointer again after taking the lock to guard
+ * against concurrent delete of the same entry. If the lock pointer changes
+ * or becomes NULL, we assume that the deletion was done elsewhere.
+ */
+void percpu_list_del(struct percpu_list *entry)
+{
+	spinlock_t *lock = READ_ONCE(entry->lockptr);
+
+	if (unlikely(!lock))
+		return;
+
+	spin_lock(lock);
+	if (likely(entry->lockptr && (lock == entry->lockptr))) {
+		list_del_init(&entry->list);
+		entry->lockptr = NULL;
+	}
+	spin_unlock(lock);
+}
-- 
1.7.1
