Message-Id: <1456866003-32441-6-git-send-email-Waiman.Long@hpe.com>
Date:	Tue,  1 Mar 2016 16:00:03 -0500
From:	Waiman Long <Waiman.Long@....com>
To:	Alexander Viro <viro@...iv.linux.org.uk>, Jan Kara <jack@...e.com>,
	Jeff Layton <jlayton@...chiereds.net>,
	"J. Bruce Fields" <bfields@...ldses.org>,
	Tejun Heo <tj@...nel.org>,
	Christoph Lameter <cl@...ux-foundation.org>
Cc:	linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Andi Kleen <andi@...stfloor.org>,
	Dave Chinner <dchinner@...hat.com>,
	Boqun Feng <boqun.feng@...il.com>,
	Scott J Norton <scott.norton@...com>,
	Douglas Hatch <doug.hatch@...com>,
	Waiman Long <Waiman.Long@....com>
Subject: [PATCH v5 5/5] lib/percpu-list: Add a config parameter for disabling per-cpu list

There is a concern that the larger pcpu_list_node structure and the
per-cpu overhead may be a waste of resources on small systems. This
patch adds a config parameter, CONFIG_PERCPU_LIST, that allows the
kernel builder to disable the per-cpu list. With the per-cpu list
disabled, each group of per-cpu lists degenerates into a single global
list shared by all the CPUs.

The per-cpu list is enabled by default (CONFIG_PERCPU_LIST=y); a kernel
builder has to explicitly turn it off ("# CONFIG_PERCPU_LIST is not set"
in .config).
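
For illustration, here is a minimal caller-side sketch of the API. The
"struct foo" container, "foo_list" and the helper names are made up for
this example and are not part of the patch; it builds and behaves the
same whether or not CONFIG_PERCPU_LIST is set:

	struct foo {
		int data;
		struct pcpu_list_node node;
	};

	static struct pcpu_list_head *foo_list;

	static int foo_init(void)
	{
		/* Allocates the per-cpu heads, or one global head */
		return init_pcpu_list_head(&foo_list);
	}

	static void foo_insert(struct foo *f)
	{
		init_pcpu_list_node(&f->node);
		pcpu_list_add(&f->node, foo_list);
	}

	static void foo_remove(struct foo *f)
	{
		/*
		 * The head argument is only consumed when per-cpu
		 * lists are disabled.
		 */
		pcpu_list_del(&f->node, foo_list);
	}

	static void foo_walk(void)
	{
		struct pcpu_list_state state;
		struct foo *f;

		init_pcpu_list_state(&state);
		while (pcpu_list_iterate(foo_list, &state)) {
			f = container_of(state.curr, struct foo, node);
			/* use f; the list lock is held here */
		}
	}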

Signed-off-by: Waiman Long <Waiman.Long@....com>
---
 fs/inode.c                  |    2 +-
 include/linux/percpu-list.h |   93 ++++++++++++++++++++++++++++++++++++++++++-
 lib/Kconfig                 |   14 ++++++
 lib/percpu-list.c           |   24 +++++++++++-
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 58d1a13..23e544b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -437,7 +437,7 @@ EXPORT_SYMBOL_GPL(inode_sb_list_add);
 static inline void inode_sb_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_sb_list.list))
-		pcpu_list_del(&inode->i_sb_list);
+		pcpu_list_del(&inode->i_sb_list, inode->i_sb->s_inodes);
 }
 
 static unsigned long hash(struct super_block *sb, unsigned long hashval)
diff --git a/include/linux/percpu-list.h b/include/linux/percpu-list.h
index fc6207f..12d75ad 100644
--- a/include/linux/percpu-list.h
+++ b/include/linux/percpu-list.h
@@ -80,6 +80,7 @@ static inline void init_pcpu_list_state(struct pcpu_list_state *state)
  */
 #define pcpu_list_next_entry(pos, member) list_next_entry(pos, member.list)
 
+#ifdef CONFIG_PERCPU_LIST
 /*
  * Per-cpu node data structure
  */
@@ -212,7 +213,97 @@ static inline bool pcpu_list_iterate_safe(struct pcpu_list_head *head,
 
 extern void pcpu_list_add(struct pcpu_list_node *node,
 			  struct pcpu_list_head *head);
-extern void pcpu_list_del(struct pcpu_list_node *node);
+extern void pcpu_list_del(struct pcpu_list_node *node,
+			  struct pcpu_list_head *head);
+
+#else /* CONFIG_PERCPU_LIST */
+
+#include <linux/slab.h>
+
+/*
+ * The per-cpu lists now degenerate into a single global list
+ */
+struct pcpu_list_node {
+	struct list_head list;
+};
+
+static inline void init_pcpu_list_node(struct pcpu_list_node *node)
+{
+	INIT_LIST_HEAD(&node->list);
+}
+
+static inline void free_pcpu_list_head(struct pcpu_list_head **phead)
+{
+	kfree(*phead);
+	*phead = NULL;
+}
+
+static inline bool pcpu_list_empty(struct pcpu_list_head *head)
+{
+	return list_empty(&head->list);
+}
+
+static inline void
+pcpu_list_add(struct pcpu_list_node *node, struct pcpu_list_head *head)
+{
+	spin_lock(&head->lock);
+	list_add(&node->list, &head->list);
+	spin_unlock(&head->lock);
+}
+
+static inline void
+pcpu_list_del(struct pcpu_list_node *node, struct pcpu_list_head *head)
+{
+	spin_lock(&head->lock);
+	list_del_init(&node->list);
+	spin_unlock(&head->lock);
+}
+
+static inline bool pcpu_list_iterate(struct pcpu_list_head *head,
+				     struct pcpu_list_state *state)
+{
+	/*
+	 * Find next entry
+	 */
+	if (state->curr) {
+		state->curr = list_next_entry(state->curr, list);
+	} else {
+		spin_lock(&head->lock);
+		state->lock = &head->lock;
+		state->curr = list_entry(head->list.next,
+					 struct pcpu_list_node, list);
+	}
+	if (&state->curr->list == &head->list) {
+		spin_unlock(&head->lock);
+		return false;	/* The list has been exhausted */
+	}
+	return true;	/* Continue the iteration */
+}
+
+static inline bool pcpu_list_iterate_safe(struct pcpu_list_head *head,
+					  struct pcpu_list_state *state)
+{
+	/*
+	 * Find next entry
+	 */
+	if (state->curr) {
+		state->curr = state->next;
+		state->next = list_next_entry(state->next, list);
+	} else {
+		spin_lock(&head->lock);
+		state->lock = &head->lock;
+		state->curr = list_entry(head->list.next,
+					 struct pcpu_list_node, list);
+		state->next = list_next_entry(state->curr, list);
+	}
+	if (&state->curr->list == &head->list) {
+		spin_unlock(&head->lock);
+		return false;	/* The list has been exhausted */
+	}
+	return true;	/* Continue the iteration */
+}
+#endif /* CONFIG_PERCPU_LIST */
+
 extern int  init_pcpu_list_head(struct pcpu_list_head **ppcpu_head);
 
 #endif /* __LINUX_PERCPU_LIST_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 133ebc0..cdadc7e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -536,4 +536,18 @@ config ARCH_HAS_PMEM_API
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
+#
+# Per-cpu list
+#
+config PERCPU_LIST
+	bool "Enable the use of per-cpu lists"
+	default y
+	depends on SMP
+	help
+	  Enables the use of per-cpu lists to allow parallel insertion
+	  and deletion of list entries at the expense of slightly more
+	  overhead in list iteration as well as a larger list node
+	  entry.  This can help improve performance on systems with
+	  many CPU cores.
+
 endmenu
diff --git a/lib/percpu-list.c b/lib/percpu-list.c
index 5003bbb..46a91aa 100644
--- a/lib/percpu-list.c
+++ b/lib/percpu-list.c
@@ -24,6 +24,7 @@
  */
 static struct lock_class_key percpu_list_key;
 
+#ifdef CONFIG_PERCPU_LIST
 /*
  * Initialize the per-cpu list head
  */
@@ -76,7 +77,7 @@ void pcpu_list_add(struct pcpu_list_node *node, struct pcpu_list_head *head)
  * (becomes NULL or to a different one), we assume that the deletion was done
  * elsewhere.
  */
-void pcpu_list_del(struct pcpu_list_node *node)
+void pcpu_list_del(struct pcpu_list_node *node, struct pcpu_list_head *unused)
 {
 	spinlock_t *lock = READ_ONCE(node->lockptr);
 
@@ -98,3 +99,24 @@ void pcpu_list_del(struct pcpu_list_node *node)
 	}
 	spin_unlock(lock);
 }
+
+#else /* CONFIG_PERCPU_LIST */
+/*
+ * Initialize the per-cpu list head
+ */
+int init_pcpu_list_head(struct pcpu_list_head **phead)
+{
+	struct pcpu_list_head *head = kmalloc(sizeof(struct pcpu_list_head),
+					      GFP_KERNEL);
+
+	if (!head)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&head->list);
+	head->lock = __SPIN_LOCK_UNLOCKED(&head->lock);
+	lockdep_set_class(&head->lock, &percpu_list_key);
+
+	*phead = head;
+	return 0;
+}
+#endif /* CONFIG_PERCPU_LIST */
-- 
1.7.1
