Message-Id: <1424443195-18676-5-git-send-email-daniel.wagner@bmw-carit.de>
Date:	Fri, 20 Feb 2015 15:39:54 +0100
From:	Daniel Wagner <daniel.wagner@...-carit.de>
To:	Jeff Layton <jlayton@...chiereds.net>
Cc:	linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
	John Kacur <jkacur@...hat.com>,
	Daniel Wagner <daniel.wagner@...-carit.de>,
	"J. Bruce Fields" <bfields@...ldses.org>,
	Alexander Viro <viro@...iv.linux.org.uk>
Subject: [RFC v1 4/5] locks: Use percpu spinlocks to protect file_lock_list

Replace the lglock with percpu spinlocks. That allows us to iterate in
the seq_file ops without taking all the underlying spinlocks at once
with lg_global_lock().

Signed-off-by: Daniel Wagner <daniel.wagner@...-carit.de>
Cc: Jeff Layton <jlayton@...chiereds.net>
Cc: "J. Bruce Fields" <bfields@...ldses.org>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
---
 fs/locks.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/locks.c b/fs/locks.c
index 142e4fd..20ed00a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -128,7 +128,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/hashtable.h>
 #include <linux/percpu.h>
-#include <linux/lglock.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/filelock.h>
@@ -160,10 +159,10 @@ int lease_break_time = 45;
 /*
  * The global file_lock_list is only used for displaying /proc/locks, so we
  * keep a list on each CPU, with each list protected by its own spinlock via
- * the file_lock_lglock. Note that alterations to the list also require that
+ * the file_lock_lock. Note that alterations to the list also require that
  * the relevant flc_lock is held.
  */
-DEFINE_STATIC_LGLOCK(file_lock_lglock);
+static DEFINE_PER_CPU(spinlock_t, file_lock_lock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 
 /*
@@ -561,10 +560,10 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 /* Must be called with the flc_lock held! */
 static void locks_insert_global_locks(struct file_lock *fl)
 {
-	lg_local_lock(&file_lock_lglock);
+	spin_lock(this_cpu_ptr(&file_lock_lock));
 	fl->fl_link_cpu = smp_processor_id();
 	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
-	lg_local_unlock(&file_lock_lglock);
+	spin_unlock(this_cpu_ptr(&file_lock_lock));
 }
 
 /* Must be called with the flc_lock held! */
@@ -577,9 +576,9 @@ static void locks_delete_global_locks(struct file_lock *fl)
 	 */
 	if (hlist_unhashed(&fl->fl_link))
 		return;
-	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
 	hlist_del_init(&fl->fl_link);
-	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
 }
 
 static unsigned long
@@ -2628,9 +2627,9 @@ static void *locks_start(struct seq_file *f, loff_t *pos)
 	struct locks_iterator *iter = f->private;
 
 	iter->li_pos = *pos + 1;
-	lg_global_lock(&file_lock_lglock);
 	spin_lock(&blocked_lock_lock);
-	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
+	return seq_hlist_start_percpu_locked(&file_lock_list, &file_lock_lock,
+					&iter->li_cpu, *pos);
 }
 
 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
@@ -2638,14 +2637,17 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
 	struct locks_iterator *iter = f->private;
 
 	++iter->li_pos;
-	return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
+	return seq_hlist_next_percpu_locked(v, &file_lock_list, &file_lock_lock,
+				&iter->li_cpu, pos);
 }
 
 static void locks_stop(struct seq_file *f, void *v)
 	__releases(&blocked_lock_lock)
 {
+	struct locks_iterator *iter = f->private;
+
+	seq_hlist_stop_percpu_locked(v, &file_lock_lock, &iter->li_cpu);
 	spin_unlock(&blocked_lock_lock);
-	lg_global_unlock(&file_lock_lglock);
 }
 
 static const struct seq_operations locks_seq_operations = {
@@ -2686,10 +2688,10 @@ static int __init filelock_init(void)
 	filelock_cache = kmem_cache_create("file_lock_cache",
 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
-	lg_lock_init(&file_lock_lglock, "file_lock_lglock");
-
-	for_each_possible_cpu(i)
+	for_each_possible_cpu(i) {
 		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
+		spin_lock_init(per_cpu_ptr(&file_lock_lock, i));
+	}
 
 	return 0;
 }
-- 
2.1.0
