Message-Id: <1495624801-8063-9-git-send-email-ldufour@linux.vnet.ibm.com>
Date:   Wed, 24 May 2017 13:19:59 +0200
From:   Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To:     linux-mm@...ck.org
Cc:     Davidlohr Bueso <dave@...olabs.net>, akpm@...ux-foundation.org,
        Jan Kara <jack@...e.cz>,
        "Kirill A . Shutemov" <kirill@...temov.name>,
        Michal Hocko <mhocko@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Andi Kleen <andi@...stfloor.org>, haren@...ux.vnet.ibm.com,
        aneesh.kumar@...ux.vnet.ibm.com, khandual@...ux.vnet.ibm.com,
        paulmck@...ux.vnet.ibm.com, linux-kernel@...r.kernel.org
Subject: [RFC v2 08/10] mm: Define mem range lock operations

This patch introduces new mm_*() locking operations which act on
mmap_sem either as a rw_semaphore or as a range lock, depending on
CONFIG_MEM_RANGE_LOCK.

When CONFIG_MEM_RANGE_LOCK is defined, the additional range parameter
is used; otherwise it is discarded by the macro expansion, so the
fallback does not carry a useless extra parameter on the stack.
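
For illustration, a minimal caller sketch (not part of this patch; the
function example_walk() is hypothetical, and it assumes the range lock
API where range_read_lock() takes a struct range_lock pointer). It
shows that the same caller builds in both configurations, since the
fallback macros drop the range argument at preprocessing time:

	/* Assumes <linux/mm.h> from this series is in scope. */
	static int example_walk(struct mm_struct *mm)
	{
		/*
		 * Declares a full-range lock when CONFIG_MEM_RANGE_LOCK=y;
		 * expands to nothing otherwise, so 'range' is never defined
		 * and the '&range' below is never evaluated.
		 */
		mm_range_define(range);

		/* down_read(&mm->mmap_sem) when range locks are disabled */
		mm_read_lock(mm, &range);
		/* ... walk the VMAs under the read side of mmap_sem ... */
		mm_read_unlock(mm, &range);

		return 0;
	}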

Signed-off-by: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
---
 include/linux/mm.h       | 27 +++++++++++++++++++++++++++
 include/linux/mm_types.h |  5 +++++
 2 files changed, 32 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b09048386152..d47b28eb0a53 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/range_lock.h>
 #include <linux/pfn.h>
 #include <linux/percpu-refcount.h>
 #include <linux/bit_spinlock.h>
@@ -2588,5 +2589,31 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+#ifdef CONFIG_MEM_RANGE_LOCK
+#define mm_range_define(r)						\
+	struct range_lock r = __RANGE_LOCK_INITIALIZER(0, RANGE_LOCK_FULL)
+#define mm_read_lock(m, r)	range_read_lock(&(m)->mmap_sem, r)
+#define mm_read_trylock(m, r)	range_read_trylock(&(m)->mmap_sem, r)
+#define mm_read_unlock(m, r)	range_read_unlock(&(m)->mmap_sem, r)
+#define mm_write_lock(m, r)	range_write_lock(&(m)->mmap_sem, r)
+#define mm_write_trylock(m, r)	range_write_trylock(&(m)->mmap_sem, r)
+#define mm_write_unlock(m, r)	range_write_unlock(&(m)->mmap_sem, r)
+#define mm_write_lock_killable(m, r) \
+	range_write_lock_interruptible(&(m)->mmap_sem, r)
+#define mm_downgrade_write(m, r) range_downgrade_write(&(m)->mmap_sem, r)
+
+#else /* CONFIG_MEM_RANGE_LOCK */
+#define mm_range_define(r)	do { } while (0)
+#define mm_read_lock(m, r)	down_read(&(m)->mmap_sem)
+#define mm_read_trylock(m, r)	down_read_trylock(&(m)->mmap_sem)
+#define mm_read_unlock(m, r)	up_read(&(m)->mmap_sem)
+#define mm_write_lock(m, r)	down_write(&(m)->mmap_sem)
+#define mm_write_trylock(m, r)	down_write_trylock(&(m)->mmap_sem)
+#define mm_write_unlock(m, r)	up_write(&(m)->mmap_sem)
+#define mm_write_lock_killable(m, r) down_write_killable(&(m)->mmap_sem)
+#define mm_downgrade_write(m, r) downgrade_write(&(m)->mmap_sem)
+
+#endif /* CONFIG_MEM_RANGE_LOCK */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..d40611490200 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
+#include <linux/range_lock.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
@@ -403,7 +404,11 @@ struct mm_struct {
 	int map_count;				/* number of VMAs */
 
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
+#ifdef CONFIG_MEM_RANGE_LOCK
+	struct range_lock_tree mmap_sem;
+#else
 	struct rw_semaphore mmap_sem;
+#endif
 
 	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
 						 * together off init_mm.mmlist, and are protected
-- 
2.7.4
