Message-Id: <20190402204158.27582-4-daniel.m.jordan@oracle.com>
Date:   Tue,  2 Apr 2019 16:41:55 -0400
From:   Daniel Jordan <daniel.m.jordan@...cle.com>
To:     akpm@...ux-foundation.org
Cc:     daniel.m.jordan@...cle.com, Alexey Kardashevskiy <aik@...abs.ru>,
        Alex Williamson <alex.williamson@...hat.com>,
        Christoph Lameter <cl@...ux.com>,
        Davidlohr Bueso <dave@...olabs.net>, linux-mm@...ck.org,
        kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 3/6] vfio/spapr_tce: drop mmap_sem now that locked_vm is atomic

With locked_vm now an atomic, there is no need to take mmap_sem as
writer.  Delete and refactor accordingly.
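
In outline, the increment path becomes a speculative add that is undone on
failure.  A condensed sketch of the pattern, taken from the hunk below:

  locked = atomic64_add_return(npages, &mm->locked_vm);
  lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
          ret = -ENOMEM;
          atomic64_sub(npages, &mm->locked_vm);
  }

Note that locked_vm can transiently exceed the limit between the add and
the sub; callers only observe the -ENOMEM return.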

Signed-off-by: Daniel Jordan <daniel.m.jordan@...cle.com>
Cc: Alexey Kardashevskiy <aik@...abs.ru>
Cc: Alex Williamson <alex.williamson@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Christoph Lameter <cl@...ux.com>
Cc: Davidlohr Bueso <dave@...olabs.net>
Cc: <linux-mm@...ck.org>
Cc: <kvm@...r.kernel.org>
Cc: <linux-kernel@...r.kernel.org>
---
 drivers/vfio/vfio_iommu_spapr_tce.c | 36 ++++++++++++-----------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index e7d787e5d839..7675a3b28410 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -36,8 +36,9 @@ static void tce_iommu_detach_group(void *iommu_data,
 
 static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
-	long ret = 0, lock_limit;
+	long ret = 0;
 	s64 locked;
+	unsigned long lock_limit;
 
 	if (WARN_ON_ONCE(!mm))
 		return -EPERM;
@@ -45,39 +46,32 @@ static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 	if (!npages)
 		return 0;
 
-	down_write(&mm->mmap_sem);
-	locked = atomic64_read(&mm->locked_vm) + npages;
+	locked = atomic64_add_return(npages, &mm->locked_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
 		ret = -ENOMEM;
-	else
-		atomic64_add(npages, &mm->locked_vm);
-
-	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
-			npages << PAGE_SHIFT,
-			atomic64_read(&mm->locked_vm) << PAGE_SHIFT,
-			rlimit(RLIMIT_MEMLOCK),
-			ret ? " - exceeded" : "");
+		atomic64_sub(npages, &mm->locked_vm);
+	}
 
-	up_write(&mm->mmap_sem);
+	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %lld/%lu%s\n", current->pid,
+			npages << PAGE_SHIFT, locked << PAGE_SHIFT,
+			lock_limit, ret ? " - exceeded" : "");
 
 	return ret;
 }
 
 static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
+	s64 locked;
+
 	if (!mm || !npages)
 		return;
 
-	down_write(&mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > atomic64_read(&mm->locked_vm)))
-		npages = atomic64_read(&mm->locked_vm);
-	atomic64_sub(npages, &mm->locked_vm);
-	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
-			npages << PAGE_SHIFT,
-			atomic64_read(&mm->locked_vm) << PAGE_SHIFT,
+	locked = atomic64_sub_return(npages, &mm->locked_vm);
+	WARN_ON_ONCE(locked < 0);
+	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %lld/%lu\n", current->pid,
+			npages << PAGE_SHIFT, locked << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&mm->mmap_sem);
 }
 
 /*
-- 
2.21.0
