lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20150513061853.GA7539@gwshan>
Date:	Wed, 13 May 2015 16:18:53 +1000
From:	Gavin Shan <gwshan@...ux.vnet.ibm.com>
To:	Alexey Kardashevskiy <aik@...abs.ru>
Cc:	linuxppc-dev@...ts.ozlabs.org,
	David Gibson <david@...son.dropbear.id.au>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Paul Mackerras <paulus@...ba.org>,
	Alex Williamson <alex.williamson@...hat.com>,
	Gavin Shan <gwshan@...ux.vnet.ibm.com>,
	Wei Yang <weiyang@...ux.vnet.ibm.com>,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH kernel v10 09/34] vfio: powerpc/spapr: Move locked_vm
 accounting to helpers

On Tue, May 12, 2015 at 01:38:58AM +1000, Alexey Kardashevskiy wrote:
>This moves locked pages accounting to helpers.
>Later they will be reused for Dynamic DMA windows (DDW).
>
>This reworks debug messages to show the current value and the limit.
>
>This stores the locked pages number in the container so when unlocking
>the iommu table pointer won't be needed. This does not have an effect
>now but it will with the multiple tables per container as then we will
>allow attaching/detaching groups on the fly and we may end up having
>a container with no group attached but with the counter incremented.
>
>While we are here, update the comment explaining why RLIMIT_MEMLOCK
>might be required to be bigger than the guest RAM. This also prints
>pid of the current process in pr_warn/pr_debug.
>
>Signed-off-by: Alexey Kardashevskiy <aik@...abs.ru>
>[aw: for the vfio related changes]
>Acked-by: Alex Williamson <alex.williamson@...hat.com>
>Reviewed-by: David Gibson <david@...son.dropbear.id.au>

Reviewed-by: Gavin Shan <gwshan@...ux.vnet.ibm.com>

>---
>Changes:
>v4:
>* new helpers do nothing if @npages == 0
>* tce_iommu_disable() now can decrement the counter if the group was
>detached (not possible now but will be in the future)
>---
> drivers/vfio/vfio_iommu_spapr_tce.c | 82 ++++++++++++++++++++++++++++---------
> 1 file changed, 63 insertions(+), 19 deletions(-)
>
>diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>index 64300cc..40583f9 100644
>--- a/drivers/vfio/vfio_iommu_spapr_tce.c
>+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>@@ -29,6 +29,51 @@
> static void tce_iommu_detach_group(void *iommu_data,
> 		struct iommu_group *iommu_group);
>
>+static long try_increment_locked_vm(long npages)
>+{
>+	long ret = 0, locked, lock_limit;
>+
>+	if (!current || !current->mm)
>+		return -ESRCH; /* process exited */
>+
>+	if (!npages)
>+		return 0;
>+
>+	down_write(&current->mm->mmap_sem);
>+	locked = current->mm->locked_vm + npages;
>+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>+	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
>+		ret = -ENOMEM;
>+	else
>+		current->mm->locked_vm += npages;
>+
>+	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
>+			npages << PAGE_SHIFT,
>+			current->mm->locked_vm << PAGE_SHIFT,
>+			rlimit(RLIMIT_MEMLOCK),
>+			ret ? " - exceeded" : "");
>+

I'm not sure whether printing current->comm in addition to current->pid
would give a bit more readability or not.

Thanks,
Gavin

>+	up_write(&current->mm->mmap_sem);
>+
>+	return ret;
>+}
>+
>+static void decrement_locked_vm(long npages)
>+{
>+	if (!current || !current->mm || !npages)
>+		return; /* process exited */
>+
>+	down_write(&current->mm->mmap_sem);
>+	if (npages > current->mm->locked_vm)
>+		npages = current->mm->locked_vm;
>+	current->mm->locked_vm -= npages;
>+	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
>+			npages << PAGE_SHIFT,
>+			current->mm->locked_vm << PAGE_SHIFT,
>+			rlimit(RLIMIT_MEMLOCK));
>+	up_write(&current->mm->mmap_sem);
>+}
>+
> /*
>  * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
>  *
>@@ -45,6 +90,7 @@ struct tce_container {
> 	struct mutex lock;
> 	struct iommu_table *tbl;
> 	bool enabled;
>+	unsigned long locked_pages;
> };
>
> static bool tce_page_is_contained(struct page *page, unsigned page_shift)
>@@ -60,7 +106,7 @@ static bool tce_page_is_contained(struct page *page, unsigned page_shift)
> static int tce_iommu_enable(struct tce_container *container)
> {
> 	int ret = 0;
>-	unsigned long locked, lock_limit, npages;
>+	unsigned long locked;
> 	struct iommu_table *tbl = container->tbl;
>
> 	if (!container->tbl)
>@@ -89,21 +135,22 @@ static int tce_iommu_enable(struct tce_container *container)
> 	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
> 	 * that would effectively kill the guest at random points, much better
> 	 * enforcing the limit based on the max that the guest can map.
>+	 *
>+	 * Unfortunately at the moment it counts whole tables, no matter how
>+	 * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups
>+	 * each with 2GB DMA window, 8GB will be counted here. The reason for
>+	 * this is that we cannot tell here the amount of RAM used by the guest
>+	 * as this information is only available from KVM and VFIO is
>+	 * KVM agnostic.
> 	 */
>-	down_write(&current->mm->mmap_sem);
>-	npages = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
>-	locked = current->mm->locked_vm + npages;
>-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>-	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
>-		pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n",
>-				rlimit(RLIMIT_MEMLOCK));
>-		ret = -ENOMEM;
>-	} else {
>+	locked = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
>+	ret = try_increment_locked_vm(locked);
>+	if (ret)
>+		return ret;
>
>-		current->mm->locked_vm += npages;
>-		container->enabled = true;
>-	}
>-	up_write(&current->mm->mmap_sem);
>+	container->locked_pages = locked;
>+
>+	container->enabled = true;
>
> 	return ret;
> }
>@@ -115,13 +162,10 @@ static void tce_iommu_disable(struct tce_container *container)
>
> 	container->enabled = false;
>
>-	if (!container->tbl || !current->mm)
>+	if (!current->mm)
> 		return;
>
>-	down_write(&current->mm->mmap_sem);
>-	current->mm->locked_vm -= (container->tbl->it_size <<
>-			container->tbl->it_page_shift) >> PAGE_SHIFT;
>-	up_write(&current->mm->mmap_sem);
>+	decrement_locked_vm(container->locked_pages);
> }
>
> static void *tce_iommu_open(unsigned long arg)
>-- 
>2.4.0.rc3.8.gfb3e7d5
>

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ