Message-ID: <1422922326.22865.447.camel@redhat.com>
Date: Mon, 02 Feb 2015 17:12:06 -0700
From: Alex Williamson <alex.williamson@...hat.com>
To: Alexey Kardashevskiy <aik@...abs.ru>
Cc: linuxppc-dev@...ts.ozlabs.org,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
Gavin Shan <gwshan@...ux.vnet.ibm.com>,
Alexander Graf <agraf@...e.de>,
Alexander Gordeev <agordeev@...hat.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 05/24] vfio: powerpc/spapr: Move locked_vm accounting
to helpers
On Thu, 2015-01-29 at 20:21 +1100, Alexey Kardashevskiy wrote:
> This moves the locked pages accounting to helpers.
> Later they will be reused for Dynamic DMA windows (DDW).
>
> While we are here, update the comment explaining why RLIMIT_MEMLOCK
> might need to be bigger than the guest RAM. This also prints the pid
> of the current process in pr_warn/pr_debug.
>
> Signed-off-by: Alexey Kardashevskiy <aik@...abs.ru>
> ---
> drivers/vfio/vfio_iommu_spapr_tce.c | 72 +++++++++++++++++++++++++++----------
> 1 file changed, 53 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
> index c596053..29d5708 100644
> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
> @@ -29,6 +29,47 @@
> static void tce_iommu_detach_group(void *iommu_data,
> struct iommu_group *iommu_group);
>
> +#define IOMMU_TABLE_PAGES(tbl) \
> + (((tbl)->it_size << (tbl)->it_page_shift) >> PAGE_SHIFT)
A bit of an infringement on the global namespace with such a generic
name.
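A static, prefixed helper would stay out of the global namespace; a
minimal sketch of one alternative (the tce_iommu_ name is only
illustrative):

static inline unsigned long tce_iommu_table_pages(struct iommu_table *tbl)
{
	return (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
}

That is, the number of system pages spanned by the table, which is what
gets charged against RLIMIT_MEMLOCK below.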
> +
> +static long try_increment_locked_vm(long npages)
> +{
> + long ret = 0, locked, lock_limit;
> +
> + if (!current || !current->mm)
> + return -ESRCH; /* process exited */
> +
> + down_write(&current->mm->mmap_sem);
> + locked = current->mm->locked_vm + npages;
> + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
> + pr_warn("[%d] RLIMIT_MEMLOCK (%ld) exceeded\n",
> + current->pid, rlimit(RLIMIT_MEMLOCK));
> + ret = -ENOMEM;
> + } else {
> + current->mm->locked_vm += npages;
> + }
> + pr_debug("[%d] RLIMIT_MEMLOCK+ %ld pages\n", current->pid,
> + current->mm->locked_vm);
> + up_write(&current->mm->mmap_sem);
> +
> + return ret;
> +}
> +
> +static void decrement_locked_vm(long npages)
> +{
> + if (!current || !current->mm)
> + return; /* process exited */
> +
> + down_write(&current->mm->mmap_sem);
> + if (npages > current->mm->locked_vm)
> + npages = current->mm->locked_vm;
> + current->mm->locked_vm -= npages;
> + pr_debug("[%d] RLIMIT_MEMLOCK- %ld pages\n", current->pid,
> + current->mm->locked_vm);
> + up_write(&current->mm->mmap_sem);
> +}
> +
> /*
> * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
> *
> @@ -66,8 +107,6 @@ static bool tce_check_page_size(struct page *page, unsigned page_shift)
> static int tce_iommu_enable(struct tce_container *container)
> {
> int ret = 0;
> - unsigned long locked, lock_limit, npages;
> - struct iommu_table *tbl = container->tbl;
>
> if (!container->tbl)
> return -ENXIO;
> @@ -95,21 +134,19 @@ static int tce_iommu_enable(struct tce_container *container)
> * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
> * that would effectively kill the guest at random points, much better
> * enforcing the limit based on the max that the guest can map.
> + *
> + * Unfortunately at the moment it counts whole tables, no matter how
> + * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups
> + * each with 2GB DMA window, 8GB will be counted here. The reason for
> + * this is that we cannot tell here the amount of RAM used by the guest
> + * as this information is only available from KVM and VFIO is
> + * KVM agnostic.
> */
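To put numbers on that (illustrative, assuming 4K IOMMU pages and 64K
system pages): a 2GB window gives it_size = 2G / 4K = 524288 entries,
so IOMMU_TABLE_PAGES() yields (524288 << 12) >> 16 = 32768 system
pages, i.e. 2GB charged per table, hence 8GB for four such groups.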
> - down_write(&current->mm->mmap_sem);
> - npages = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
> - locked = current->mm->locked_vm + npages;
> - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
> - pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n",
> - rlimit(RLIMIT_MEMLOCK));
> - ret = -ENOMEM;
> - } else {
> + ret = try_increment_locked_vm(IOMMU_TABLE_PAGES(container->tbl));
> + if (ret)
> + return ret;
>
> - current->mm->locked_vm += npages;
> - container->enabled = true;
> - }
> - up_write(&current->mm->mmap_sem);
> + container->enabled = true;
>
> return ret;
> }
> @@ -124,10 +161,7 @@ static void tce_iommu_disable(struct tce_container *container)
> if (!container->tbl || !current->mm)
> return;
>
> - down_write(&current->mm->mmap_sem);
> - current->mm->locked_vm -= (container->tbl->it_size <<
> - container->tbl->it_page_shift) >> PAGE_SHIFT;
> - up_write(&current->mm->mmap_sem);
> + decrement_locked_vm(IOMMU_TABLE_PAGES(container->tbl));
> }
>
> static void *tce_iommu_open(unsigned long arg)