Message-Id: <20200918163724.2511-4-hch@lst.de>
Date: Fri, 18 Sep 2020 18:37:21 +0200
From: Christoph Hellwig <hch@....de>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Juergen Gross <jgross@...e.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Jani Nikula <jani.nikula@...ux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
Minchan Kim <minchan@...nel.org>,
Nitin Gupta <ngupta@...are.org>, x86@...nel.org,
xen-devel@...ts.xenproject.org, linux-kernel@...r.kernel.org,
intel-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-mm@...ck.org
Subject: [PATCH 3/6] drm/i915: use vmap in shmem_pin_map
shmem_pin_map somewhat awkwardly reimplements vmap using
alloc_vm_area and manual pte setup. The only practical difference
is that alloc_vm_area prefaults the vmalloc area PTEs, which doesn't
seem to be required here (and could be added to vmap using a flag
if actually required).
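For reference, a minimal sketch of the two mapping styles being contrasted.
This is illustrative only, not the driver code: map_with_alloc_vm_area(),
map_with_vmap() and get_backing_page() are made-up names, get_backing_page()
standing in for however the caller looks up its pages
(shmem_read_mapping_page_gfp() in this driver), and error handling and page
refcounting are omitted.

/*
 * Illustrative sketch only (not the i915 code): the old style reserves a
 * vmalloc area and installs each PTE by hand, the new style hands vmap()
 * an array of struct page pointers.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page *get_backing_page(size_t i);	/* hypothetical page lookup */

/* Old style: alloc_vm_area() + manual PTE setup. */
void *map_with_alloc_vm_area(size_t n_pages)
{
	pte_t **ptes = kvmalloc_array(n_pages, sizeof(*ptes), GFP_KERNEL);
	struct vm_struct *area = alloc_vm_area(n_pages << PAGE_SHIFT, ptes);
	size_t i;

	for (i = 0; i < n_pages; i++)
		*ptes[i] = mk_pte(get_backing_page(i), PAGE_KERNEL);
	kvfree(ptes);
	return area->addr;		/* torn down later with vunmap() */
}

/* New style: collect the pages and let vmap() set up the PTEs. */
void *map_with_vmap(size_t n_pages)
{
	struct page **pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	void *vaddr;
	size_t i;

	for (i = 0; i < n_pages; i++)
		pages[i] = get_backing_page(i);
	vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
	kvfree(pages);			/* vmap() does not need the array afterwards */
	return vaddr;			/* torn down later with vunmap() */
}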
Signed-off-by: Christoph Hellwig <hch@....de>
---
drivers/gpu/drm/i915/gt/shmem_utils.c | 90 +++++++++++----------------
1 file changed, 38 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 43c7acbdc79dea..77410091597f19 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,66 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
-static size_t shmem_npte(struct file *file)
+static size_t shmem_npages(struct file *file)
{
return file->f_mapping->host->i_size >> PAGE_SHIFT;
}
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
- unsigned long pfn;
-
- vunmap(ptr);
-
- for (pfn = 0; pfn < n_pte; pfn++) {
- struct page *page;
-
- page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
- GFP_KERNEL);
- if (!WARN_ON(IS_ERR(page))) {
- put_page(page);
- put_page(page);
- }
- }
-}
-
void *shmem_pin_map(struct file *file)
{
- const size_t n_pte = shmem_npte(file);
- pte_t *stack[32], **ptes, **mem;
- struct vm_struct *area;
- unsigned long pfn;
-
- mem = stack;
- if (n_pte > ARRAY_SIZE(stack)) {
- mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
- if (!mem)
+ const size_t n_pages = shmem_npages(file);
+ struct page **pages, *stack[32];
+ void *vaddr;
+ long i;
+
+ pages = stack;
+ if (n_pages > ARRAY_SIZE(stack)) {
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return NULL;
}
- area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
- if (!area) {
- if (mem != stack)
- kvfree(mem);
- return NULL;
- }
-
- ptes = mem;
- for (pfn = 0; pfn < n_pte; pfn++) {
- struct page *page;
-
- page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
- GFP_KERNEL);
- if (IS_ERR(page))
+ for (i = 0; i < n_pages; i++) {
+ pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+ GFP_KERNEL);
+ if (IS_ERR(pages[i]))
goto err_page;
-
- **ptes++ = mk_pte(page, PAGE_KERNEL);
}
- if (mem != stack)
- kvfree(mem);
+ vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+ if (!vaddr)
+ goto err_page;
+ if (pages != stack)
+ kvfree(pages);
mapping_set_unevictable(file->f_mapping);
- return area->addr;
+ return vaddr;
err_page:
- if (mem != stack)
- kvfree(mem);
-
- __shmem_unpin_map(file, area->addr, pfn);
+ while (--i >= 0)
+ put_page(pages[i]);
+ if (pages != stack)
+ kvfree(pages);
return NULL;
}
void shmem_unpin_map(struct file *file, void *ptr)
{
+ long i = shmem_npages(file);
+
mapping_clear_unevictable(file->f_mapping);
- __shmem_unpin_map(file, ptr, shmem_npte(file));
+ vunmap(ptr);
+
+ for (i = 0; i < shmem_npages(file); i++) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, i,
+ GFP_KERNEL);
+ if (!WARN_ON(IS_ERR(page))) {
+ put_page(page);
+ put_page(page);
+ }
+ }
}
static int __shmem_rw(struct file *file, loff_t off,
--
2.28.0