Message-ID: <20230620180148.GA419134@sumitra.com>
Date: Tue, 20 Jun 2023 11:01:48 -0700
From: Sumitra Sharma <sumitraartsy@...il.com>
To: Patrik Jakobsson <patrik.r.jakobsson@...il.com>,
David Airlie <airlied@...il.com>,
Daniel Vetter <daniel@...ll.ch>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Cc: Ira Weiny <ira.weiny@...el.com>, Fabio <fmdefrancesco@...il.com>,
Deepak R Varma <drv@...lo.com>,
Sumitra Sharma <sumitraartsy@...il.com>
Subject: [PATCH] drm/gma500: Replace kmap{,_atomic}() with page_address()

Remove unnecessary calls to kmap{,_atomic}() on pages allocated
with GFP_DMA32.

GFP_DMA32 satisfies allocation requests from the DMA32 zone, so
pages allocated with it can never come from highmem. For such
pages, kmap() and its variants simply return the page's address
in the kernel's direct mapping, which is exactly what
page_address() returns; the mapping and unmapping calls are
therefore unnecessary.

In psb_mmu_alloc_pd(), replace the kmap_local_page()/kunmap_local()
and kmap()/kunmap() pairs with page_address().

In psb_mmu_alloc_pt(), replace kmap_atomic()/kunmap_atomic() with
page_address().

In psb_mmu_pt_alloc_map_lock() and psb_mmu_pt_unmap_unlock(),
replace the kmap_atomic(pt->p)/kunmap_atomic(pt->v) pair with
page_address(pt->p). This is safe because psb_mmu_alloc_pt()
allocates the page with pt->p = alloc_page(GFP_DMA32).
Suggested-by: Ira Weiny <ira.weiny@...el.com>
Signed-off-by: Sumitra Sharma <sumitraartsy@...il.com>
---
drivers/gpu/drm/gma500/mmu.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
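
For reviewers: a minimal, self-contained sketch of the before/after
pattern, assuming a page that was allocated with GFP_DMA32. The
function name is made up for illustration and is not part of this
patch or of mmu.c:

#include <linux/highmem.h>	/* kmap_local_page(), kunmap_local() */
#include <linux/mm.h>		/* page_address(), struct page */

/* Illustrative only: 'p' must come from alloc_page(GFP_DMA32). */
static void example_clear_dma32_word(struct page *p)
{
	uint32_t *v;

	/* Before: map, write, unmap; the map is a no-op for lowmem. */
	v = kmap_local_page(p);
	v[0] = 0;
	kunmap_local(v);

	/* After: write through the permanent direct-map address. */
	v = page_address(p);
	v[0] = 0;
}

Since a GFP_DMA32 page can never be in highmem, kmap_local_page()
already resolves to the same direct-map address that page_address()
returns; dropping the map/unmap pair removes bookkeeping without
changing behavior.
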
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index a70b01ccdf70..59aa5661e56a 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -184,20 +184,15 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
 		pd->invalid_pte = 0;
 	}
 
-	v = kmap_local_page(pd->dummy_pt);
+	v = page_address(pd->dummy_pt);
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
 		v[i] = pd->invalid_pte;
 
-	kunmap_local(v);
-
-	v = kmap_local_page(pd->p);
+	v = page_address(pd->p);
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
 		v[i] = pd->invalid_pde;
 
-	kunmap_local(v);
-
-	clear_page(kmap(pd->dummy_page));
-	kunmap(pd->dummy_page);
+	clear_page(page_address(pd->dummy_page));
 
 	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
 	if (!pd->tables)
@@ -279,7 +274,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
 	spin_lock(lock);
 
-	v = kmap_atomic(pt->p);
+	v = page_address(pt->p);
 	clf = (uint8_t *) v;
 	ptes = (uint32_t *) v;
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -293,7 +288,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		}
 		mb();
 	}
-	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	pt->count = 0;
@@ -339,7 +333,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
 	}
-	pt->v = kmap_atomic(pt->p);
+	pt->v = page_address(pt->p);
 	return pt;
 }
 
@@ -365,7 +359,6 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 	struct psb_mmu_pd *pd = pt->pd;
 	uint32_t *v;
 
-	kunmap_atomic(pt->v);
 	if (pt->count == 0) {
 		v = kmap_atomic(pd->p);
 		v[pt->index] = pd->invalid_pde;
--
2.25.1