Message-Id: <20221016180938.17403-4-fmdefrancesco@gmail.com>
Date: Sun, 16 Oct 2022 20:09:38 +0200
From: "Fabio M. De Francesco" <fmdefrancesco@...il.com>
To: Jani Nikula <jani.nikula@...ux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
Tvrtko Ursulin <tvrtko.ursulin@...ux.intel.com>,
David Airlie <airlied@...il.com>,
Daniel Vetter <daniel@...ll.ch>,
Zhenyu Wang <zhenyuw@...ux.intel.com>,
Zhi Wang <zhi.a.wang@...el.com>,
Matthew Auld <matthew.auld@...el.com>,
Thomas Hellström
<thomas.hellstrom@...ux.intel.com>,
Nirmoy Das <nirmoy.das@...el.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Christian König <christian.koenig@....com>,
Matt Roper <matthew.d.roper@...el.com>,
John Harrison <John.C.Harrison@...el.com>,
Daniele Ceraolo Spurio <daniele.ceraolospurio@...el.com>,
Lucas De Marchi <lucas.demarchi@...el.com>,
intel-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org, intel-gvt-dev@...ts.freedesktop.org
Cc: "Fabio M. De Francesco" <fmdefrancesco@...il.com>,
"Venkataramanan, Anirudh" <anirudh.venkataramanan@...el.com>,
Ira Weiny <ira.weiny@...el.com>
Subject: [RESEND PATCH 3/3] drm/i915/gem: Replace kmap() with kmap_local_page()

kmap() is being deprecated in favor of kmap_local_page().

There are two main problems with kmap(): (1) it comes with an overhead,
because the mapping space is restricted and protected by a global lock
for synchronization, and (2) it requires a global TLB invalidation when
the kmap pool wraps around, and it can block until a slot becomes
available when the mapping space is fully utilized.

With kmap_local_page() the mappings are per thread, CPU local, can take
page faults, and can be called from any context (including interrupts).
kmap_local_page() is faster than kmap() in kernels with HIGHMEM enabled.
Furthermore, tasks holding local mappings can be preempted and, when
they are scheduled to run again, the kernel virtual addresses are
restored and are still valid.
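
For reference, the conversion is mechanical. Below is a minimal,
hypothetical sketch (the helper and its name are made up for
illustration and are not part of this series) of the local-mapping API
replacing a kmap()/kunmap() pair:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy a buffer into a page through a short-lived local mapping. */
	static void copy_into_page(struct page *page, const void *src, size_t len)
	{
		void *vaddr = kmap_local_page(page);	/* was: kmap(page) */

		memcpy(vaddr, src, len);
		/* kunmap_local() takes the virtual address, not the page. */
		kunmap_local(vaddr);			/* was: kunmap(page) */
	}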

Since the use of kmap_local_page() in i915/gem is safe everywhere, it
should be preferred.

Therefore, replace kmap() with kmap_local_page() in i915/gem. Instead of
open-coding a local map + memcpy + local unmap sequence, use
memcpy_to_page() in the one call site that fits this pattern.
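
For the same copy, the memcpy_to_page() helper from <linux/highmem.h>
performs the local map, copy, flush and unmap in a single call;
sketching it with the same hypothetical helper as above:

	#include <linux/highmem.h>

	/* Copy at offset 0 into the page, as in the shmem hunk below. */
	static void copy_into_page(struct page *page, const void *src, size_t len)
	{
		memcpy_to_page(page, 0, src, len);
	}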
Cc: "Venkataramanan, Anirudh" <anirudh.venkataramanan@...el.com>
Suggested-by: Ira Weiny <ira.weiny@...el.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@...il.com>
---
drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6 ++----
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 8 ++++----
2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..2bc6ab9964ff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -640,16 +640,14 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
struct page *page;
- void *pgdata, *vaddr;
+ void *pgdata;
err = aops->write_begin(file, file->f_mapping, offset, len,
&page, &pgdata);
if (err < 0)
goto fail;
- vaddr = kmap(page);
- memcpy(vaddr, data, len);
- kunmap(page);
+ memcpy_to_page(page, 0, data, len);
err = aops->write_end(file, file->f_mapping, offset, len, len,
page, pgdata);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3ced9948a331..bb25b50b5688 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -153,7 +153,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
- cpu = kmap(p) + offset_in_page(offset);
+ cpu = kmap_local_page(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
@@ -171,7 +171,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
}
*cpu = 0;
drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap(p);
+ kunmap_local(cpu);
out:
i915_gem_object_lock(obj, NULL);
@@ -249,7 +249,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
- cpu = kmap(p) + offset_in_page(offset);
+ cpu = kmap_local_page(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
@@ -267,7 +267,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
}
*cpu = 0;
drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap(p);
+ kunmap_local(cpu);
if (err)
return err;
--
2.37.1