Message-ID: <20250319145425.51935-27-robdclark@gmail.com>
Date: Wed, 19 Mar 2025 07:52:38 -0700
From: Rob Clark <robdclark@...il.com>
To: dri-devel@...ts.freedesktop.org
Cc: freedreno@...ts.freedesktop.org,
linux-arm-msm@...r.kernel.org,
Rob Clark <robdclark@...omium.org>,
Rob Clark <robdclark@...il.com>,
Abhinav Kumar <quic_abhinavk@...cinc.com>,
Dmitry Baryshkov <lumag@...nel.org>,
Sean Paul <sean@...rly.run>,
Marijn Suijten <marijn.suijten@...ainline.org>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH v2 26/34] drm/msm: Pre-allocate vm_bo objects

From: Rob Clark <robdclark@...omium.org>

Use drm_gpuvm_bo_obtain() in the synchronous part of the VM_BIND submit,
to hold a reference to the vm_bo for the duration of the submit. This
ensures that the vm_bo already exists before the async part of the job,
which is in the fence signalling path (and therefore cannot allocate
memory).
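
For illustration, the prepare/cleanup pairing this change introduces boils
down to the sketch below. The helper names example_prepare_bos() and
example_cleanup_bos() are made up for this note; drm_gpuvm_bo_obtain(),
drm_gpuvm_bo_put(), submit->vm and submit->bos[] are taken from the diff
itself. The error handling is illustrative and the snippet is not meant to
compile standalone.

/*
 * Sketch only: assumes the GEM object lock is held, as the real code
 * asserts with msm_gem_assert_locked().
 */
static int example_prepare_bos(struct msm_gem_submit *submit)
{
	for (int i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct drm_gpuvm_bo *vm_bo;

		if (!obj)
			continue;

		/* May allocate, so do it before the fence signalling path: */
		vm_bo = drm_gpuvm_bo_obtain(submit->vm, obj);
		if (IS_ERR(vm_bo))
			return PTR_ERR(vm_bo);

		/* Reference is held until the submit is cleaned up: */
		submit->bos[i].vm_bo = vm_bo;
	}

	return 0;
}

static void example_cleanup_bos(struct msm_gem_submit *submit)
{
	for (int i = 0; i < submit->nr_bos; i++) {
		/* drm_gpuvm_bo_put() is NULL-safe; empty slots are a no-op: */
		drm_gpuvm_bo_put(submit->bos[i].vm_bo);
	}
}

drm_gpuvm_bo_obtain() either finds the existing vm_bo for the (vm, GEM
object) pair or allocates a new one, returning it with a reference held, so
the async part of the job only ever deals with vm_bos that already exist.
Allocating up front matters because the fence signalling path must not
allocate memory, as that could recurse into reclaim which in turn may wait
on fences.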

Signed-off-by: Rob Clark <robdclark@...omium.org>
---
 drivers/gpu/drm/msm/msm_gem.h     |  1 +
 drivers/gpu/drm/msm/msm_gem_vma.c | 19 +++++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index cb76959fa8a8..d2ffaa11ec1a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -369,6 +369,7 @@ struct msm_gem_submit {
 		uint32_t flags;
 		uint32_t handle;
 		struct drm_gem_object *obj;
+		struct drm_gpuvm_bo *vm_bo;
 		uint64_t iova;
 		uint64_t bo_offset;
 		uint64_t range;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 5c7d44b004fb..b1808d95002f 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -278,8 +278,18 @@ msm_vma_job_prepare(struct msm_gem_submit *submit)
 	for (int i = 0; i < submit->nr_bos; i++) {
 		unsigned op = submit->bos[i].flags & MSM_SUBMIT_BO_OP_MASK;
 
-		if (submit->bos[i].obj)
-			msm_gem_assert_locked(submit->bos[i].obj);
+		if (submit->bos[i].obj) {
+			struct drm_gem_object *obj = submit->bos[i].obj;
+
+			msm_gem_assert_locked(obj);
+
+			/*
+			 * Ensure the vm_bo is already allocated by
+			 * holding a ref until the submit is retired
+			 */
+			submit->bos[i].vm_bo =
+				drm_gpuvm_bo_obtain(submit->vm, obj);
+		}
 
 		/*
 		 * OP_MAP/OP_MAP_NULL has one new VMA for the new mapping,
@@ -309,6 +319,11 @@ msm_vma_job_cleanup(struct msm_gem_submit *submit)
 {
 	struct drm_gpuva *vma;
 
+	for (int i = 0; i < submit->nr_bos; i++) {
+		/* If we're holding an extra ref to the vm_bo, drop it now: */
+		drm_gpuvm_bo_put(submit->bos[i].vm_bo);
+	}
+
 	while (!list_empty(&submit->preallocated_vmas)) {
 		vma = list_first_entry(&submit->preallocated_vmas,
 				       struct drm_gpuva,
--
2.48.1