Message-Id: <20221022072445.054445928@linuxfoundation.org>
Date: Sat, 22 Oct 2022 09:20:46 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Chris Wilson <chris.p.wilson@...el.com>,
Thomas Hellström <thomas.hellstrom@...ux.intel.com>,
Matthew Auld <matthew.auld@...el.com>,
Tvrtko Ursulin <tvrtko.ursulin@...el.com>
Subject: [PATCH 5.19 167/717] drm/i915/gt: Use i915_vm_put on ppgtt_create error paths
From: Chris Wilson <chris.p.wilson@...el.com>
commit 20e377e7b2e7c327039f10db80ba5bcc1f6c882d upstream.
Now that the scratch page and page directories have a reference back to
the i915_address_space, we cannot immediately free the ppgtt on error:
those buffer objects will perform a later i915_vm_put from their
deferred frees.
The downside of replacing the onion unwind along the error paths is
that the ppgtt cleanup must now handle a partially constructed vm,
which includes ensuring that vm->cleanup is set before the first error
path can be taken.
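
To make the lifetime rule concrete, the following is a minimal
userspace sketch of the same pattern. It is an analogy, not the i915
code itself: every struct, field and function name below is invented
for illustration. Child objects hold a reference back to the vm, so an
error path may only drop its own reference with a put, and the cleanup
callback, installed before the first failure point, must cope with the
NULL fields of a partially constructed vm.

#include <stdio.h>
#include <stdlib.h>

struct vm {
	int refcount;			/* i915 uses a struct kref here */
	void (*cleanup)(struct vm *);	/* installed before any error path */
	void *scratch0;			/* NULL until scratch init succeeds */
	void *pd;			/* NULL until the top-level pd exists */
};

static void vm_cleanup(struct vm *vm)
{
	/* Must tolerate a partially constructed vm. */
	if (vm->pd)
		free(vm->pd);
	if (vm->scratch0)		/* mirrors the new free_scratch() guard */
		free(vm->scratch0);
}

static void vm_put(struct vm *vm)
{
	if (--vm->refcount)
		return;
	vm->cleanup(vm);		/* last reference: tear down and free */
	free(vm);
}

static struct vm *vm_create(int fail_scratch)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	if (!vm)
		return NULL;
	vm->refcount = 1;
	vm->cleanup = vm_cleanup;	/* set before the first goto err_put */

	vm->scratch0 = fail_scratch ? NULL : malloc(64);
	if (!vm->scratch0)
		goto err_put;

	vm->pd = malloc(64);
	if (!vm->pd)
		goto err_put;

	return vm;

err_put:
	/*
	 * Drop our reference instead of calling free() directly; in i915
	 * the scratch/pd objects still hold references that are only put
	 * from their deferred frees, so an immediate free would be a
	 * use-after-free.
	 */
	vm_put(vm);
	return NULL;
}

int main(void)
{
	struct vm *vm = vm_create(0);

	if (vm)
		vm_put(vm);		/* normal teardown */
	vm_create(1);			/* error path: guarded cleanup runs once */
	printf("done\n");
	return 0;
}

Built standalone, both the success path and the simulated scratch
failure end with vm_cleanup() running exactly once on whatever was
actually constructed.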
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/6900
Signed-off-by: Chris Wilson <chris.p.wilson@...el.com>
Fixes: 4d8151ae5329 ("drm/i915: Don't free shared locks while shared")
Cc: Thomas Hellström <thomas.hellstrom@...ux.intel.com>
Cc: Matthew Auld <matthew.auld@...el.com>
Cc: <stable@...r.kernel.org> # v5.14+
Reviewed-by: Matthew Auld <matthew.auld@...el.com>
Signed-off-by: Matthew Auld <matthew.auld@...el.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220926153333.102195-1-matthew.auld@intel.com
(cherry picked from commit c286558f58535cf97b717b946d6c96d774a09d17)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@...el.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 16 ++++-----
drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 58 ++++++++++++++++++-----------------
drivers/gpu/drm/i915/gt/intel_gtt.c | 3 +
3 files changed, 41 insertions(+), 36 deletions(-)
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -247,6 +247,7 @@ err_scratch1:
i915_gem_object_put(vm->scratch[1]);
err_scratch0:
i915_gem_object_put(vm->scratch[0]);
+ vm->scratch[0] = NULL;
return ret;
}
@@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i9
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
- mutex_destroy(&ppgtt->flush);
+ if (ppgtt->base.pd)
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+ mutex_destroy(&ppgtt->flush);
}
static void pd_vma_bind(struct i915_address_space *vm,
@@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(str
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_put;
ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
if (IS_ERR(ppgtt->base.pd)) {
err = PTR_ERR(ppgtt->base.pd);
- goto err_scratch;
+ goto err_put;
}
return &ppgtt->base;
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->base.vm);
return ERR_PTR(err);
}
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i9
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ if (ppgtt->pd)
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ gen8_pd_top_count(vm), vm->top);
+
free_scratch(vm);
}
@@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915
struct drm_i915_gem_object *obj;
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto free_scratch;
+ }
ret = map_pt_dma(vm, obj);
if (ret) {
@@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915
free_scratch:
while (i--)
i915_gem_object_put(vm->scratch[i]);
- return -ENOMEM;
+ vm->scratch[0] = NULL;
+ return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -901,6 +907,7 @@ err_pd:
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
+ struct i915_page_directory *pd;
struct i915_ppgtt *ppgtt;
int err;
@@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(str
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
}
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
@@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(str
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
ppgtt->vm.foreach = gen8_ppgtt_foreach;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.pte_encode = gen8_pte_encode;
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
+ pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_put;
+ }
+ ppgtt->pd = pd;
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_put;
+ }
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
return ppgtt;
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->vm);
return ERR_PTR(err);
}
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -405,6 +405,9 @@ void free_scratch(struct i915_address_sp
{
int i;
+ if (!vm->scratch[0])
+ return;
+
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
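
A note on the design choice in this last hunk: the early return when
vm->scratch[0] is NULL is what allows gen6_ppgtt_cleanup() and
gen8_ppgtt_cleanup() to be reached unconditionally through
i915_vm_put() on a vm whose scratch setup never completed. The
reworked error paths in gen6_ppgtt_init_scratch() and
gen8_init_scratch() leave vm->scratch[0] set to NULL after dropping
the objects, so free_scratch() becomes a no-op there rather than
putting stale pointers.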