Message-Id: <20210512144822.028545946@linuxfoundation.org>
Date: Wed, 12 May 2021 16:42:59 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Christian König <christian.koenig@....com>,
James Zhu <James.Zhu@....com>,
Alex Deucher <alexander.deucher@....com>
Subject: [PATCH 5.10 069/530] drm/amdgpu: fix concurrent VM flushes on Vega/Navi v2
From: Christian König <christian.koenig@....com>
commit 20a5f5a98e1bb3d40acd97e89299e8c2d22784be upstream.
Starting with Vega the hardware supports concurrent flushes
of VMIDs, which can be used to implement per-process VMID
allocation.

But concurrent flushes are mutually exclusive with back-to-back
VMID allocations; fix this to avoid a VMID being used in
two ways at the same time.
v2: don't set ring to NULL
Signed-off-by: Christian König <christian.koenig@....com>
Reviewed-by: James Zhu <James.Zhu@....com>
Tested-by: James Zhu <James.Zhu@....com>
Signed-off-by: Alex Deucher <alexander.deucher@....com>
Cc: stable@...r.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
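[Editor's note, not part of the patch: a minimal, self-contained C
sketch of the rule the patch enforces. When concurrent flushes are
available, a VMID is effectively per process, so idle checks must
look at fences from every ring (the NULL-ring case) rather than only
the submitting ring. The enum values and the concurrent_flush()
helper below are illustrative stand-ins mirroring the hunks that
follow; they are not the amdgpu API.]

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for enum amd_asic_type; only the relative ordering of
 * pre-Vega vs. Vega10 and the two broken Navi parts matters here. */
enum asic_type {
        CHIP_POLARIS10,         /* pre-Vega: no concurrent flushes */
        CHIP_VEGA10,
        CHIP_NAVI10,            /* concurrent flushes broken */
        CHIP_NAVI14,            /* concurrent flushes broken */
        CHIP_SIENNA_CICHLID,
};

/* Mirrors the amdgpu_vm_manager_init() hunk below. */
static bool concurrent_flush(enum asic_type t)
{
        return !(t < CHIP_VEGA10 || t == CHIP_NAVI10 ||
                 t == CHIP_NAVI14);
}

int main(void)
{
        enum asic_type t;

        for (t = CHIP_POLARIS10; t <= CHIP_SIENNA_CICHLID; t++) {
                /* With concurrent flushes the VMID is shared per
                 * process, so the idle check must consider fences
                 * from all rings (modeled here as ring id -1);
                 * otherwise only the submitting ring (id 0). */
                int peek_ring = concurrent_flush(t) ? -1 : 0;

                printf("asic %d: concurrent_flush=%d peek_ring=%d\n",
                       (int)t, concurrent_flush(t), peek_ring);
        }
        return 0;
}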
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 19 +++++++++++--------
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 +
3 files changed, 18 insertions(+), 8 deletions(-)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+ /* Don't use per engine and per process VMID at the same time */
+ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+ NULL : ring;
+
+ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
if (!fences[i])
break;
++i;
@@ -280,7 +284,7 @@ static int amdgpu_vmid_grab_reserved(str
if (updates && (*id)->flushed_updates &&
updates->context == (*id)->flushed_updates->context &&
!dma_fence_is_later(updates, (*id)->flushed_updates))
- updates = NULL;
+ updates = NULL;
if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
@@ -289,6 +293,10 @@ static int amdgpu_vmid_grab_reserved(str
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
+ /* Don't use per engine and per process VMID at the same time */
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
/* to prevent one context starved by another context */
(*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
@@ -364,12 +372,7 @@ static int amdgpu_vmid_grab_used(struct
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
- /* Concurrent flushes are only possible starting with Vega10 and
- * are broken on Navi10 and Navi14.
- */
- if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
- adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14))
+ if (needs_flush && !adev->vm_manager.concurrent_flush)
continue;
/* Good, we can use this VMID. Remember this submission as
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3173,6 +3173,12 @@ void amdgpu_vm_manager_init(struct amdgp
{
unsigned i;
+ /* Concurrent flushes are only possible starting with Vega10 and
+ * are broken on Navi10 and Navi14.
+ */
+ adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+ adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -325,6 +325,7 @@ struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
unsigned int first_kfd_vmid;
+ bool concurrent_flush;
/* Handling of VM fences */
u64 fence_context;
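[Editor's note: for context on why passing a NULL ring is the
stricter choice. My understanding of amdgpu_sync_peek_fence() is
that fences emitted by the given ring can be elided because a ring
executes in order (in the kernel it is enough that they are
scheduled; simplified to "skipped" here), while with ring == NULL
every unsignaled fence blocks reuse. A hedged user-space model with
hypothetical types, not the kernel implementation:]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical model of a tracked fence: which ring emitted it and
 * whether it has signaled. Not the kernel's struct dma_fence. */
struct model_fence {
        int ring_id;
        bool signaled;
};

/*
 * Model of the peek-fence idea as the patch uses it: return the
 * first unsignaled fence that would delay VMID reuse.
 * ring_id >= 0: fences from that ring are skipped, since in-order
 * execution on the same ring already serializes them.
 * ring_id < 0 (the NULL-ring case): nothing is skipped, so a VMID
 * shared across engines is only reused once all work has finished.
 */
static struct model_fence *peek_fence(struct model_fence *fences,
                                      size_t n, int ring_id)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (fences[i].signaled)
                        continue;
                if (ring_id >= 0 && fences[i].ring_id == ring_id)
                        continue;
                return &fences[i];
        }
        return NULL;    /* VMID is idle enough to be grabbed */
}

int main(void)
{
        /* One unsignaled fence on ring 0: per-engine allocation
         * (ring_id 0) treats the VMID as reusable, while the
         * per-process/NULL-ring case does not, matching the fix. */
        struct model_fence fences[] = {
                { .ring_id = 0, .signaled = false },
        };

        printf("per-engine (ring 0): busy=%d\n",
               peek_fence(fences, 1, 0) != NULL);
        printf("per-process (NULL ring): busy=%d\n",
               peek_fence(fences, 1, -1) != NULL);
        return 0;
}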