Message-Id: <20191210210402.8367-15-sashal@kernel.org>
Date: Tue, 10 Dec 2019 15:58:27 -0500
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Christian König <christian.koenig@....com>,
Felix Kuehling <Felix.Kuehling@....com>,
Alex Deucher <alexander.deucher@....com>,
Sasha Levin <sashal@...nel.org>,
dri-devel@...ts.freedesktop.org
Subject: [PATCH AUTOSEL 5.4 015/350] drm/ttm: return -EBUSY on pipelining with no_gpu_wait (v2)
From: Christian König <christian.koenig@....com>
[ Upstream commit 3084cf46cf8110826a42de8c8ef30e8fa48974c2 ]
Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.

v2: squash in mem leak fix, rebase
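
A minimal sketch of the caller side, assuming a driver-owned bo and
placement; the helper name try_validate_no_gpu_wait() is hypothetical,
while ttm_operation_ctx and ttm_bo_validate() are the existing v5.4
TTM API:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper: try to place a BO without blocking on GPU work. */
static int try_validate_no_gpu_wait(struct ttm_buffer_object *bo,
				    struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,	/* with this patch: -EBUSY instead of waiting */
	};

	return ttm_bo_validate(bo, placement, &ctx);
}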
Signed-off-by: Christian König <christian.koenig@....com>
Acked-by: Felix Kuehling <Felix.Kuehling@....com>
Signed-off-by: Alex Deucher <alexander.deucher@....com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/gpu/drm/ttm/ttm_bo.c | 44 +++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 18 deletions(-)
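
As a reading aid for the last hunk, a simplified model of the first
placement loop in ttm_bo_mem_space() after this change; the helper
try_one_placement() is a hypothetical stand-in for "allocate a node,
then ttm_bo_add_move_fence()" and is not a TTM symbol:

static int first_idle_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				struct ttm_operation_ctx *ctx)
{
	unsigned int i;
	int ret;

	for (i = 0; i < placement->num_placement; ++i) {
		ret = try_one_placement(bo, placement, i, mem, ctx);	/* hypothetical */
		if (ret == -EBUSY)
			continue;	/* space found, but the GPU still owns it: try the next one */
		if (ret)
			return ret;	/* hard failure ends the search */
		return 0;		/* success */
	}

	return -ENOMEM;	/* the caller then falls back to the busy placements */
}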
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025f..f078036998092 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
-				 struct ttm_mem_reg *mem)
+				 struct ttm_mem_reg *mem,
+				 bool no_wait_gpu)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
-	if (fence) {
-		dma_resv_add_shared_fence(bo->base.resv, fence);
+	if (!fence)
+		return 0;
 
-		ret = dma_resv_reserve_shared(bo->base.resv, 1);
-		if (unlikely(ret)) {
-			dma_fence_put(fence);
-			return ret;
-		}
+	if (no_wait_gpu)
+		return -EBUSY;
+
+	dma_resv_add_shared_fence(bo->base.resv, fence);
 
-		dma_fence_put(bo->moving);
-		bo->moving = fence;
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	if (unlikely(ret)) {
+		dma_fence_put(fence);
+		return ret;
 	}
 
+	dma_fence_put(bo->moving);
+	bo->moving = fence;
 	return 0;
 }
 
@@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem);
+	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		if (mem->mm_node) {
-			ret = ttm_bo_add_move_fence(bo, man, mem);
-			if (unlikely(ret)) {
-				(*man->func->put_node)(man, mem);
-				goto error;
-			}
-			return 0;
+		if (!mem->mm_node)
+			continue;
+
+		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		if (unlikely(ret)) {
+			(*man->func->put_node)(man, mem);
+			if (ret == -EBUSY)
+				continue;
+
+			goto error;
 		}
+		return 0;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
--
2.20.1