Message-ID: <MWHPR1201MB01274F458B316CF0BD44AFB7FDCC0@MWHPR1201MB0127.namprd12.prod.outlook.com>
Date: Fri, 23 Feb 2018 09:29:00 +0000
From: "He, Roger" <Hongbo.He@....com>
To: Christian König
<ckoenig.leichtzumerken@...il.com>,
"amd-gfx@...ts.freedesktop.org" <amd-gfx@...ts.freedesktop.org>,
"dri-devel@...ts.freedesktop.org" <dri-devel@...ts.freedesktop.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH 4/4] drm/ttm: keep BOs reserved until end of eviction
Looks good to me. Reviewed-by: Roger He <Hongbo.He@....com>
-----Original Message-----
From: dri-devel [mailto:dri-devel-bounces@...ts.freedesktop.org] On Behalf Of Christian König
Sent: Tuesday, February 20, 2018 8:58 PM
To: amd-gfx@...ts.freedesktop.org; dri-devel@...ts.freedesktop.org; linux-kernel@...r.kernel.org
Subject: [PATCH 4/4] drm/ttm: keep BOs reserved until end of eviction
This avoids problems when BOs are evicted but then immediately moved back into the domain by other threads before the eviction has finished.
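The pattern is easy to model outside of TTM: reserve each buffer while it is being evicted, park it on a local list instead of unreserving it, and drop the reservations only once the new placement has been secured, so no other thread can slip the evicted buffers back into the domain in the meantime. Below is a minimal userspace C sketch of that idea; the names (fake_bo, evict_one, evict_cleanup) are made up for illustration and are not TTM API.

/*
 * Illustrative only: a userspace stand-in for the "keep evicted BOs
 * reserved until eviction is finished" idea. fake_bo and its fields
 * are hypothetical, not TTM structures.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_bo {
	pthread_mutex_t resv;   /* stands in for the BO reservation lock */
	struct fake_bo *next;   /* stands in for the lru/evicted list link */
	int id;
};

/* Evict one buffer: take its lock and queue it instead of unlocking it. */
static void evict_one(struct fake_bo *bo, struct fake_bo **evicted)
{
	pthread_mutex_lock(&bo->resv);
	/* ... move the backing storage out of the contended domain ... */
	bo->next = *evicted;
	*evicted = bo;          /* still locked: nobody can move it back */
}

/* Drop all the locks only after the new placement has been secured. */
static void evict_cleanup(struct fake_bo **evicted)
{
	while (*evicted) {
		struct fake_bo *bo = *evicted;

		*evicted = bo->next;
		pthread_mutex_unlock(&bo->resv);
	}
}

int main(void)
{
	struct fake_bo a = { .resv = PTHREAD_MUTEX_INITIALIZER, .id = 1 };
	struct fake_bo b = { .resv = PTHREAD_MUTEX_INITIALIZER, .id = 2 };
	struct fake_bo *evicted = NULL;

	evict_one(&a, &evicted);
	evict_one(&b, &evicted);
	/* ... allocate space for the new buffer in the freed domain ... */
	evict_cleanup(&evicted);
	printf("evicted %d and %d, unreserved only after placement\n",
	       a.id, b.id);
	return 0;
}

The patch below does the equivalent inside TTM: ttm_mem_evict_first() now parks still-reserved BOs on an evicted list, and ttm_mem_evict_cleanup() unreserves them only after the get_node() loop and ttm_bo_add_move_fence() have run.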
Signed-off-by: Christian König <christian.koenig@....com>
---
drivers/gpu/drm/ttm/ttm_bo.c | 37 +++++++++++++++++++++++++++++--------
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3a44c2ee4155..593a0216faff 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -742,7 +742,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
-			       struct ttm_operation_ctx *ctx)
+			       struct ttm_operation_ctx *ctx,
+			       struct list_head *evicted)
 {
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -792,17 +793,28 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,

	ret = ttm_bo_evict(bo, ctx);
	if (locked) {
-		ttm_bo_unreserve(bo);
+		list_add_tail(&bo->lru, evicted);
	} else {
		spin_lock(&glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&glob->lru_lock);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
	}

-	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
 }

+static void ttm_mem_evict_cleanup(struct list_head *evicted)
+{
+	struct ttm_buffer_object *bo, *tmp;
+
+	list_for_each_entry_safe(bo, tmp, evicted, lru) {
+		list_del_init(&bo->lru);
+		ttm_bo_unreserve(bo);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+	}
+}
+
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
@@ -852,20 +864,26 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 {
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct list_head evicted;
	int ret;

+	INIT_LIST_HEAD(&evicted);
	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
-		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx, &evicted);
		if (unlikely(ret != 0))
-			return ret;
+			goto error;
	} while (1);
	mem->mem_type = mem_type;
-	return ttm_bo_add_move_fence(bo, man, mem);
+	ret = ttm_bo_add_move_fence(bo, man, mem);
+
+error:
+	ttm_mem_evict_cleanup(&evicted);
+	return ret;
 }

 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1345,6 +1363,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
+	struct list_head evicted;
	struct dma_fence *fence;
	int ret;
	unsigned i;
@@ -1352,18 +1371,20 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
	/*
	 * Can't use standard list traversal since we're unlocking.
	 */
-
+	INIT_LIST_HEAD(&evicted);
	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
+						  &evicted);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);
+	ttm_mem_evict_cleanup(&evicted);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
--
2.14.1
_______________________________________________
dri-devel mailing list
dri-devel@...ts.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel