Message-ID: <20251121101315.3585-9-pierre-eric.pelloux-prayer@amd.com>
Date: Fri, 21 Nov 2025 11:12:18 +0100
From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
To: Alex Deucher <alexander.deucher@amd.com>,
Christian König <christian.koenig@amd.com>, David Airlie
<airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>
CC: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>,
<amd-gfx@lists.freedesktop.org>, <dri-devel@lists.freedesktop.org>,
<linux-kernel@vger.kernel.org>
Subject: [PATCH v3 08/28] drm/amdgpu: pass the entity to use to amdgpu_ttm_map_buffer

This way the caller can select the entity it wants to use.
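
For illustration, the new calling convention (a minimal sketch taken from
the amdgpu_ttm_clear_buffer hunk below, shown here only as an example):

	/* Before: amdgpu_ttm_map_buffer picked the entity itself
	 * (it always used adev->mman.default_entity).
	 */
	r = amdgpu_ttm_map_buffer(adev, &bo->tbo, bo->tbo.resource,
				  &cursor, 1, false, &size, &addr);

	/* After: the caller passes the entity it wants the window
	 * setup job to run on, e.g. the clear entity here.
	 */
	r = amdgpu_ttm_map_buffer(adev, &adev->mman.clear_entity,
				  &bo->tbo, bo->tbo.resource,
				  &cursor, 1, false, &size, &addr);
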
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 55 ++++++++++++++++---------
1 file changed, 35 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 353682c0e8f0..3d850893b97f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -177,6 +177,7 @@ amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job, u32 nu
/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @adev: the device being used
+ * @entity: entity to run the window setup job
* @bo: buffer object to map
* @mem: memory object to map
* @mm_cur: range to map
@@ -189,6 +190,7 @@ amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job, u32 nu
* the physical address for local memory.
*/
static int amdgpu_ttm_map_buffer(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct amdgpu_res_cursor *mm_cur,
@@ -235,7 +237,7 @@ static int amdgpu_ttm_map_buffer(struct amdgpu_device *adev,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
+ r = amdgpu_job_alloc_with_ib(adev, &entity->base,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job,
@@ -275,6 +277,7 @@ static int amdgpu_ttm_map_buffer(struct amdgpu_device *adev,
/**
* amdgpu_ttm_copy_mem_to_mem - Helper function for copy
* @adev: amdgpu device
+ * @entity: entity to run the jobs
* @src: buffer/address where to read from
* @dst: buffer/address where to write to
* @size: number of bytes to copy
@@ -289,6 +292,7 @@ static int amdgpu_ttm_map_buffer(struct amdgpu_device *adev,
*/
__attribute__((nonnull))
static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
const struct amdgpu_copy_mem *src,
const struct amdgpu_copy_mem *dst,
uint64_t size, bool tmz,
@@ -320,12 +324,14 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
/* Map src to window 0 and dst to window 1. */
- r = amdgpu_ttm_map_buffer(adev, src->bo, src->mem, &src_mm,
+ r = amdgpu_ttm_map_buffer(adev, entity,
+ src->bo, src->mem, &src_mm,
0, tmz, &cur_size, &from);
if (r)
goto error;
- r = amdgpu_ttm_map_buffer(adev, dst->bo, dst->mem, &dst_mm,
+ r = amdgpu_ttm_map_buffer(adev, entity,
+ dst->bo, dst->mem, &dst_mm,
1, tmz, &cur_size, &to);
if (r)
goto error;
@@ -394,7 +400,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
src.offset = 0;
dst.offset = 0;
- r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ r = amdgpu_ttm_copy_mem_to_mem(adev,
+ &adev->mman.move_entity,
+ &src, &dst,
new_mem->size,
amdgpu_bo_encrypted(abo),
bo->base.resv, &fence);
@@ -2220,17 +2228,16 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
}
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
unsigned int num_dw,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed, u64 k_job_id)
+ u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
int r;
- struct drm_sched_entity *entity = delayed ? &adev->mman.clear_entity.base :
- &adev->mman.move_entity.base;
- r = amdgpu_job_alloc_with_ib(adev, entity,
+ r = amdgpu_job_alloc_with_ib(adev, &entity->base,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, pool, job, k_job_id);
if (r) {
@@ -2275,8 +2282,8 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_ttm_prepare_job(adev, num_dw,
- resv, vm_needs_flush, &job, false,
+ r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
+ resv, vm_needs_flush, &job,
AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
goto error_free;
@@ -2301,11 +2308,13 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
return r;
}
-static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
+static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
+ struct amdgpu_ttm_buffer_entity *entity,
+ uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed,
+ bool vm_needs_flush,
u64 k_job_id)
{
unsigned int num_loops, num_dw;
@@ -2317,8 +2326,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
- r = amdgpu_ttm_prepare_job(adev, num_dw, resv, vm_needs_flush,
- &job, delayed, k_job_id);
+ r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
+ vm_needs_flush, &job, k_job_id);
if (r)
return r;
@@ -2379,13 +2388,14 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
/* Never clear more than 256MiB at once to avoid timeouts */
size = min(cursor.size, 256ULL << 20);
- r = amdgpu_ttm_map_buffer(adev, &bo->tbo, bo->tbo.resource, &cursor,
+ r = amdgpu_ttm_map_buffer(adev, &adev->mman.clear_entity,
+ &bo->tbo, bo->tbo.resource, &cursor,
1, false, &size, &addr);
if (r)
goto err;
- r = amdgpu_ttm_fill_mem(adev, 0, addr, size, resv,
- &next, true, true,
+ r = amdgpu_ttm_fill_mem(adev, &adev->mman.clear_entity, 0, addr, size, resv,
+ &next, true,
AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2409,10 +2419,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ttm_buffer_entity *entity;
struct dma_fence *fence = NULL;
struct amdgpu_res_cursor dst;
int r;
+ entity = delayed ? &adev->mman.clear_entity :
+ &adev->mman.move_entity;
+
if (!adev->mman.buffer_funcs_enabled) {
dev_err(adev->dev,
"Trying to clear memory with ring turned off.\n");
@@ -2429,13 +2443,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
/* Never fill more than 256MiB at once to avoid timeouts */
cur_size = min(dst.size, 256ULL << 20);
- r = amdgpu_ttm_map_buffer(adev, &bo->tbo, bo->tbo.resource, &dst,
+ r = amdgpu_ttm_map_buffer(adev, &adev->mman.default_entity,
+ &bo->tbo, bo->tbo.resource, &dst,
1, false, &cur_size, &to);
if (r)
goto error;
- r = amdgpu_ttm_fill_mem(adev, src_data, to, cur_size, resv,
- &next, true, delayed, k_job_id);
+ r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
+ &next, true, k_job_id);
if (r)
goto error;
--
2.43.0