Message-ID: <0dbd6677-0ce6-495c-83d5-09bc3c0d6f2e@amd.com>
Date: Fri, 21 Nov 2025 16:05:56 +0100
From: Christian König <christian.koenig@....com>
To: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
Alex Deucher <alexander.deucher@....com>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>
Cc: amd-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 17/28] drm/amdgpu: introduce amdgpu_sdma_set_vm_pte_scheds

On 11/21/25 11:12, Pierre-Eric Pelloux-Prayer wrote:
> All SDMA versions use the same logic, so add a helper and move the
> common code to a single place.
>
> ---
> v2: pass amdgpu_vm_pte_funcs as well
> v3: drop all the *_set_vm_pte_funcs one liners
> ---
>
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>
Reviewed-by: Christian König <christian.koenig@....com>
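
For anyone skimming the series: the consolidation boils down to replacing the
per-IP open-coded loop (simplified here from the cik_sdma.c hunks below, not
the literal driver code)

	static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
	{
		unsigned i;

		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}

with a single call from each early_init callback

	amdgpu_sdma_set_vm_pte_scheds(adev, &cik_sdma_vm_pte_funcs);

while the shared helper in amdgpu_vm.c carries the has_page_queue check, so
SDMA versions with a page queue still pick the page queue scheduler.
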
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 ++++++++++++
> drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 31 ++++++---------------
> drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 31 ++++++---------------
> drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 31 ++++++---------------
> drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 35 ++++++------------------
> drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 35 ++++++------------------
> drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 31 ++++++---------------
> drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 31 ++++++---------------
> drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 29 ++++++--------------
> drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 29 ++++++--------------
> drivers/gpu/drm/amd/amdgpu/si_dma.c | 31 ++++++---------------
> 12 files changed, 105 insertions(+), 228 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 790e84fec949..a50e3c0a4b18 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1612,6 +1612,8 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
> bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
> ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
> ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
> +void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
> + const struct amdgpu_vm_pte_funcs *vm_pte_funcs);
>
> /* atpx handler */
> #if defined(CONFIG_VGA_SWITCHEROO)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 193de267984e..5061d5b0f875 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -3228,3 +3228,20 @@ void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
> task_info->process_name, task_info->tgid,
> task_info->task.comm, task_info->task.pid);
> }
> +
> +void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
> + const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
> +{
> + struct drm_gpu_scheduler *sched;
> + int i;
> +
> + for (i = 0; i < adev->sdma.num_instances; i++) {
> + if (adev->sdma.has_page_queue)
> + sched = &adev->sdma.instance[i].page.sched;
> + else
> + sched = &adev->sdma.instance[i].ring.sched;
> + adev->vm_manager.vm_pte_scheds[i] = sched;
> + }
> + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> + adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> index 9e8715b4739d..22780c09177d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> @@ -53,7 +53,6 @@ static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
> static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
> static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
> static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
> -static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
> static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
>
> u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
> @@ -919,6 +918,14 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
> }
> }
>
> +static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = cik_sdma_vm_copy_pte,
> +
> + .write_pte = cik_sdma_vm_write_pte,
> + .set_pte_pde = cik_sdma_vm_set_pte_pde,
> +};
> +
> static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -933,7 +940,7 @@ static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
> cik_sdma_set_ring_funcs(adev);
> cik_sdma_set_irq_funcs(adev);
> cik_sdma_set_buffer_funcs(adev);
> - cik_sdma_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &cik_sdma_vm_pte_funcs);
>
> return 0;
> }
> @@ -1337,26 +1344,6 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = cik_sdma_vm_copy_pte,
> -
> - .write_pte = cik_sdma_vm_write_pte,
> - .set_pte_pde = cik_sdma_vm_set_pte_pde,
> -};
> -
> -static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version cik_sdma_ip_block =
> {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> index 92ce580647cd..0090ace49024 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> @@ -51,7 +51,6 @@
>
> static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
>
> MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
> @@ -809,6 +808,14 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
> amdgpu_ring_write(ring, val);
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v2_4_vm_copy_pte,
> +
> + .write_pte = sdma_v2_4_vm_write_pte,
> + .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
> +};
> +
> static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -822,7 +829,7 @@ static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v2_4_set_ring_funcs(adev);
> sdma_v2_4_set_buffer_funcs(adev);
> - sdma_v2_4_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v2_4_vm_pte_funcs);
> sdma_v2_4_set_irq_funcs(adev);
>
> return 0;
> @@ -1232,26 +1239,6 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v2_4_vm_copy_pte,
> -
> - .write_pte = sdma_v2_4_vm_write_pte,
> - .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
> -};
> -
> -static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> .major = 2,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> index 1c076bd1cf73..2526d393162a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> @@ -51,7 +51,6 @@
>
> static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
>
> MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
> @@ -1082,6 +1081,14 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
> amdgpu_ring_write(ring, val);
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v3_0_vm_copy_pte,
> +
> + .write_pte = sdma_v3_0_vm_write_pte,
> + .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
> +};
> +
> static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1102,7 +1109,7 @@ static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v3_0_set_ring_funcs(adev);
> sdma_v3_0_set_buffer_funcs(adev);
> - sdma_v3_0_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v3_0_vm_pte_funcs);
> sdma_v3_0_set_irq_funcs(adev);
>
> return 0;
> @@ -1674,26 +1681,6 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v3_0_vm_copy_pte,
> -
> - .write_pte = sdma_v3_0_vm_write_pte,
> - .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
> -};
> -
> -static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
> {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index f38004e6064e..a35d9951e22a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -129,7 +129,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = {
>
> static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
> static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
>
> @@ -1751,6 +1750,14 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
> }
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v4_0_vm_copy_pte,
> +
> + .write_pte = sdma_v4_0_vm_write_pte,
> + .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
> +};
> +
> static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1769,7 +1776,7 @@ static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v4_0_set_ring_funcs(adev);
> sdma_v4_0_set_buffer_funcs(adev);
> - sdma_v4_0_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_0_vm_pte_funcs);
> sdma_v4_0_set_irq_funcs(adev);
> sdma_v4_0_set_ras_funcs(adev);
>
> @@ -2615,30 +2622,6 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v4_0_vm_copy_pte,
> -
> - .write_pte = sdma_v4_0_vm_write_pte,
> - .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
> -};
> -
> -static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - struct drm_gpu_scheduler *sched;
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - if (adev->sdma.has_page_queue)
> - sched = &adev->sdma.instance[i].page.sched;
> - else
> - sched = &adev->sdma.instance[i].ring.sched;
> - adev->vm_manager.vm_pte_scheds[i] = sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> static void sdma_v4_0_get_ras_error_count(uint32_t value,
> uint32_t instance,
> uint32_t *sec_count)
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> index a1443990d5c6..7f77367848d4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> @@ -104,7 +104,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
>
> static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
> static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
> static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
> @@ -1347,6 +1346,14 @@ static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
> .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
> };
>
> +static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v4_4_2_vm_copy_pte,
> +
> + .write_pte = sdma_v4_4_2_vm_write_pte,
> + .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
> +};
> +
> static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1362,7 +1369,7 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v4_4_2_set_ring_funcs(adev);
> sdma_v4_4_2_set_buffer_funcs(adev);
> - sdma_v4_4_2_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_4_2_vm_pte_funcs);
> sdma_v4_4_2_set_irq_funcs(adev);
> sdma_v4_4_2_set_ras_funcs(adev);
> return 0;
> @@ -2316,30 +2323,6 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v4_4_2_vm_copy_pte,
> -
> - .write_pte = sdma_v4_4_2_vm_write_pte,
> - .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
> -};
> -
> -static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - struct drm_gpu_scheduler *sched;
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - if (adev->sdma.has_page_queue)
> - sched = &adev->sdma.instance[i].page.sched;
> - else
> - sched = &adev->sdma.instance[i].ring.sched;
> - adev->vm_manager.vm_pte_scheds[i] = sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> /**
> * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
> * @adev: Pointer to the AMDGPU device structure
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> index 8ddc4df06a1f..7ce13c5d4e61 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> @@ -110,7 +110,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
>
> static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
> static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
> static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
> @@ -1357,6 +1356,13 @@ static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
> .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
> };
>
> +static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v5_0_vm_copy_pte,
> + .write_pte = sdma_v5_0_vm_write_pte,
> + .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
> +};
> +
> static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1368,7 +1374,7 @@ static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v5_0_set_ring_funcs(adev);
> sdma_v5_0_set_buffer_funcs(adev);
> - sdma_v5_0_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_0_vm_pte_funcs);
> sdma_v5_0_set_irq_funcs(adev);
> sdma_v5_0_set_mqd_funcs(adev);
>
> @@ -2073,27 +2079,6 @@ static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
> }
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v5_0_vm_copy_pte,
> - .write_pte = sdma_v5_0_vm_write_pte,
> - .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
> -};
> -
> -static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - if (adev->vm_manager.vm_pte_funcs == NULL) {
> - adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> - }
> -}
> -
> const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> .major = 5,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> index 51101b0aa2fa..98beff18cf28 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> @@ -111,7 +111,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
>
> static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
> static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
> static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
> @@ -1248,6 +1247,13 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
> amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v5_2_vm_copy_pte,
> + .write_pte = sdma_v5_2_vm_write_pte,
> + .set_pte_pde = sdma_v5_2_vm_set_pte_pde,
> +};
> +
> static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1259,7 +1265,7 @@ static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v5_2_set_ring_funcs(adev);
> sdma_v5_2_set_buffer_funcs(adev);
> - sdma_v5_2_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_2_vm_pte_funcs);
> sdma_v5_2_set_irq_funcs(adev);
> sdma_v5_2_set_mqd_funcs(adev);
>
> @@ -2084,27 +2090,6 @@ static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
> }
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v5_2_vm_copy_pte,
> - .write_pte = sdma_v5_2_vm_write_pte,
> - .set_pte_pde = sdma_v5_2_vm_set_pte_pde,
> -};
> -
> -static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - if (adev->vm_manager.vm_pte_funcs == NULL) {
> - adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> - }
> -}
> -
> const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> .major = 5,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> index e3f725bc2f29..c32331b72ba0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> @@ -119,7 +119,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
>
> static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
> static int sdma_v6_0_start(struct amdgpu_device *adev);
>
> @@ -1268,6 +1267,13 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
> }
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
> + .copy_pte_num_dw = 7,
> + .copy_pte = sdma_v6_0_vm_copy_pte,
> + .write_pte = sdma_v6_0_vm_write_pte,
> + .set_pte_pde = sdma_v6_0_vm_set_pte_pde,
> +};
> +
> static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1296,7 +1302,7 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v6_0_set_ring_funcs(adev);
> sdma_v6_0_set_buffer_funcs(adev);
> - sdma_v6_0_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v6_0_vm_pte_funcs);
> sdma_v6_0_set_irq_funcs(adev);
> sdma_v6_0_set_mqd_funcs(adev);
> sdma_v6_0_set_ras_funcs(adev);
> @@ -1889,25 +1895,6 @@ static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
> - .copy_pte_num_dw = 7,
> - .copy_pte = sdma_v6_0_vm_copy_pte,
> - .write_pte = sdma_v6_0_vm_write_pte,
> - .set_pte_pde = sdma_v6_0_vm_set_pte_pde,
> -};
> -
> -static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> .major = 6,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> index 7fee98d37720..9318d23eb71e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> @@ -119,7 +119,6 @@ static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
>
> static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
> static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
> -static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev);
> static int sdma_v7_0_start(struct amdgpu_device *adev);
>
> @@ -1253,6 +1252,13 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
> amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
> }
>
> +static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
> + .copy_pte_num_dw = 8,
> + .copy_pte = sdma_v7_0_vm_copy_pte,
> + .write_pte = sdma_v7_0_vm_write_pte,
> + .set_pte_pde = sdma_v7_0_vm_set_pte_pde,
> +};
> +
> static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -1283,7 +1289,7 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
>
> sdma_v7_0_set_ring_funcs(adev);
> sdma_v7_0_set_buffer_funcs(adev);
> - sdma_v7_0_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_0_vm_pte_funcs);
> sdma_v7_0_set_irq_funcs(adev);
> sdma_v7_0_set_mqd_funcs(adev);
>
> @@ -1831,25 +1837,6 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
> - .copy_pte_num_dw = 8,
> - .copy_pte = sdma_v7_0_vm_copy_pte,
> - .write_pte = sdma_v7_0_vm_write_pte,
> - .set_pte_pde = sdma_v7_0_vm_set_pte_pde,
> -};
> -
> -static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &sdma_v7_0_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version sdma_v7_0_ip_block = {
> .type = AMD_IP_BLOCK_TYPE_SDMA,
> .major = 7,
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> index 7f18e4875287..b85df997ed49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> @@ -37,7 +37,6 @@ const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
>
> static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
> static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
> -static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
> static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
>
> /**
> @@ -473,6 +472,14 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
> amdgpu_ring_write(ring, val);
> }
>
> +static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
> + .copy_pte_num_dw = 5,
> + .copy_pte = si_dma_vm_copy_pte,
> +
> + .write_pte = si_dma_vm_write_pte,
> + .set_pte_pde = si_dma_vm_set_pte_pde,
> +};
> +
> static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
> {
> struct amdgpu_device *adev = ip_block->adev;
> @@ -481,7 +488,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
>
> si_dma_set_ring_funcs(adev);
> si_dma_set_buffer_funcs(adev);
> - si_dma_set_vm_pte_funcs(adev);
> + amdgpu_sdma_set_vm_pte_scheds(adev, &si_dma_vm_pte_funcs);
> si_dma_set_irq_funcs(adev);
>
> return 0;
> @@ -830,26 +837,6 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
> adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
> }
>
> -static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
> - .copy_pte_num_dw = 5,
> - .copy_pte = si_dma_vm_copy_pte,
> -
> - .write_pte = si_dma_vm_write_pte,
> - .set_pte_pde = si_dma_vm_set_pte_pde,
> -};
> -
> -static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
> -{
> - unsigned i;
> -
> - adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - adev->vm_manager.vm_pte_scheds[i] =
> - &adev->sdma.instance[i].ring.sched;
> - }
> - adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -}
> -
> const struct amdgpu_ip_block_version si_dma_ip_block =
> {
> .type = AMD_IP_BLOCK_TYPE_SDMA,