Message-ID: <180b5bcc-507b-4136-b56b-77553c898ce8@damsy.net>
Date: Wed, 5 Nov 2025 11:17:54 +0100
From: Pierre-Eric Pelloux-Prayer <pierre-eric@...sy.net>
To: Christian König <christian.koenig@....com>,
 Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
 Alex Deucher <alexander.deucher@....com>, David Airlie <airlied@...il.com>,
 Simona Vetter <simona@...ll.ch>
Cc: amd-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
 linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 16/20] drm/amdgpu: give ttm entities access to all the
 sdma scheds



On 05/11/2025 at 09:45, Christian König wrote:
> 
> 
> On 11/4/25 09:35, Pierre-Eric Pelloux-Prayer wrote:
>> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 ++++++++++-------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
>>   2 files changed, 11 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 47eaab9350ae..d88bdb2ac083 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -2187,8 +2187,8 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>>   	u32 num_clear_entities, num_move_entities;
>>   	u32 windows, w;
>>   
>> -	num_clear_entities = adev->sdma.num_instances;
>> -	num_move_entities = MIN(adev->sdma.num_instances, TTM_FENCES_MAX_SLOT_COUNT);
>> +	num_clear_entities = MIN(adev->mman.num_buffer_funcs_rings, TTM_FENCES_MAX_SLOT_COUNT);
>> +	num_move_entities = MIN(adev->mman.num_buffer_funcs_rings, TTM_FENCES_MAX_SLOT_COUNT);
>>   	man->pipelined_eviction.n_fences = num_move_entities;
>>   	windows = adev->gmc.is_app_apu ? 0 : (2 * num_move_entities + num_clear_entities);
>>   
>> @@ -2197,13 +2197,8 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>>   		return windows;
>>   
>>   	if (enable) {
>> -		struct amdgpu_ring *ring;
>> -		struct drm_gpu_scheduler *sched;
>> -
>> -		ring = adev->mman.buffer_funcs_rings[0];
>> -		sched = &ring->sched;
>>   		r = drm_sched_entity_init(&adev->mman.default_entity.base,
>> -					  DRM_SCHED_PRIORITY_KERNEL, &sched,
>> +					  DRM_SCHED_PRIORITY_KERNEL, adev->mman.scheds,
>>   					  1, NULL);
>>   		if (r) {
>>   			dev_err(adev->dev, "Failed setting up entity (%d)\n",
>> @@ -2215,8 +2210,8 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>>   		atomic_set(&adev->mman.next_move_entity, 0);
>>   		for (i = 0; i < num_move_entities; i++) {
>>   			r = drm_sched_entity_init(&adev->mman.move_entities[i].base,
>> -						  DRM_SCHED_PRIORITY_NORMAL, &sched,
>> -						  1, NULL);
>> +						  DRM_SCHED_PRIORITY_NORMAL, adev->mman.scheds,
>> +						  adev->mman.num_buffer_funcs_rings, NULL);
>>   			if (r) {
>>   				dev_err(adev->dev,
>>   					"Failed setting up TTM BO move entities (%d)\n",
>> @@ -2238,8 +2233,8 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>>   
>>   		for (i = 0; i < num_clear_entities; i++) {
>>   			r = drm_sched_entity_init(&adev->mman.clear_entities[i].base,
>> -						  DRM_SCHED_PRIORITY_NORMAL, &sched,
>> -						  1, NULL);
>> +						  DRM_SCHED_PRIORITY_NORMAL, adev->mman.scheds,
>> +						  adev->mman.num_buffer_funcs_rings, NULL);
>>   			if (r) {
>>   				for (j = 0; j < num_move_entities; j++)
>>   					drm_sched_entity_destroy(
>> @@ -2650,8 +2645,10 @@ void amdgpu_sdma_set_buffer_funcs_rings(struct amdgpu_device *adev)
>>   	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
>>   	int i;
>>   
>> -	for (i = 0; i < adev->sdma.num_instances; i++)
>> +	for (i = 0; i < adev->sdma.num_instances; i++) {
>>   		adev->mman.buffer_funcs_rings[i] = &adev->sdma.instance[i].ring;
>> +		adev->mman.scheds[i] = &adev->sdma.instance[i].ring.sched;
>> +	}
>>   
>>   	adev->mman.num_buffer_funcs_rings = hub->sdma_invalidation_workaround ?
>>   		1 : adev->sdma.num_instances;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> index d7fee371b814..c059a3d52b57 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> @@ -68,6 +68,7 @@ struct amdgpu_mman {
>>   	const struct amdgpu_buffer_funcs	*buffer_funcs;
>>   	struct amdgpu_ring			*buffer_funcs_rings[AMDGPU_MAX_RINGS];
>>   	u32					num_buffer_funcs_rings;
>> +	struct drm_gpu_scheduler		*scheds[AMDGPU_MAX_RINGS];
> 
> That looks like you are duplicating the data of buffer_funcs_rings. Why?

I need a sched array to pass to drm_sched_entity_init.
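
For reference, drm_sched_entity_init() takes an array of scheduler
pointers rather than rings, which is why buffer_funcs_rings can't be
passed directly. The prototype (as declared in
include/drm/gpu_scheduler.h, copied here for context):

	int drm_sched_entity_init(struct drm_sched_entity *entity,
				  enum drm_sched_priority priority,
				  struct drm_gpu_scheduler **sched_list,
				  unsigned int num_sched_list,
				  atomic_t *guilty);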

That being said, I think buffer_funcs_rings could be dropped and its
users updated to use scheds instead.
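
As a rough sketch of that refactor (untested, just to illustrate the
idea): since struct amdgpu_ring embeds its scheduler, any user that
still needs the ring could recover it from the scheds entry with the
existing to_amdgpu_ring() helper:

	/* to_amdgpu_ring() is container_of((s), struct amdgpu_ring, sched) */
	struct amdgpu_ring *ring = to_amdgpu_ring(adev->mman.scheds[i]);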

Pierre-Eric

