Message-ID: <63778ba1-6ae5-0cfe-9a29-6b8cbd8118e3@amd.com>
Date:   Thu, 17 Aug 2023 13:28:08 +0530
From:   "Yadav, Arvind" <arvyadav@....com>
To:     Shashank Sharma <shashank.sharma@....com>,
        Arvind Yadav <Arvind.Yadav@....com>, Christian.Koenig@....com,
        alexander.deucher@....com, Xinhui.Pan@....com, airlied@...il.com,
        daniel@...ll.ch, amd-gfx@...ts.freedesktop.org,
        dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/3] drm/amdgpu: Add new api to switch on/off power
 profile mode


On 8/14/2023 8:28 PM, Shashank Sharma wrote:
> Hey Arvind,
>
> On 14/08/2023 09:34, Arvind Yadav wrote:
>> This patch adds a function that allows changing the GPU power
>> profile based on a submitted job. This can optimize power and
>> performance while the workload is running.
>>
>> Cc: Shashank Sharma <shashank.sharma@....com>
>> Cc: Christian Koenig <christian.koenig@....com>
>> Cc: Alex Deucher <alexander.deucher@....com>
>> Signed-off-by: Arvind Yadav <Arvind.Yadav@....com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/Makefile           |   2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   3 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |   2 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c  | 156 ++++++++++++++++++
>>   drivers/gpu/drm/amd/include/amdgpu_workload.h |  44 +++++
>>   5 files changed, 206 insertions(+), 1 deletion(-)
>>   create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
>>   create mode 100644 drivers/gpu/drm/amd/include/amdgpu_workload.h
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
>> index 415a7fa395c4..6a9e187d61e1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
>> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
>> @@ -60,7 +60,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>>       amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
>>       amdgpu_fw_attestation.o amdgpu_securedisplay.o \
>>       amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
>> -    amdgpu_ring_mux.o
>> +    amdgpu_ring_mux.o amdgpu_workload.o
>>     amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 02b827785e39..1939fa1af8a6 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -107,6 +107,7 @@
>>   #include "amdgpu_fdinfo.h"
>>   #include "amdgpu_mca.h"
>>   #include "amdgpu_ras.h"
>> +#include "amdgpu_workload.h"
>>     #define MAX_GPU_INSTANCE        16
>>   @@ -1050,6 +1051,8 @@ struct amdgpu_device {
>>         bool                            job_hang;
>>       bool                            dc_enabled;
>> +
>> +    struct amdgpu_smu_workload    smu_workload;
>>   };
>>     static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 5c7d40873ee2..0ec18b8fe29f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -3672,6 +3672,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
>>   +    amdgpu_smu_workload_init(adev);
>> +
>>       adev->gfx.gfx_off_req_count = 1;
>>       adev->gfx.gfx_off_residency = 0;
>>       adev->gfx.gfx_off_entrycount = 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
>> new file mode 100644
>> index 000000000000..ce0339d75c12
>> --- /dev/null
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_workload.c
>> @@ -0,0 +1,156 @@
>> +// SPDX-License-Identifier: MIT
>> +/*
>> + * Copyright 2023 Advanced Micro Devices, Inc.
>> + *
>> + * Permission is hereby granted, free of charge, to any person 
>> obtaining a
>> + * copy of this software and associated documentation files (the 
>> "Software"),
>> + * to deal in the Software without restriction, including without 
>> limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, 
>> sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom 
>> the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be 
>> included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
>> EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
>> MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO 
>> EVENT SHALL
>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, 
>> DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
>> OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
>> USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + *
>> + */
>> +
>> +#include "amdgpu.h"
>> +
>> +/* 100 millisecond timeout */
>> +#define SMU_IDLE_TIMEOUT    msecs_to_jiffies(100)
>> +
>> +static enum PP_SMC_POWER_PROFILE
>> +ring_to_power_profile(uint32_t ring_type)
>> +{
>> +    switch (ring_type) {
>> +    case AMDGPU_RING_TYPE_GFX:
>> +        return PP_SMC_POWER_PROFILE_FULLSCREEN3D;
>> +    case AMDGPU_RING_TYPE_COMPUTE:
>> +        return PP_SMC_POWER_PROFILE_COMPUTE;
>> +    case AMDGPU_RING_TYPE_UVD:
>> +    case AMDGPU_RING_TYPE_VCE:
>> +    case AMDGPU_RING_TYPE_UVD_ENC:
>> +    case AMDGPU_RING_TYPE_VCN_DEC:
>> +    case AMDGPU_RING_TYPE_VCN_ENC:
>> +    case AMDGPU_RING_TYPE_VCN_JPEG:
>> +        return PP_SMC_POWER_PROFILE_VIDEO;
>> +    default:
>> +        return PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
>> +    }
>> +}
>> +
>> +static void
>> +amdgpu_power_profile_set(struct amdgpu_device *adev,
>> +             enum PP_SMC_POWER_PROFILE profile)
> This function expects the caller to hold the smu_workload_mutex; maybe
> we should document it.
>> +{
>> +    int ret = amdgpu_dpm_switch_power_profile(adev, profile, true);
> I think we should pass this return value to the caller instead of
> keeping the function void.
Noted,
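
A rough, untested sketch of what I have in mind for v2 (using
lockdep_assert_held() is just one way to document the locking
requirement; the exact shape is still to be finalized):

    /* Caller must hold adev->smu_workload.workload_lock */
    static int
    amdgpu_power_profile_set(struct amdgpu_device *adev,
                             enum PP_SMC_POWER_PROFILE profile)
    {
        int ret;

        lockdep_assert_held(&adev->smu_workload.workload_lock);

        ret = amdgpu_dpm_switch_power_profile(adev, profile, true);
        if (!ret) {
            /* Set the bit for the submitted workload profile */
            adev->smu_workload.submit_workload_status |= (1 << profile);
            atomic_inc(&adev->smu_workload.power_profile_ref[profile]);
        }

        return ret;
    }
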
>> +
>> +    if (ret == 0) {
>> +        /* Set the bit for the submitted workload profile */
>> +        adev->smu_workload.submit_workload_status |= (1 << profile);
>> +        atomic_inc(&adev->smu_workload.power_profile_ref[profile]);
>> +    } else {
>> +        DRM_ERROR("Failed to set power profile, error %d\n", ret);
> This can be a warning instead of an error.
Noted,
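
Will change it to something along these lines (exact message text TBD):

    DRM_WARN("Failed to set power profile, error %d\n", ret);
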
>> +    }
>> +
>> +}
>> +
>> +static void
>> +amdgpu_power_profile_clear(struct amdgpu_device *adev,
>> +               enum PP_SMC_POWER_PROFILE profile)
>> +{
>> +    int ret = amdgpu_dpm_switch_power_profile(adev, profile, false);
> same for return value here as well.
Noted,
>
>> +
>> +    if (ret == 0) {
>> +         /* Clear the bit for the submitted workload profile */
>> +        adev->smu_workload.submit_workload_status &= ~(1 << profile);
>> +    } else
>> +        DRM_ERROR("Failed to clear power profile, error %d\n", ret);
>> +
>> +}
>> +
>> +static void amdgpu_smu_idle_work_handler(struct work_struct *work)
>> +{
>> +
>> +    struct amdgpu_smu_workload *wl = container_of(work,
>> +                              struct amdgpu_smu_workload,
>> +                              smu_delayed_work.work);
>> +    struct amdgpu_device *adev = wl->adev;
>> +    bool reschedule = false;
>> +
>> +    mutex_lock(&adev->smu_workload.workload_lock);
>> +    for (int index  = fls(adev->smu_workload.submit_workload_status);
> This can be kept outside the for() for better readability and alignment.
Noted,
>> +         index >= 0; index--) {
>> +        if (!atomic_read(&adev->smu_workload.power_profile_ref[index]) &&
>> +            adev->smu_workload.submit_workload_status & (1 << index)) {
>> +            amdgpu_power_profile_clear(adev, index);
>> +        } else if (atomic_read(&adev->smu_workload.power_profile_ref[index]))
>> +            reschedule = true;
>> +    }
>> +
>
> This block can be re-arranged a bit for better readability, please consider:
>
> for () {
>     int val = atomic_read(&adev->smu_workload.power_profile_ref[index]);
>
>     if (val) {
>         reschedule = true;
>         break;
>     } else {
>         if (adev->smu_workload.submit_workload_status & (1 << index))
>             amdgpu_power_profile_clear(adev, index);
>     }
> }
>
Noted,
>> +    if (reschedule)
>> +        schedule_delayed_work(&adev->smu_workload.smu_delayed_work,
>> +                              SMU_IDLE_TIMEOUT);
> Please check the return value of schedule_delayed_work() here.
Noted,
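
Something like this, perhaps (rough sketch; as far as I understand,
schedule_delayed_work() returns false only when the work is already
queued, so a debug print may be enough for that case):

    if (reschedule) {
        if (!schedule_delayed_work(&adev->smu_workload.smu_delayed_work,
                                   SMU_IDLE_TIMEOUT))
            DRM_DEBUG("SMU idle work was already queued\n");
    }
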
>> +
>> +    mutex_unlock(&adev->smu_workload.workload_lock);
>> +}
>> +
>> +void amdgpu_put_workload_profile(struct amdgpu_device *adev,
>> +                 uint32_t ring_type)
>> +{
>> +
>> +    enum PP_SMC_POWER_PROFILE profile = ring_to_power_profile(ring_type);
>> +
>> +    if (profile == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT)
>> +        return;
>> +
>> +    mutex_lock(&adev->smu_workload.workload_lock);
>> +    atomic_dec(&adev->smu_workload.power_profile_ref[profile]);
>> +    schedule_delayed_work(&adev->smu_workload.smu_delayed_work, SMU_IDLE_TIMEOUT);
>> +    mutex_unlock(&adev->smu_workload.workload_lock);
>> +}
>> +
>> +void amdgpu_set_workload_profile(struct amdgpu_device *adev,
>> +                 uint32_t ring_type)
> I would prefer if you can split this patch into two: one just to set the
> profile, the other to clear the profile and schedule the work.
Noted,
>> +{
>> +    enum PP_SMC_POWER_PROFILE profile = ring_to_power_profile(ring_type);
>> +
>> +    if (profile == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT)
>> +        return;
>> +
>> +    mutex_lock(&adev->smu_workload.workload_lock);
>> +    cancel_delayed_work_sync(&adev->smu_workload.smu_delayed_work);
> Please check the return value here and proceed only when we were able 
> to cancel successfully.
Noted,
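
One thing to keep in mind (if I read the workqueue code correctly) is that
cancel_delayed_work_sync() also returns false when the work was simply not
pending, which should be fine to proceed from as well. A rough sketch of
checking the return value:

    /* true only if the idle work was queued and has now been cancelled */
    if (!cancel_delayed_work_sync(&adev->smu_workload.smu_delayed_work))
        DRM_DEBUG("no pending SMU idle work to cancel\n");
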
>> +
>> +    amdgpu_power_profile_set(adev, profile);
>> +
>> +    /* Clear the already finished jobs of higher power profile */
>> +    for (int index = fls(adev->smu_workload.submit_workload_status);
>> +         index > profile; index--) {
>> +        if (!atomic_read(&adev->smu_workload.power_profile_ref[index]) &&
>> +            adev->smu_workload.submit_workload_status & (1 << index)) {
>> +            amdgpu_power_profile_clear(adev, index);
>> +        }
>> +    }
>> +
>> +    mutex_unlock(&adev->smu_workload.workload_lock);
>> +}
>> +
>> +void amdgpu_smu_workload_init(struct amdgpu_device *adev)
>> +{
>> +    struct amdgpu_smu_workload wl;
>> +
>> +    wl.adev = adev;
>> +    wl.submit_workload_status = 0;
>> +    adev->smu_workload = wl;
>
> Why do we need the variable wl at all, which is a local variable on the
> stack? You can just do:
Noted,
>
> adev->smu_workload.adev = adev;
> adev->smu_workload.submit_workload_status = 0;
>
>> +
>> +    mutex_init(&adev->smu_workload.workload_lock);
>> +    INIT_DELAYED_WORK(&adev->smu_workload.smu_delayed_work, amdgpu_smu_idle_work_handler);
>
> Are we missing the corresponding amdgpu_smu_workload_fini() which will
> destroy the mutex?
>
Noted,
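
Yes, something along these lines is what I plan to add in v2 (rough sketch;
the exact call site in the device fini path is still to be decided):

    void amdgpu_smu_workload_fini(struct amdgpu_device *adev)
    {
        cancel_delayed_work_sync(&adev->smu_workload.smu_delayed_work);
        mutex_destroy(&adev->smu_workload.workload_lock);
    }
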

Regards,

~Arvind

> - Shashank
>
>> +}
>> diff --git a/drivers/gpu/drm/amd/include/amdgpu_workload.h b/drivers/gpu/drm/amd/include/amdgpu_workload.h
>> new file mode 100644
>> index 000000000000..09804c3d2869
>> --- /dev/null
>> +++ b/drivers/gpu/drm/amd/include/amdgpu_workload.h
>> @@ -0,0 +1,44 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright 2023 Advanced Micro Devices, Inc.
>> + *
>> + * Permission is hereby granted, free of charge, to any person 
>> obtaining a
>> + * copy of this software and associated documentation files (the 
>> "Software"),
>> + * to deal in the Software without restriction, including without 
>> limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, 
>> sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom 
>> the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be 
>> included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
>> EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
>> MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO 
>> EVENT SHALL
>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, 
>> DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
>> OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
>> USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + *
>> + */
>> +
>> +#ifndef _AMDGPU_WORKLOAD_H_
>> +#define _AMDGPU_WORKLOAD_H_
>> +
>> +struct amdgpu_smu_workload {
>> +    struct amdgpu_device    *adev;
>> +    struct mutex        workload_lock;
>> +    struct delayed_work    smu_delayed_work;
>> +    uint32_t        submit_workload_status;
>> +    atomic_t power_profile_ref[PP_SMC_POWER_PROFILE_COUNT];
>> +};
>> +
>> +void amdgpu_set_workload_profile(struct amdgpu_device *adev,
>> +                 uint32_t ring_type);
>> +
>> +void amdgpu_put_workload_profile(struct amdgpu_device *adev,
>> +                 uint32_t ring_type);
>> +
>> +void amdgpu_smu_workload_init(struct amdgpu_device *adev);
>> +
>> +#endif
