Message-ID: <20230530024227.2139632-8-evan.quan@amd.com>
Date: Tue, 30 May 2023 10:42:25 +0800
From: Evan Quan <evan.quan@....com>
To: <rafael@...nel.org>, <lenb@...nel.org>,
<alexander.deucher@....com>, <christian.koenig@....com>,
<Xinhui.Pan@....com>, <airlied@...il.com>, <daniel@...ll.ch>,
<evan.quan@....com>, <kvalo@...nel.org>, <nbd@....name>,
<lorenzo@...nel.org>, <ryder.lee@...iatek.com>,
<shayne.chen@...iatek.com>, <sean.wang@...iatek.com>,
<matthias.bgg@...il.com>,
<angelogioacchino.delregno@...labora.com>,
<Mario.Limonciello@....com>, <Lijo.Lazar@....com>
CC: <linux-kernel@...r.kernel.org>, <linux-acpi@...r.kernel.org>,
<amd-gfx@...ts.freedesktop.org>, <dri-devel@...ts.freedesktop.org>,
<linux-wireless@...r.kernel.org>, <ath11k@...ts.infradead.org>,
<ath12k@...ts.infradead.org>
Subject: [PATCH 7/9] drm/amd/pm: add flood detection for wbrf events
To protect PMFW from being overloaded, add flood detection for
wbrf events: at most one event is processed every
SMU_WBRF_EVENT_HANDLING_PACE (10) ms.

Signed-off-by: Evan Quan <evan.quan@....com>
---
v1->v2:
- utilize the delayed work(Lijo, Christian)
- split the new module parameter changes out(Christian, Alex)
v2->v3:
- simplify the flood detection further per latest design
v3->v4:
- drop unneeded wbrf_mutex(Lijo, Christian)
- use schedule_delayed_work() to avoid possible concurrent
  access(Christian)
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 28 ++++++++++++++++---
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  7 +++++
 2 files changed, 31 insertions(+), 4 deletions(-)
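
For readers new to the pattern: the flood detection below is the
classic delayed-work debounce. A minimal sketch with made-up demo_*
names (only the 10 ms pace mirrors this patch); repeated events
within the window collapse into a single handler run because
schedule_delayed_work() is a no-op while the work is still pending:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_PACE	10	/* ms, mirrors SMU_WBRF_EVENT_HANDLING_PACE */

struct demo_ctx {
	struct delayed_work dwork;
};

/* runs once the flood is over; consume the latest state here */
static void demo_work_handler(struct work_struct *work)
{
	struct demo_ctx *ctx =
		container_of(work, struct demo_ctx, dwork.work);

	(void)ctx;
}

/* called on every notification, however frequent */
static void demo_event(struct demo_ctx *ctx)
{
	/* returns false and keeps the old timer if already queued */
	schedule_delayed_work(&ctx->dwork,
			      msecs_to_jiffies(DEMO_PACE));
}

Pair INIT_DELAYED_WORK() at init with cancel_delayed_work_sync() at
teardown, exactly as smu_wbrf_init()/smu_wbrf_fini() do below.
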
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 89f876cc60e6..2619e310ef54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1272,6 +1272,22 @@ static void smu_wbrf_event_handler(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	schedule_delayed_work(&smu->wbrf_delayed_work,
+			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+}
+
+/**
+ * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
+ *
+ * @work: struct work_struct pointer
+ *
+ * Flood is over and driver will consume the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+	struct smu_context *smu =
+		container_of(work, struct smu_context, wbrf_delayed_work.work);
+
 	smu_wbrf_handle_exclusion_ranges(smu);
 }
 
@@ -1311,6 +1327,9 @@ static int smu_wbrf_init(struct smu_context *smu)
 	if (!smu->wbrf_supported)
 		return 0;
 
+	INIT_DELAYED_WORK(&smu->wbrf_delayed_work,
+			  smu_wbrf_delayed_work_handler);
+
 	ret = amdgpu_acpi_register_wbrf_notify_handler(adev,
 						       smu_wbrf_event_handler);
 	if (ret)
@@ -1321,11 +1340,10 @@ static int smu_wbrf_init(struct smu_context *smu)
 	 * before our driver loaded. To make sure our driver
 	 * is awared of those exclusion ranges.
 	 */
-	ret = smu_wbrf_handle_exclusion_ranges(smu);
-	if (ret)
-		dev_err(adev->dev, "Failed to handle wbrf exclusion ranges\n");
+	schedule_delayed_work(&smu->wbrf_delayed_work,
+			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1343,6 +1361,8 @@ static void smu_wbrf_fini(struct smu_context *smu)
 		return;
 
 	amdgpu_acpi_unregister_wbrf_notify_handler(adev);
+
+	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
 }
 
 static int smu_smc_hw_setup(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ff0af3da0be2..aa63cc43d41c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -478,6 +478,12 @@ struct stb_context {
 
 #define WORKLOAD_POLICY_MAX 7
 
+/*
+ * Configure wbrf event handling pace as there can be only one
+ * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE	10
+
 struct smu_context
 {
 	struct amdgpu_device *adev;
@@ -576,6 +582,7 @@ struct smu_context
 
 	/* data structures for wbrf feature support */
 	bool wbrf_supported;
+	struct delayed_work wbrf_delayed_work;
 };
 
 struct i2c_adapter;
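
Two standard workqueue properties make this work: scheduling an
already-pending delayed work returns false without touching the
original timer, so N notifications inside one 10 ms window yield
exactly one smu_wbrf_handle_exclusion_ranges() call (which consumes
the latest ranges anyway); and smu_wbrf_fini() unregisters the notify
handler before cancel_delayed_work_sync(), so nothing can requeue the
work once the cancel returns.
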
--
2.34.1