Message-Id: <20210802134345.209372674@linuxfoundation.org>
Date: Mon, 2 Aug 2021 15:44:33 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Shyam Sundar S K <Shyam-sundar.S-k@....com>,
Raul E Rangel <rrangel@...omium.org>,
Hans de Goede <hdegoede@...hat.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.13 036/104] platform/x86: amd-pmc: Fix command completion code

From: Shyam Sundar S K <Shyam-sundar.S-k@....com>

[ Upstream commit 95e1b60f8dc8f225b14619e9aca9bdd7d99167db ]

The protocol for submitting a job request to the SMU is to wait for
AMD_PMC_REGISTER_RESPONSE to return 1, meaning the SMU is ready to take
requests. The PMC driver has to make sure that the response code is always
AMD_PMC_RESULT_OK before making any command submissions.
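
(As a reader's aid only, not part of the patch: the handshake described
above amounts to roughly the sketch below. The helper name
amd_pmc_wait_for_response() is hypothetical; the register and constant
names are the driver's own, and readx_poll_timeout() comes from
<linux/iopoll.h>.)

	/*
	 * Illustrative sketch: poll the response register until the SMU
	 * posts a result, then require AMD_PMC_RESULT_OK before a new
	 * command may be submitted.
	 */
	static int amd_pmc_wait_for_response(struct amd_pmc_dev *dev)
	{
		u32 val;
		int rc;

		/* re-evaluates val = ioread32(addr) until val != 0 or timeout */
		rc = readx_poll_timeout(ioread32,
					dev->regbase + AMD_PMC_REGISTER_RESPONSE,
					val, val != 0, PMC_MSG_DELAY_MIN_US,
					PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
		if (rc)
			return rc;	/* -ETIMEDOUT from the poll helper */

		return val == AMD_PMC_RESULT_OK ? 0 : -EIO;
	}
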
When we submit a message to the SMU, we have to wait until it has
processed the request. Add a readx_poll_timeout() check for this, which
was missing in the existing code.

Also, add a mutex to protect amd_pmc_send_cmd() calls to the SMU.
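
(Again purely illustrative, not part of the patch: the post-patch shape
of amd_pmc_send_cmd() is roughly the following. The argument/response
register writes that sit between the two hunks below are elided, and the
error handling is condensed to -EIO where the real switch statement
distinguishes -EBUSY, -EINVAL and -EIO.)

	static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
	{
		int rc;
		u8 msg;
		u32 val;

		mutex_lock(&dev->lock);	/* one SMU transaction at a time */

		/* 1. wait for the SMU to be ready to accept a command */
		rc = readx_poll_timeout(ioread32,
					dev->regbase + AMD_PMC_REGISTER_RESPONSE,
					val, val != 0, PMC_MSG_DELAY_MIN_US,
					PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
		if (rc)
			goto out_unlock;

		/* 2. submit the message ID (other register writes elided) */
		msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
		amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);

		/* 3. wait for completion, then check the result code */
		rc = readx_poll_timeout(ioread32,
					dev->regbase + AMD_PMC_REGISTER_RESPONSE,
					val, val != 0, PMC_MSG_DELAY_MIN_US,
					PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
		if (!rc && val != AMD_PMC_RESULT_OK)
			rc = -EIO;	/* condensed; see the switch below */

	out_unlock:
		mutex_unlock(&dev->lock);
		return rc;
	}

Holding the mutex across the whole poll-write-poll sequence is what keeps
concurrent callers from interleaving their register accesses.
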
Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@....com>
Acked-by: Raul E Rangel <rrangel@...omium.org>
Reviewed-by: Hans de Goede <hdegoede@...hat.com>
Link: https://lore.kernel.org/r/20210629084803.248498-2-Shyam-sundar.S-k@amd.com
Signed-off-by: Hans de Goede <hdegoede@...hat.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/platform/x86/amd-pmc.c | 38 ++++++++++++++++++++++++++++++++--
1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b9da58ee9b1e..1b5f149932c1 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -68,6 +68,7 @@ struct amd_pmc_dev {
u32 base_addr;
u32 cpu_id;
struct device *dev;
+ struct mutex lock; /* generic mutex lock */
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
@@ -138,9 +139,10 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
u8 msg;
u32 val;

+	mutex_lock(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
- val, val > 0, PMC_MSG_DELAY_MIN_US,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
@@ -156,7 +158,37 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
/* Write message ID to message ID register */
msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
- return 0;
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMC_RESULT_OK:
+ break;
+ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMC_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ return rc;
}

static int __maybe_unused amd_pmc_suspend(struct device *dev)
@@ -259,6 +291,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_dump_registers(dev);

+	mutex_init(&dev->lock);
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
return 0;
}
@@ -269,6 +302,7 @@ static int amd_pmc_remove(struct platform_device *pdev)
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);

amd_pmc_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
return 0;
}

--
2.30.2