Message-Id: <20230609132013.11609-1-joro@8bytes.org>
Date: Fri, 9 Jun 2023 15:20:13 +0200
From: Joerg Roedel <joro@...tes.org>
To: iommu@...ts.linux.dev
Cc: linux-kernel@...r.kernel.org,
Suravee Suthikulpanit <suravee.suthikulpanit@....com>,
Jerry Snitselaar <jsnitsel@...hat.com>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <jroedel@...e.de>
Subject: [PATCH] iommu/amd: Fix compile error for unused function
From: Joerg Roedel <jroedel@...e.de>

Recent changes introduced a compile error:

drivers/iommu/amd/iommu.c:1285:13: error: ‘iommu_flush_irt_and_complete’ defined but not used [-Werror=unused-function]
 1285 | static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
      |             ^~~~~~~~~~~~~~~~~~~~~~~~~~~~

This happens with defconfig-x86_64 because AMD IOMMU is enabled but
CONFIG_IRQ_REMAP is disabled. Move the function under #ifdef
CONFIG_IRQ_REMAP to fix the error.
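
For illustration only, here is a minimal standalone sketch of the pattern the
patch applies: a static helper that is only called from code compiled under a
config option must itself be defined under the same #ifdef, otherwise
-Werror=unused-function fires when that option is off. The file name demo.c
and the helper/caller names below are made up for the example and are not the
kernel symbols.

/* Standalone illustration only -- not the kernel sources. Build with e.g.:
 *   gcc -Wall -Werror=unused-function -DCONFIG_IRQ_REMAP demo.c
 * Dropping -DCONFIG_IRQ_REMAP still compiles cleanly, because the static
 * helper is guarded together with its only caller; defining the helper
 * outside the #ifdef would reproduce the unused-function error. */
#include <stdio.h>

#ifdef CONFIG_IRQ_REMAP
/* Only referenced from the interrupt-remapping path, so it sits inside
 * the same #ifdef block as its caller. */
static void flush_irt_and_complete(void)
{
	printf("flush IRT and wait for completion\n");
}

static void irq_remap_path(void)
{
	flush_irt_and_complete();
}
#endif

int main(void)
{
#ifdef CONFIG_IRQ_REMAP
	irq_remap_path();
#endif
	return 0;
}
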
Signed-off-by: Joerg Roedel <jroedel@...e.de>
---
 drivers/iommu/amd/iommu.c | 52 +++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index e47c8c520708..13097619fc4c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1275,32 +1275,6 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 	iommu_completion_wait(iommu);
 }
 
-static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
-{
-	int ret;
-	u64 data;
-	unsigned long flags;
-	struct iommu_cmd cmd, cmd2;
-
-	if (iommu->irtcachedis_enabled)
-		return;
-
-	build_inv_irt(&cmd, devid);
-	data = atomic64_add_return(1, &iommu->cmd_sem_val);
-	build_completion_wait(&cmd2, iommu, data);
-
-	raw_spin_lock_irqsave(&iommu->lock, flags);
-	ret = __iommu_queue_command_sync(iommu, &cmd, true);
-	if (ret)
-		goto out;
-	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
-	if (ret)
-		goto out;
-	wait_on_sem(iommu, data);
-out:
-	raw_spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
 void iommu_flush_all_caches(struct amd_iommu *iommu)
 {
 	if (iommu_feature(iommu, FEATURE_IA)) {
@@ -2831,6 +2805,32 @@ EXPORT_SYMBOL(amd_iommu_device_info);
 static struct irq_chip amd_ir_chip;
 static DEFINE_SPINLOCK(iommu_table_lock);
 
+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
+{
+	int ret;
+	u64 data;
+	unsigned long flags;
+	struct iommu_cmd cmd, cmd2;
+
+	if (iommu->irtcachedis_enabled)
+		return;
+
+	build_inv_irt(&cmd, devid);
+	data = atomic64_add_return(1, &iommu->cmd_sem_val);
+	build_completion_wait(&cmd2, iommu, data);
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	if (ret)
+		goto out;
+	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+	if (ret)
+		goto out;
+	wait_on_sem(iommu, data);
+out:
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
 			      struct irq_remap_table *table)
 {
--
2.40.1