Message-ID: <1469439131-11308-8-git-send-email-suravee.suthikulpanit@amd.com>
Date: Mon, 25 Jul 2016 04:32:06 -0500
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
To: <rkrcmar@redhat.com>, <joro@8bytes.org>, <pbonzini@redhat.com>,
	<alex.williamson@redhat.com>
CC: <kvm@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<sherry.hurwitz@amd.com>,
	Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Subject: [PART2 PATCH v5 07/12] iommu/amd: Introduce amd_iommu_update_ga()
From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Introduce a new IOMMU API, amd_iommu_update_ga(), which allows
KVM (SVM) to update an existing posted-interrupt IOMMU IRTE when
a vCPU is loaded or unloaded, i.e. to retarget the IRTE to the
new physical CPU and update its is-running state.
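
For illustration, a KVM/SVM-side caller could drive this API roughly
as in the sketch below. This is only a sketch: example_pi_list() and
example_backing_page_pa(), as well as the per-vcpu list itself, are
hypothetical stand-ins for SVM AVIC state, not part of this patch.

/*
 * Sketch only: a vcpu load/unload hook walks the posted-interrupt
 * entries previously set up for this vcpu (kept on a per-vcpu list
 * via the amd_iommu_pi_data node field) and retargets each IRTE to
 * the new physical CPU, updating the is-running state as it goes.
 */
static void example_avic_set_running(struct kvm_vcpu *vcpu, int cpu, bool is_run)
{
	struct amd_iommu_pi_data *pi;

	list_for_each_entry(pi, example_pi_list(vcpu), node)
		amd_iommu_update_ga(cpu, example_backing_page_pa(vcpu),
				    is_run, pi);
}

Since update_irte_ga() below only touches the destination field when
cpu >= 0, a caller can update the is-running state alone without
retargeting the interrupt.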
Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
---
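Note for reviewers: amd_iommu_pi_data (added to include/linux/amd-iommu.h
below) is the cookie that travels between SVM and the IOMMU driver. The
sketch below shows one hypothetical way the SVM side could populate it,
assuming the data is handed to the IOMMU irq_chip through the generic
irq_set_vcpu_affinity() hook; example_make_ga_tag() and example_pi_list()
are made-up helpers, not part of this patch.

/*
 * Sketch only: fill amd_iommu_pi_data and pass it down; the IOMMU
 * driver is expected to store its amd_ir_data reference back in
 * pi->ir_data so that later amd_iommu_update_ga() calls can find
 * the IRTE.
 */
static int example_setup_pi(unsigned int host_irq, struct kvm_vcpu *vcpu,
			    struct vcpu_data *vcpu_info)
{
	struct amd_iommu_pi_data *pi;
	int ret;

	pi = kzalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	pi->ga_tag        = example_make_ga_tag(vcpu);
	pi->is_guest_mode = true;
	pi->vcpu_data     = vcpu_info;

	ret = irq_set_vcpu_affinity(host_irq, pi);
	if (ret) {
		kfree(pi);
		return ret;
	}

	/* Keep the entry on the per-vcpu list used at load/unload time. */
	list_add(&pi->node, example_pi_list(vcpu));
	return 0;
}
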
 drivers/iommu/amd_iommu.c       | 50 +++++++++++++++++++++++++++++++++++++++++
 drivers/iommu/amd_iommu_types.h |  1 +
 include/linux/amd-iommu.h       | 22 ++++++++++++++++++
 3 files changed, 73 insertions(+)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 861d723..e6a271c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4458,4 +4458,54 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 	return 0;
 }
+
+static int
+update_irte_ga(struct irte_ga *irte, unsigned int devid,
+	       u64 base, int cpu, bool is_run)
+{
+	struct irq_remap_table *irt = get_irq_table(devid, false);
+	unsigned long flags;
+
+	if (!irt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&irt->lock, flags);
+
+	if (irte->lo.fields_vapic.guest_mode) {
+		irte->hi.fields.ga_root_ptr = (base >> 12);
+		if (cpu >= 0)
+			irte->lo.fields_vapic.destination = cpu;
+		irte->lo.fields_vapic.is_run = is_run;
+		barrier();
+	}
+
+	spin_unlock_irqrestore(&irt->lock, flags);
+
+	return 0;
+}
+
+int amd_iommu_update_ga(u32 cpu, u64 base, bool is_run,
+			struct amd_iommu_pi_data *pi)
+{
+	struct amd_ir_data *ir_data = pi->ir_data;
+	int devid = ir_data->irq_2_irte.devid;
+	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
+	struct amd_iommu *iommu;
+
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+		return 0;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (!iommu)
+		return -ENODEV;
+
+	if (!irte->lo.fields_vapic.guest_mode)
+		return 0;
+
+	update_irte_ga((struct irte_ga *)ir_data->ref, devid, base, cpu, is_run);
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 947c74a..623ee9e 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -824,6 +824,7 @@ struct amd_ir_data {
 	union {
 		struct msi_msg msi_entry;
 	};
+	void *ref; /* Pointer to the actual irte */
 };
 
 #ifdef CONFIG_IRQ_REMAP
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 465d096..2094d90 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -22,6 +22,19 @@
 
 #include <linux/types.h>
 
+/*
+ * This is mainly used to communicate information back and forth
+ * between SVM and IOMMU for setting up and tearing down posted
+ * interrupts.
+ */
+struct amd_iommu_pi_data {
+	struct list_head node;	/* For per-vcpu pi_list */
+	u32 ga_tag;
+	bool is_guest_mode;
+	struct vcpu_data *vcpu_data;
+	struct amd_ir_data *ir_data;
+};
+
 #ifdef CONFIG_AMD_IOMMU
 
 struct task_struct;
@@ -179,6 +192,9 @@ static inline int amd_iommu_detect(void) { return -ENODEV; }
 /* IOMMU AVIC Function */
 extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
 
+extern int
+amd_iommu_update_ga(u32 cpu, u64 base, bool is_run, struct amd_iommu_pi_data *pi);
+
 #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
 
 static inline int
@@ -187,6 +203,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
 	return 0;
 }
 
+static inline int
+amd_iommu_update_ga(u32 cpu, u64 base, bool is_run, struct amd_iommu_pi_data *pi)
+{
+	return 0;
+}
+
 #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
--
1.9.1