Message-ID: <172062976875.2215.17731870703902999167.tip-bot2@tip-bot2>
Date: Wed, 10 Jul 2024 16:42:48 -0000
From: "tip-bot2 for Marc Zyngier" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Marc Zyngier <maz@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
Nianyao Tang <tangnianyao@...wei.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: irq/core] irqchip/gic-v4: Substitute vmovp_lock for a per-VM lock

The following commit has been merged into the irq/core branch of tip:

Commit-ID:     7ae6f82a97f6f1dc32414e09e15375721c691b3d
Gitweb:        https://git.kernel.org/tip/7ae6f82a97f6f1dc32414e09e15375721c691b3d
Author:        Marc Zyngier <maz@...nel.org>
AuthorDate:    Fri, 05 Jul 2024 10:31:54 +01:00
Committer:     Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Wed, 10 Jul 2024 18:40:09 +02:00

irqchip/gic-v4: Substitute vmovp_lock for a per-VM lock

vmovp_lock is abused in a number of cases to serialise updates to
vlpi_count[] and to deal with the map/unmap of a VM to ITSs.

Instead, provide a per-VM lock and revisit the use of vlpi_count[] so
that it is always wrapped in this per-VM vmapp_lock.

This reduces the potential contention on a concurrent VMOVP command,
and paves the way for the subsequent VPE locking that holding
vmovp_lock actively prevents due to the lock ordering.

Signed-off-by: Marc Zyngier <maz@...nel.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Tested-by: Nianyao Tang <tangnianyao@...wei.com>
Link: https://lore.kernel.org/r/20240705093155.871070-3-maz@kernel.org
---
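
The conversions in this patch rely on the scope-based guard() helpers
from <linux/cleanup.h>: the lock is taken where the guard is declared
and dropped automatically when the enclosing scope is left, which is
why the explicit unlock calls and the on-stack 'flags' locals disappear
below. As a minimal userspace sketch of the same pattern, assuming only
GCC/Clang's __attribute__((cleanup)); every name here is illustrative,
not kernel API:

#include <pthread.h>
#include <stdio.h>

/* Unlock callback run automatically when the guard variable dies. */
static void guard_release(pthread_mutex_t **lockp)
{
        if (*lockp)
                pthread_mutex_unlock(*lockp);
}

#define GUARD_CAT_(a, b) a##b
#define GUARD_CAT(a, b)  GUARD_CAT_(a, b)

/* Take @lock now; drop it automatically when the scope is left. */
#define GUARD(lock)                                                    \
        pthread_mutex_t *GUARD_CAT(guard_, __LINE__)                   \
                __attribute__((cleanup(guard_release))) =              \
                (pthread_mutex_lock(&(lock)), &(lock))

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void bump(void)
{
        GUARD(demo_lock);       /* locked here ...                  */
        counter++;              /* ... critical section ...         */
}                               /* ... unlocked automatically here  */

int main(void)
{
        bump();
        printf("counter = %d\n", counter);
        return 0;
}

The kernel's guard(raw_spinlock_irqsave) works along the same lines,
additionally carrying the saved interrupt flags next to the lock
pointer so they can be restored on release.
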
 drivers/irqchip/irq-gic-v3-its.c   | 27 ++++++++++++---------------
 include/linux/irqchip/arm-gic-v4.h |  8 ++++++++
 2 files changed, 20 insertions(+), 15 deletions(-)
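
The core of the change shows in its_map_vm()/its_unmap_vm() below:
vlpi_count[] acts as a per-ITS-list refcount that is now only ever
touched under the owning VM's vmapp_lock, so concurrent map/unmap on
unrelated VMs no longer serialise on the global vmovp_lock. A rough
pthreads rendition of that refcounting scheme; struct vm, map_vm() and
unmap_vm() are simplified stand-ins for the kernel's types, not its
actual code:

#include <pthread.h>
#include <stdio.h>

#define ITS_LIST_MAX 16

struct vm {
        pthread_mutex_t vmapp_lock;             /* per-VM, not global   */
        unsigned int vlpi_count[ITS_LIST_MAX];  /* one count per ITS    */
};

/* First map on a given ITS does the real work; later calls only
 * take a reference. Contention is confined to this one VM. */
static void map_vm(struct vm *vm, int list_nr)
{
        pthread_mutex_lock(&vm->vmapp_lock);
        if (vm->vlpi_count[list_nr]++ == 0)
                printf("VMAPP all VPEs of %p on ITS list %d\n",
                       (void *)vm, list_nr);
        pthread_mutex_unlock(&vm->vmapp_lock);
}

/* Last unmap on a given ITS tears the mapping back down. */
static void unmap_vm(struct vm *vm, int list_nr)
{
        pthread_mutex_lock(&vm->vmapp_lock);
        if (--vm->vlpi_count[list_nr] == 0)
                printf("VUNMAP all VPEs of %p on ITS list %d\n",
                       (void *)vm, list_nr);
        pthread_mutex_unlock(&vm->vmapp_lock);
}

int main(void)
{
        struct vm vm = { .vmapp_lock = PTHREAD_MUTEX_INITIALIZER };

        map_vm(&vm, 0);         /* maps          */
        map_vm(&vm, 0);         /* refcount only */
        unmap_vm(&vm, 0);       /* refcount only */
        unmap_vm(&vm, 0);       /* unmaps        */
        return 0;
}

Only the first map and the last unmap do real work; everything in
between just moves the refcount, all under one VM's own lock.
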
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 8d31e4a..c85826a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1318,7 +1318,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
 {
         struct its_cmd_desc desc = {};
         struct its_node *its;
-        unsigned long flags;
         int col_id = vpe->col_idx;

         desc.its_vmovp_cmd.vpe = vpe;
@@ -1331,6 +1330,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
         }

         /*
+         * Protect against concurrent updates of the mapping state on
+         * individual VMs.
+         */
+        guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
+        /*
          * Yet another marvel of the architecture. If using the
          * its_list "feature", we need to make sure that all ITSs
          * receive all VMOVP commands in the same order. The only way
@@ -1338,8 +1343,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
          *
          * Wall <-- Head.
          */
-        raw_spin_lock_irqsave(&vmovp_lock, flags);
-
+        guard(raw_spinlock)(&vmovp_lock);
         desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
         desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

@@ -1354,8 +1358,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
                 desc.its_vmovp_cmd.col = &its->collections[col_id];
                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
@@ -1792,12 +1794,10 @@ static bool gic_requires_eager_mapping(void)

 static void its_map_vm(struct its_node *its, struct its_vm *vm)
 {
-        unsigned long flags;
-
         if (gic_requires_eager_mapping())
                 return;

-        raw_spin_lock_irqsave(&vmovp_lock, flags);
+        guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

         /*
          * If the VM wasn't mapped yet, iterate over the vpes and get
@@ -1815,19 +1815,15 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
                         its_send_vinvall(its, vpe);
                 }
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
 {
-        unsigned long flags;
-
         /* Not using the ITS list? Everything is always mapped. */
         if (gic_requires_eager_mapping())
                 return;

-        raw_spin_lock_irqsave(&vmovp_lock, flags);
+        guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

         if (!--vm->vlpi_count[its->list_nr]) {
                 int i;
@@ -1835,8 +1831,6 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
                 for (i = 0; i < vm->nr_vpes; i++)
                         its_send_vmapp(its, vm->vpes[i], false);
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
@@ -3944,6 +3938,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
 {
         struct its_node *its;

+        guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
         list_for_each_entry(its, &its_nodes, entry) {
                 if (!is_v4(its))
                         continue;
@@ -4549,6 +4545,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
         vm->db_lpi_base = base;
         vm->nr_db_lpis = nr_ids;
         vm->vprop_page = vprop_page;
+        raw_spin_lock_init(&vm->vmapp_lock);

         if (gic_rdists->has_rvpeid)
                 irqchip = &its_vpe_4_1_irq_chip;
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 2c63375..ecabed6 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -25,6 +25,14 @@ struct its_vm {
         irq_hw_number_t db_lpi_base;
         unsigned long *db_bitmap;
         int nr_db_lpis;
+        /*
+         * Ensures mutual exclusion between updates to vlpi_count[]
+         * and map/unmap when using the ITSList mechanism.
+         *
+         * The lock order for any sequence involving the ITSList is
+         * vmapp_lock -> vpe_lock -> vmovp_lock.
+         */
+        raw_spinlock_t vmapp_lock;
         u32 vlpi_count[GICv4_ITS_LIST_MAX];
 };
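
The ordering documented in the new comment is what keeps the
finer-grained scheme deadlock-free: any path that nests these locks
must take vmapp_lock before vpe_lock before vmovp_lock, never the
reverse. A toy pthreads illustration of that discipline; the mutexes
are named after the kernel's locks purely for readability and the code
itself is illustrative:

#include <pthread.h>

static pthread_mutex_t vmapp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vpe_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vmovp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path that needs several of these locks takes them in the
 * documented order. Two threads running move_vpe() concurrently may
 * block on each other, but no thread ever holds a later lock while
 * waiting for an earlier one, so an ABBA deadlock cannot form. */
static void move_vpe(void)
{
        pthread_mutex_lock(&vmapp_lock);        /* 1: per-VM state     */
        pthread_mutex_lock(&vpe_lock);          /* 2: per-VPE state    */
        pthread_mutex_lock(&vmovp_lock);        /* 3: VMOVP sequencing */

        /* ... emit VMOVP to every ITS in the same global order ... */

        pthread_mutex_unlock(&vmovp_lock);
        pthread_mutex_unlock(&vpe_lock);
        pthread_mutex_unlock(&vmapp_lock);
}

int main(void)
{
        move_vpe();
        return 0;
}

Putting vmovp_lock last means the one remaining global lock is held
only around the VMOVP emission itself, keeping the shared critical
section as short as possible.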