Message-ID: <b33ea949-a53e-2c81-27fe-31067c94dc76@huawei.com>
Date: Fri, 30 Jun 2023 18:33:38 +0800
From: Kunkun Jiang <jiangkunkun@...wei.com>
To: Marc Zyngier <maz@...nel.org>, <linux-kernel@...r.kernel.org>
CC: Thomas Gleixner <tglx@...utronix.de>,
Zenghui Yu <yuzenghui@...wei.com>, <wanghaibin.wang@...wei.com>
Subject: Re: [PATCH] irqchip/gic-v4.1: Properly lock VPEs when doing a
directLPI invalidation
Hi Marc,
On 2023/6/17 15:32, Marc Zyngier wrote:
> We normally rely on the irq_to_cpuid_[un]lock() primitives to make
> sure nothing will change col->idx while performing an LPI invalidation.
>
> However, these primitives do not cover VPE doorbells, and we have
> some open-coded locking for that. Unfortunately, this locking is
> pretty bogus.
>
> Instead, extend the above primitives to cover VPE doorbells and
> convert the whole thing to it.
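
To make the race concrete: the open-coded doorbell path (removed in
the its_vpe_send_inv() hunk below) reads vpe->col_idx without holding
vpe->vpe_lock, so an affinity change can retarget the VPE between the
col_idx read and the INVLPIR write. Condensed from the removed lines,
with comments added:

	/*
	 * Racy: vpe->col_idx is sampled without vpe->vpe_lock, so a
	 * concurrent affinity change can move the VPE to another
	 * redistributor while we invalidate on the stale one.
	 */
	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
	gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);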
I've tested this patch 20+ times with a multi-core VM that has
pass-through devices (network card and SSD) and either GICv4 or
GICv4.1 enabled. No problems were observed in the guest or the host.
Tested-by: Kunkun Jiang <jiangkunkun@...wei.com>
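
For reference, after the patch every direct invalidation (VLPI or VPE
doorbell) goes through the same locked sequence; condensed from
__direct_lpi_inv() below:

	/*
	 * irq_to_cpuid_lock() now recognises VPE doorbells (via the
	 * its_vpe_irq_chip check), so col_idx cannot change until
	 * irq_to_cpuid_unlock().
	 */
	cpu = irq_to_cpuid_lock(d, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);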
>
> Fixes: f3a059219bc7 ("irqchip/gic-v4.1: Ensure mutual exclusion between vPE affinity change and RD access")
> Reported-by: Kunkun Jiang <jiangkunkun@...wei.com>
> Signed-off-by: Marc Zyngier <maz@...nel.org>
> Cc: Zenghui Yu <yuzenghui@...wei.com>
> Cc: wanghaibin.wang@...wei.com
> ---
> drivers/irqchip/irq-gic-v3-its.c | 75 ++++++++++++++++++++------------
> 1 file changed, 46 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
> index 0ec2b1e1df75..c5cb2830e853 100644
> --- a/drivers/irqchip/irq-gic-v3-its.c
> +++ b/drivers/irqchip/irq-gic-v3-its.c
> @@ -273,13 +273,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
> raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
> }
>
> +static struct irq_chip its_vpe_irq_chip;
> +
> static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
> {
> - struct its_vlpi_map *map = get_vlpi_map(d);
> + struct its_vpe *vpe = NULL;
> int cpu;
>
> - if (map) {
> - cpu = vpe_to_cpuid_lock(map->vpe, flags);
> + if (d->chip == &its_vpe_irq_chip) {
> + vpe = irq_data_get_irq_chip_data(d);
> + } else {
> + struct its_vlpi_map *map = get_vlpi_map(d);
> + if (map)
> + vpe = map->vpe;
> + }
> +
> + if (vpe) {
> + cpu = vpe_to_cpuid_lock(vpe, flags);
> } else {
> /* Physical LPIs are already locked via the irq_desc lock */
> struct its_device *its_dev = irq_data_get_irq_chip_data(d);
> @@ -293,10 +303,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
>
> static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
> {
> - struct its_vlpi_map *map = get_vlpi_map(d);
> + struct its_vpe *vpe = NULL;
> +
> + if (d->chip == &its_vpe_irq_chip) {
> + vpe = irq_data_get_irq_chip_data(d);
> + } else {
> + struct its_vlpi_map *map = get_vlpi_map(d);
> + if (map)
> + vpe = map->vpe;
> + }
>
> - if (map)
> - vpe_to_cpuid_unlock(map->vpe, flags);
> + if (vpe)
> + vpe_to_cpuid_unlock(vpe, flags);
> }
>
> static struct its_collection *valid_col(struct its_collection *col)
> @@ -1433,14 +1451,29 @@ static void wait_for_syncr(void __iomem *rdbase)
> cpu_relax();
> }
>
> -static void direct_lpi_inv(struct irq_data *d)
> +static void __direct_lpi_inv(struct irq_data *d, u64 val)
> {
> - struct its_vlpi_map *map = get_vlpi_map(d);
> void __iomem *rdbase;
> unsigned long flags;
> - u64 val;
> int cpu;
>
> + /* Target the redistributor this LPI is currently routed to */
> + cpu = irq_to_cpuid_lock(d, &flags);
> + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
> +
> + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
> + gic_write_lpir(val, rdbase + GICR_INVLPIR);
> + wait_for_syncr(rdbase);
> +
> + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
> + irq_to_cpuid_unlock(d, flags);
> +}
> +
> +static void direct_lpi_inv(struct irq_data *d)
> +{
> + struct its_vlpi_map *map = get_vlpi_map(d);
> + u64 val;
> +
> if (map) {
> struct its_device *its_dev = irq_data_get_irq_chip_data(d);
>
> @@ -1453,15 +1486,7 @@ static void direct_lpi_inv(struct irq_data *d)
> val = d->hwirq;
> }
>
> - /* Target the redistributor this LPI is currently routed to */
> - cpu = irq_to_cpuid_lock(d, &flags);
> - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
> - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
> - gic_write_lpir(val, rdbase + GICR_INVLPIR);
> -
> - wait_for_syncr(rdbase);
> - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
> - irq_to_cpuid_unlock(d, flags);
> + __direct_lpi_inv(d, val);
> }
>
> static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
> @@ -3952,18 +3977,10 @@ static void its_vpe_send_inv(struct irq_data *d)
> {
> struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
>
> - if (gic_rdists->has_direct_lpi) {
> - void __iomem *rdbase;
> -
> - /* Target the redistributor this VPE is currently known on */
> - raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
> - rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
> - gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
> - wait_for_syncr(rdbase);
> - raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
> - } else {
> + if (gic_rdists->has_direct_lpi)
> + __direct_lpi_inv(d, d->parent_data->hwirq);
> + else
> its_vpe_send_cmd(vpe, its_send_inv);
> - }
> }
>
> static void its_vpe_mask_irq(struct irq_data *d)