Message-ID: <538d89d9-f268-23c7-f94d-1163eee3beec@loongson.cn>
Date: Thu, 6 Jul 2023 08:30:42 +0800
From: bibo mao <maobibo@...ngson.cn>
To: Huacai Chen <chenhuacai@...nel.org>,
Jiaxun Yang <jiaxun.yang@...goat.com>,
Marc Zyngier <maz@...nel.org>
Cc: linux-mips@...r.kernel.org, linux-kernel@...r.kernel.org,
Jianmin Lv <lvjianmin@...ngson.cn>,
loongson-kernel@...ts.loongnix.cn
Subject: Re: [PATCH 2/2] irqchip/loongson-eiointc: simplify irq route on one
eioi-node system

I will add a simple route for embedded systems such as the 2K500 and 2K2000 in the next patch, roughly like this:
- ret = eiointc_init(priv, parent_irq, 0);
+ /*
+ * 2k0500 and 2k2000 have only one eio node,
+ * so set the nodemap to 1 for simple irq routing.
+ * What about future embedded boards with more than 4 CPUs?
+ */
+ ret = eiointc_init(priv, parent_irq, 1);
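
For boards with more than 4 CPUs, the nodemap could presumably be derived
instead of hardcoded. A minimal sketch, assuming the node count can be
computed from the possible CPUs (the GENMASK_ULL expression is only my
illustration, not the planned patch):

	/* illustration: one nodemap bit per group of CORES_PER_EIO_NODE CPUs */
	u64 node_map = GENMASK_ULL((nr_cpu_ids - 1) / CORES_PER_EIO_NODE, 0);

	ret = eiointc_init(priv, parent_irq, node_map);

With 4 or fewer CPUs this still evaluates to 1, so the single-node case
above is unchanged.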

Regards,
Bibo Mao
On 2023/7/4 21:21, Bibo Mao wrote:
> Some LoongArch systems have only one eiointc node, such as the 3A5000
> machine and the qemu virt machine. If there is only one eiointc node,
> all CPUs can access the eiointc registers directly; if there are
> multiple eiointc nodes, a CPU can only access its own node's eiointc,
> so any_send or IPI has to be used to configure IRQ routing. On such a
> single-node system IRQ routing is simple, and hack methods like
> any_send are not necessary.
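
To spell the difference out: with multiple eiointc nodes the enable/route
registers of a remote node are only reachable through the any_send
mailbox, while with a single node a plain IOCSR access is enough. A rough
sketch of the two paths (csr_any_send() is the helper the existing driver
uses; the exact arguments below are illustrative, not taken from the
patch):

	u32 regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
	u32 data    = EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F);

	/* single eiointc node: any CPU can write the register directly */
	iocsr_write32(data, regaddr);

	/* multiple eiointc nodes: forward the write to a CPU on the
	 * owning node via the any_send mailbox */
	csr_any_send(regaddr, data, 0x0, node * CORES_PER_EIO_NODE);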
>
> Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
> Change-Id: I351e615ab15b79a1a3a4b96943a32c0ff5df2b13
> ---
> drivers/irqchip/irq-loongson-eiointc.c | 71 ++++++++++++++++++++++++--
> 1 file changed, 66 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
> index 1c5a5b59f199..4759c0ae8edb 100644
> --- a/drivers/irqchip/irq-loongson-eiointc.c
> +++ b/drivers/irqchip/irq-loongson-eiointc.c
> @@ -127,6 +127,48 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
> return IRQ_SET_MASK_OK;
> }
>
> +static int eiointc_single_set_irq_affinity(struct irq_data *d,
> + const struct cpumask *affinity, bool force)
> +{
> + unsigned int cpu;
> + unsigned long flags;
> + uint32_t vector, regaddr, data, coremap;
> + struct cpumask mask;
> + struct eiointc_priv *priv = d->domain->host_data;
> +
> + cpumask_and(&mask, affinity, cpu_online_mask);
> + cpumask_and(&mask, &mask, &priv->cpuspan_map);
> + if (cpumask_empty(&mask))
> + return -EINVAL;
> +
> + cpu = cpumask_first(&mask);
> + vector = d->hwirq;
> + regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
> + data = ~BIT(vector & 0x1F);
> + coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
> +
> + /*
> + * Simplified path for a single eio node:
> + * access the eio registers directly instead of
> + * going through the any_send hack.
> + */
> + raw_spin_lock_irqsave(&affinity_lock, flags);
> + iocsr_write32(EIOINTC_ALL_ENABLE & data, regaddr);
> + /*
> + * Each route register holds the routing bytes of 4 consecutive
> + * vectors; update only the byte of the specified vector.
> + */
> + data = iocsr_read32(EIOINTC_REG_ROUTE + (vector & ~3));
> + data &= ~(0xff << ((vector & 3) * 8));
> + data |= coremap << ((vector & 3) * 8);
> + iocsr_write32(data, EIOINTC_REG_ROUTE + (vector & ~3));
> + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
> + raw_spin_unlock_irqrestore(&affinity_lock, flags);
> +
> + irq_data_update_effective_affinity(d, cpumask_of(cpu));
> + return IRQ_SET_MASK_OK;
> +}
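
For reference, the EIOINTC_REG_ROUTE read-modify-write above relies on
four vectors being packed per 32-bit route register, one coremap byte
each. A worked example, assuming hwirq/vector 10:

	enable reg : EIOINTC_REG_ENABLE + ((10 >> 5) << 2) = EIOINTC_REG_ENABLE + 0
	route reg  : EIOINTC_REG_ROUTE  + (10 & ~3)        = EIOINTC_REG_ROUTE + 8  (vectors 8..11)
	byte shift : (10 & 3) * 8 = 16                     -> coremap for vector 10 in bits 16..23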
> +
> static int eiointc_index(int node)
> {
> int i;
> @@ -237,22 +279,39 @@ static struct irq_chip eiointc_irq_chip = {
> .irq_set_affinity = eiointc_set_irq_affinity,
> };
>
> +static struct irq_chip eiointc_irq_chip_single = {
> + .name = "EIOINTC-S",
> + .irq_ack = eiointc_ack_irq,
> + .irq_mask = eiointc_mask_irq,
> + .irq_unmask = eiointc_unmask_irq,
> +#ifdef CONFIG_SMP
> + .irq_set_affinity = eiointc_single_set_irq_affinity,
> +#endif
> +};
> +
> static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
> unsigned int nr_irqs, void *arg)
> {
> int ret;
> unsigned int i, type;
> unsigned long hwirq = 0;
> - struct eiointc *priv = domain->host_data;
> + struct eiointc_priv *priv = domain->host_data;
> + struct irq_chip *chip;
>
> ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
> if (ret)
> return ret;
>
> - for (i = 0; i < nr_irqs; i++) {
> - irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
> + /*
> + * use simple irq routing for single-node eiointc
> + */
> + if (nodes_weight(priv->node_map) == 1)
> + chip = &eiointc_irq_chip_single;
> + else
> + chip = &eiointc_irq_chip;
> + for (i = 0; i < nr_irqs; i++)
> + irq_domain_set_info(domain, virq + i, hwirq + i, chip,
> priv, handle_edge_irq, NULL, NULL);
> - }
>
> return 0;
> }
> @@ -309,6 +368,7 @@ static void eiointc_resume(void)
> int i, j;
> struct irq_desc *desc;
> struct irq_data *irq_data;
> + struct irq_chip *chip;
>
> eiointc_router_init(0);
>
> @@ -318,7 +378,8 @@ static void eiointc_resume(void)
> if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
> raw_spin_lock(&desc->lock);
> irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
> - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
> + chip = irq_data_get_irq_chip(irq_data);
> + chip->irq_set_affinity(irq_data, irq_data->common->affinity, 0);
> raw_spin_unlock(&desc->lock);
> }
> }