[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <87ldskwdyb.ffs@tglx>
Date: Tue, 01 Apr 2025 09:14:36 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: Tianyang Zhang <zhangtianyang@...ngson.cn>, chenhuacai@...nel.org,
kernel@...0n.name, corbet@....net, alexs@...nel.org, si.yanteng@...ux.dev,
jiaxun.yang@...goat.com, peterz@...radead.org, wangliupu@...ngson.cn,
lvjianmin@...ngson.cn, maobibo@...ngson.cn, siyanteng@...oftware.com.cn,
gaosong@...ngson.cn, yangtiezhu@...ngson.cn
Cc: loongarch@...ts.linux.dev, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, Tianyang Zhang <zhangtianyang@...ngson.cn>
Subject: Re: [PATCH v2 2/2] irq/irq-loongarch-ir:Add Redirect irqchip support
On Mon, Mar 31 2025 at 14:41, Tianyang Zhang wrote:
> irq_data_update_effective_affinity(data, cpumask_of(cpu));
> @@ -242,6 +233,7 @@ static void avecintc_irq_dispatch(struct irq_desc *desc)
> d = this_cpu_read(irq_map[vector]);
> if (d) {
> generic_handle_irq_desc(d);
> +
Stray newline.
> } else {
> +
> +static void invalid_enqueue(struct redirect_queue *rqueue, struct irde_inv_cmd *cmd)
> +{
> + struct irde_inv_cmd *inv_addr;
> + u32 tail;
> +
> + guard(raw_spinlock_irqsave)(&rqueue->lock);
> +
> + while (invalid_queue_is_full(rqueue->node, &tail))
> + cpu_relax();
> +
> + inv_addr = (struct irde_inv_cmd *)(rqueue->base + tail * sizeof(struct irde_inv_cmd));
> + memcpy(inv_addr, cmd, sizeof(struct irde_inv_cmd));
> + tail = (tail + 1) % INVALID_QUEUE_SIZE;
> +
> + wmb();
Undocumented memory barrier.
> +
> + iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);
> +}
> +
> +static void smp_call_invalid_enqueue(void *arg)
> +{
> + struct smp_invalid_arg *s_arg = (struct smp_invalid_arg *)arg;
> +
> + invalid_enqueue(s_arg->queue, s_arg->cmd);
> +}
> +
> +static void irde_invlid_entry_node(struct redirect_item *item)
> +{
> + struct redirect_queue *rqueue;
> + struct smp_invalid_arg arg;
> + struct irde_inv_cmd cmd;
> + volatile u64 raddr = 0;
> + int node = item->table->node, cpu;
> +
> + rqueue = &(irde_descs[node].inv_queue);
> + cmd.cmd_info = 0;
> + cmd.index.type = INVALID_INDEX;
> + cmd.index.need_notice = 1;
> + cmd.index.index = item->index;
> + cmd.notice_addr = (u64)(__pa(&raddr));
> +
> + if (cpu_to_node(smp_processor_id()) == node)
> + invalid_enqueue(rqueue, &cmd);
> + else {
The if () branch lacks brackets.
> + for_each_cpu(cpu, cpumask_of_node(node)) {
> + if (cpu_online(cpu))
> + break;
> + }
> + arg.queue = rqueue;
> + arg.cmd = &cmd;
> + smp_call_function_single(cpu, smp_call_invalid_enqueue, &arg, 0);
> + }
> +
> + while (!raddr)
> + cpu_relax();
> +
> +}
> +static int redirect_table_free(struct redirect_item *item)
> +{
> + struct redirect_table *ird_table;
> + struct redirect_entry *entry;
> + unsigned long flags;
> +
> + ird_table = item->table;
> +
> + entry = item->entry;
> + memset(entry, 0, sizeof(struct redirect_entry));
> +
> + raw_spin_lock_irqsave(&ird_table->lock, flags);
> + bitmap_release_region(ird_table->bitmap, item->index, 0);
> + raw_spin_unlock_irqrestore(&ird_table->lock, flags);
scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
bitmap_release_region(ird_table->bitmap, item->index, 0);
> +static int redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
> +{
> + struct redirect_item *item = data->chip_data;
> + struct avecintc_data *adata;
> + int ret;
> +
> + ret = irq_chip_set_affinity_parent(data, dest, force);
> + if (ret == IRQ_SET_MASK_OK_DONE)
> + return IRQ_SET_MASK_OK;
Again, the bracket rules apply here. Also, what is this return value translation about?
> + else if (ret) {
> + pr_err("IRDE:set_affinity error %d\n", ret);
> + return ret;
> + }
> +
> + adata = irq_data_get_avec_data(data);
> +
> + redirect_domain_prepare_entry(item, adata);
> +
> + irde_invlid_entry_node(item);
This cannot work when irde_invlid_entry_node() goes into the SMP
function call path, because smp_call_function_single() cannot be invoked
with interrupts disabled.
> +
> +static void redirect_free_resources(struct irq_domain *domain,
> + unsigned int virq, unsigned int nr_irqs)
Please align the arguments as documented.
> +{
> + struct irq_data *irq_data;
> + struct redirect_item *item;
> +
Please move the variables into the scope where they are used
> + for (int i = 0; i < nr_irqs; i++) {
> + irq_data = irq_domain_get_irq_data(domain, virq + i);
> + if (irq_data && irq_data->chip_data) {
> + item = irq_data->chip_data;
> + redirect_table_free(item);
> + kfree(item);
> + }
> + }
> +}
> +
> +static int redirect_alloc(struct irq_domain *domain,
> + unsigned int virq, unsigned int nr_irqs,
> + void *arg)
More random coding style
> +{
> + struct redirect_table *ird_table;
> + struct avecintc_data *avec_data;
> + struct irq_data *irq_data;
> + int ret, i, node;
> +
> +#ifdef CONFIG_NUMA
> + node = ((msi_alloc_info_t *)arg)->desc->dev->numa_node;
Bah.
> +#else
> + node = 0;
> +#endif
msi_alloc_info_t *info = arg;
node = dev_to_node(info->desc->dev);
> +static int redirect_table_init(int node)
> +{
> + struct redirect_table *ird_table = &(irde_descs[node].ird_table);
> + struct page *pages;
> + unsigned long *bitmap;
Use proper reverse fir tree ordering
Thanks,
tglx
Powered by blists - more mailing lists