Message-ID: <a916e71b-de79-391a-55be-f8d7583ca344@loongson.cn>
Date: Thu, 17 Apr 2025 10:19:39 +0800
From: Tianyang Zhang <zhangtianyang@...ngson.cn>
To: Thomas Gleixner <tglx@...utronix.de>, chenhuacai@...nel.org,
kernel@...0n.name, corbet@....net, alexs@...nel.org, si.yanteng@...ux.dev,
jiaxun.yang@...goat.com, peterz@...radead.org, wangliupu@...ngson.cn,
lvjianmin@...ngson.cn, maobibo@...ngson.cn, siyanteng@...oftware.com.cn,
gaosong@...ngson.cn, yangtiezhu@...ngson.cn
Cc: loongarch@...ts.linux.dev, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 2/2] irq/irq-loongarch-ir:Add Redirect irqchip support
Hi, Thomas
On 2025/4/1 3:14 PM, Thomas Gleixner wrote:
> On Mon, Mar 31 2025 at 14:41, Tianyang Zhang wrote:
>> irq_data_update_effective_affinity(data, cpumask_of(cpu));
>> @@ -242,6 +233,7 @@ static void avecintc_irq_dispatch(struct irq_desc *desc)
>> d = this_cpu_read(irq_map[vector]);
>> if (d) {
>> generic_handle_irq_desc(d);
>> +
> Stray newline.
OK, I got it; I will drop the stray newline.
>
>> } else {
>> +
>> +static void invalid_enqueue(struct redirect_queue *rqueue, struct irde_inv_cmd *cmd)
>> +{
>> + struct irde_inv_cmd *inv_addr;
>> + u32 tail;
>> +
>> + guard(raw_spinlock_irqsave)(&rqueue->lock);
>> +
>> + while (invalid_queue_is_full(rqueue->node, &tail))
>> + cpu_relax();
>> +
>> + inv_addr = (struct irde_inv_cmd *)(rqueue->base + tail * sizeof(struct irde_inv_cmd));
>> + memcpy(inv_addr, cmd, sizeof(struct irde_inv_cmd));
>> + tail = (tail + 1) % INVALID_QUEUE_SIZE;
>> +
>> + wmb();
> Undocumented memory barrier.
OK, I got it; I will add a comment documenting what the barrier orders.
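For v3 I intend to document it roughly like this (just a sketch, the
comment wording may still change):

	/*
	 * Make sure the command copied into the queue slot is visible
	 * in memory before the tail update below; the IOCSR write
	 * tells the IRDE hardware that a new slot is ready, and it
	 * then fetches the command from memory.
	 */
	wmb();

	iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);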
>
>> +
>> + iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);
>> +}
>> +
>> +static void smp_call_invalid_enqueue(void *arg)
>> +{
>> + struct smp_invalid_arg *s_arg = (struct smp_invalid_arg *)arg;
>> +
>> + invalid_enqueue(s_arg->queue, s_arg->cmd);
>> +}
>> +
>> +static void irde_invlid_entry_node(struct redirect_item *item)
>> +{
>> + struct redirect_queue *rqueue;
>> + struct smp_invalid_arg arg;
>> + struct irde_inv_cmd cmd;
>> + volatile u64 raddr = 0;
>> + int node = item->table->node, cpu;
>> +
>> + rqueue = &(irde_descs[node].inv_queue);
>> + cmd.cmd_info = 0;
>> + cmd.index.type = INVALID_INDEX;
>> + cmd.index.need_notice = 1;
>> + cmd.index.index = item->index;
>> + cmd.notice_addr = (u64)(__pa(&raddr));
>> +
>> + if (cpu_to_node(smp_processor_id()) == node)
>> + invalid_enqueue(rqueue, &cmd);
>> + else {
> if () lacks brackets
OK, I got it; I will add braces to both branches.
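Concretely, something like this (a sketch showing only the brace
change; the else body itself is as in the quoted hunk below):

	if (cpu_to_node(smp_processor_id()) == node) {
		invalid_enqueue(rqueue, &cmd);
	} else {
		/* remote-node path, see the hunk below */
		...
	}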
>
>> + for_each_cpu(cpu, cpumask_of_node(node)) {
>> + if (cpu_online(cpu))
>> + break;
>> + }
>> + arg.queue = rqueue;
>> + arg.cmd = &cmd;
>> + smp_call_function_single(cpu, smp_call_invalid_enqueue, &arg, 0);
>> + }
>> +
>> + while (!raddr)
>> + cpu_relax();
>> +
>> +}
>
>> +static int redirect_table_free(struct redirect_item *item)
>> +{
>> + struct redirect_table *ird_table;
>> + struct redirect_entry *entry;
>> + unsigned long flags;
>> +
>> + ird_table = item->table;
>> +
>> + entry = item->entry;
>> + memset(entry, 0, sizeof(struct redirect_entry));
>> +
>> + raw_spin_lock_irqsave(&ird_table->lock, flags);
>> + bitmap_release_region(ird_table->bitmap, item->index, 0);
>> + raw_spin_unlock_irqrestore(&ird_table->lock, flags);
> scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
> bitmap_release_region(ird_table->bitmap, item->index, 0);
Initially I thought there were no complex branches here, so I used
raw_spin_lock_irqsave() directly. However, for a consistent style with
the rest of the driver, scoped_guard() is indeed more appropriate; I
will convert it.
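So the free path would become (matching the guard() usage in
invalid_enqueue() above):

	scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
		bitmap_release_region(ird_table->bitmap, item->index, 0);

scoped_guard() also keeps the critical section limited to the bitmap
update, whereas guard() would hold the lock until the function
returns, which is not needed here.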
>
>> +static int redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
>> +{
>> + struct redirect_item *item = data->chip_data;
>> + struct avecintc_data *adata;
>> + int ret;
>> +
>> + ret = irq_chip_set_affinity_parent(data, dest, force);
>> + if (ret == IRQ_SET_MASK_OK_DONE)
>> + return IRQ_SET_MASK_OK;
> Again bracket rules. Also what is this return value translation about?
My initial aim was to keep the return value consistent with the
previous one. I'll study the difference between IRQ_SET_MASK_OK and
IRQ_SET_MASK_OK_DONE and make the appropriate change. Thank you.
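If I read other stacked irqchip drivers correctly, the usual pattern
is to propagate the parent's return value instead of translating it,
since IRQ_SET_MASK_OK_DONE tells the callers further up that
everything has already been handled. So probably something like this
(to be confirmed; the rest of the function would stay the same):

	ret = irq_chip_set_affinity_parent(data, dest, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;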
>
>> + else if (ret) {
>> + pr_err("IRDE:set_affinity error %d\n", ret);
>> + return ret;
>> + }
>> +
>> + adata = irq_data_get_avec_data(data);
>> +
>> + redirect_domain_prepare_entry(item, adata);
>> +
>> + irde_invlid_entry_node(item);
> This cannot work when irde_invlid_entry_node() goes into the SMP
> function call path, because smp_call_function_single() cannot be invoked
> with interrupts disabled.
My oversight. set_affinity() is called with the descriptor lock held
and interrupts disabled, so the smp_call_function_single() path is
indeed broken. I'll rethink this part of the design. Thanks for your
advice.
>> +{
>> + struct redirect_table *ird_table;
>> + struct avecintc_data *avec_data;
>> + struct irq_data *irq_data;
>> + int ret, i, node;
>> +
>> +#ifdef CONFIG_NUMA
>> + node = ((msi_alloc_info_t *)arg)->desc->dev->numa_node;
> Bah.
>
>> +#else
>> + node = 0;
>> +#endif
> msi_alloc_info_t *info = arg;
>
> node = dev_to_node(info->desc->dev);
This is appropriate and safe, and it also lets me drop the CONFIG_NUMA
#ifdef. Thank you.
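So the allocation path would become roughly (a sketch; the
NUMA_NO_NODE fallback is still an open question on my side, since the
node is used as an index into irde_descs[]):

	msi_alloc_info_t *info = arg;
	int node = dev_to_node(info->desc->dev);

	/* dev_to_node() may return NUMA_NO_NODE; fall back to node 0 for now */
	if (node == NUMA_NO_NODE)
		node = 0;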
>
>> +static int redirect_table_init(int node)
>> +{
>> + struct redirect_table *ird_table = &(irde_descs[node].ird_table);
>> + struct page *pages;
>> + unsigned long *bitmap;
> Use proper reverse fir tree ordering
>
> Thanks,
>
> tglx
I'll also fix the declaration ordering and the remaining code style
issues.
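For example, the declarations in redirect_table_init() would be
reordered longest to shortest:

	struct redirect_table *ird_table = &(irde_descs[node].ird_table);
	unsigned long *bitmap;
	struct page *pages;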
Thank you for your reply.
Tianyang