[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date: Tue, 12 Dec 2023 18:22:20 +0000
From: Souradeep Chakrabarti <schakrabarti@...rosoft.com>
To: Suman Ghosh <sumang@...vell.com>, Souradeep Chakrabarti
<schakrabarti@...ux.microsoft.com>, KY Srinivasan <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>, "wei.liu@...nel.org"
<wei.liu@...nel.org>, Dexuan Cui <decui@...rosoft.com>, "davem@...emloft.net"
<davem@...emloft.net>, "edumazet@...gle.com" <edumazet@...gle.com>,
"kuba@...nel.org" <kuba@...nel.org>, "pabeni@...hat.com" <pabeni@...hat.com>,
Long Li <longli@...rosoft.com>, "yury.norov@...il.com"
<yury.norov@...il.com>, "leon@...nel.org" <leon@...nel.org>,
"cai.huoqing@...ux.dev" <cai.huoqing@...ux.dev>,
"ssengar@...ux.microsoft.com" <ssengar@...ux.microsoft.com>,
"vkuznets@...hat.com" <vkuznets@...hat.com>, "tglx@...utronix.de"
<tglx@...utronix.de>, "linux-hyperv@...r.kernel.org"
<linux-hyperv@...r.kernel.org>, "netdev@...r.kernel.org"
<netdev@...r.kernel.org>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>, "linux-rdma@...r.kernel.org"
<linux-rdma@...r.kernel.org>
CC: Paul Rosswurm <paulros@...rosoft.com>
Subject: RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ affinity on HT
cores
>-----Original Message-----
>From: Suman Ghosh <sumang@...vell.com>
>Sent: Tuesday, December 12, 2023 11:48 PM
>To: Souradeep Chakrabarti <schakrabarti@...ux.microsoft.com>; KY Srinivasan
><kys@...rosoft.com>; Haiyang Zhang <haiyangz@...rosoft.com>;
>wei.liu@...nel.org; Dexuan Cui <decui@...rosoft.com>; davem@...emloft.net;
>edumazet@...gle.com; kuba@...nel.org; pabeni@...hat.com; Long Li
><longli@...rosoft.com>; yury.norov@...il.com; leon@...nel.org;
>cai.huoqing@...ux.dev; ssengar@...ux.microsoft.com; vkuznets@...hat.com;
>tglx@...utronix.de; linux-hyperv@...r.kernel.org; netdev@...r.kernel.org; linux-
>kernel@...r.kernel.org; linux-rdma@...r.kernel.org
>Cc: Souradeep Chakrabarti <schakrabarti@...rosoft.com>; Paul Rosswurm
><paulros@...rosoft.com>
>Subject: [EXTERNAL] RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ
>affinity on HT cores
>
>[Some people who received this message don't often get email from
>sumang@...vell.com. Learn why this is important at
>https://aka.ms/LearnAboutSenderIdentification ]
>
>Hi Souradeep,
>
>Please find inline a couple of comments.
>
>>+
>>+ if (!zalloc_cpumask_var(&curr, GFP_KERNEL)) {
>>+ err = -ENOMEM;
>>+ return err;
>>+ }
>>+ if (!zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
>[Suman] memory leak here, should free 'curr'.
This will be taken care of in the next version.
>>+ err = -ENOMEM;
>>+ return err;
>>+ }
>>+
>>+ rcu_read_lock();
>>+ for_each_numa_hop_mask(next, next_node) {
>>+ cpumask_andnot(curr, next, prev);
>>+ for (w = cpumask_weight(curr), cnt = 0; cnt < w; ) {
>>+ cpumask_copy(cpus, curr);
>>+ for_each_cpu(cpu, cpus) {
>>+ irq_set_affinity_and_hint(irqs[i],
>>topology_sibling_cpumask(cpu));
>>+ if (++i == nvec)
>>+ goto done;
>>+ cpumask_andnot(cpus, cpus,
>>topology_sibling_cpumask(cpu));
>>+ ++cnt;
>>+ }
>>+ }
>>+ prev = next;
>>+ }
>>+done:
>>+ rcu_read_unlock();
>>+ free_cpumask_var(curr);
>>+ free_cpumask_var(cpus);
>>+ return err;
>>+}
>>+
>> static int mana_gd_setup_irqs(struct pci_dev *pdev) {
>>- unsigned int max_queues_per_port = num_online_cpus();
>> struct gdma_context *gc = pci_get_drvdata(pdev);
>>+ unsigned int max_queues_per_port;
>> struct gdma_irq_context *gic;
>> unsigned int max_irqs, cpu;
>>- int nvec, irq;
>>+ int start_irq_index = 1;
>>+ int nvec, *irqs, irq;
>> int err, i = 0, j;
>>
>>+ cpus_read_lock();
>>+ max_queues_per_port = num_online_cpus();
>> if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
>> max_queues_per_port = MANA_MAX_NUM_QUEUES;
>>
>>@@ -1261,6 +1302,14 @@ static int mana_gd_setup_irqs(struct pci_dev
>>*pdev)
>> nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
>> if (nvec < 0)
>[Suman] cpus_read_unlock()?
Thanks for pointing this out; it will be taken care of in V6.
>> return nvec;
>>+ if (nvec <= num_online_cpus())
>>+ start_irq_index = 0;
>>+
>>+ irqs = kmalloc_array((nvec - start_irq_index), sizeof(int),
>>GFP_KERNEL);
>>+ if (!irqs) {
>>+ err = -ENOMEM;
>>+ goto free_irq_vector;
>>+ }
>>
>> gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
>> GFP_KERNEL); @@ -1287,21 +1336,44 @@
>>static int mana_gd_setup_irqs(struct pci_dev
>>*pdev)
>> goto free_irq;
>> }
>>
>>- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>- if (err)
>>- goto free_irq;
>>-
>>- cpu = cpumask_local_spread(i, gc->numa_node);
>>- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+ if (!i) {
>>+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>+ if (err)
>>+ goto free_irq;
>>+
>>+ /* If number of IRQ is one extra than number of
>>+ online
>>CPUs,
>>+ * then we need to assign IRQ0 (hwc irq) and IRQ1 to
>>+ * same CPU.
>>+ * Else we will use different CPUs for IRQ0 and IRQ1.
>>+ * Also we are using cpumask_local_spread instead of
>>+ * cpumask_first for the node, because the node can be
>>+ * mem only.
>>+ */
>>+ if (start_irq_index) {
>>+ cpu = cpumask_local_spread(i, gc->numa_node);
>>+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+ } else {
>>+ irqs[start_irq_index] = irq;
>>+ }
>>+ } else {
>>+ irqs[i - start_irq_index] = irq;
>>+ err = request_irq(irqs[i - start_irq_index],
>>mana_gd_intr, 0,
>>+ gic->name, gic);
>>+ if (err)
>>+ goto free_irq;
>>+ }
>> }
>>
>>+ err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
>>+ if (err)
>>+ goto free_irq;
>> err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
>> if (err)
>> goto free_irq;
>>
>> gc->max_num_msix = nvec;
>> gc->num_msix_usable = nvec;
>>-
>>+ cpus_read_unlock();
>> return 0;
>>
>> free_irq:
>>@@ -1314,8 +1386,10 @@ static int mana_gd_setup_irqs(struct pci_dev
>>*pdev)
>> }
>>
>> kfree(gc->irq_contexts);
>>+ kfree(irqs);
>> gc->irq_contexts = NULL;
>> free_irq_vector:
>>+ cpus_read_unlock();
>> pci_free_irq_vectors(pdev);
>> return err;
>> }
>>--
>>2.34.1
>>
Powered by blists - more mailing lists