Date:   Fri, 26 Mar 2021 09:07:00 +0000
From:   "Ong, Boon Leong" <boon.leong.ong@...el.com>
To:     "Voon, Weifeng" <weifeng.voon@...el.com>,
        "David S . Miller" <davem@...emloft.net>,
        Maxime Coquelin <mcoquelin.stm32@...il.com>
CC:     "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Jose Abreu <joabreu@...opsys.com>,
        Jakub Kicinski <kuba@...nel.org>,
        Giuseppe Cavallaro <peppe.cavallaro@...com>,
        Andrew Lunn <andrew@...n.ch>,
        Alexandre Torgue <alexandre.torgue@...com>,
        "linux-stm32@...md-mailman.stormreply.com" 
        <linux-stm32@...md-mailman.stormreply.com>,
        "linux-arm-kernel@...ts.infradead.org" 
        <linux-arm-kernel@...ts.infradead.org>,
        "Wong, Vee Khee" <vee.khee.wong@...el.com>
Subject: RE: [PATCH v2 net-next 4/5] stmmac: intel: add support for
 multi-vector msi and msi-x

>+static int stmmac_config_multi_msi(struct pci_dev *pdev,
>+				   struct plat_stmmacenet_data *plat,
>+				   struct stmmac_resources *res)
>+{
For optimal processing of each RX & TX queue pair on the same CPU, we should use
irq_set_affinity_hint() to set the corresponding RXQ and TXQ IRQs to the same CPU.
This will benefit the upcoming XDP TX and XDP TX ZC processing. For example
(a consolidated sketch also follows the quoted function below):

cpumask_t cpu_mask;

>+	int ret;
>+	int i;
>+
>+	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
>+	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
>+		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
>+			 __func__);
>+		return -1;
>+	}
>+
>+	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
>+				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
>+	if (ret < 0) {
>+		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
>+			 __func__);
>+		return ret;
>+	}
>+
>+	/* For RX MSI */
>+	for (i = 0; i < plat->rx_queues_to_use; i++) {
>+		res->rx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_rx_base_vec + i * 2);

		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->rx_irq[i], &cpu_mask);

>+	}
>+
>+	/* For TX MSI */
>+	for (i = 0; i < plat->tx_queues_to_use; i++) {
>+		res->tx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_tx_base_vec + i * 2);

		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->tx_irq[i], &cpu_mask);

>+	}
>+
>+	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
>+		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
>+	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
>+		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
>+	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
>+		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
>+	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
>+	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
>+
>+	plat->multi_msi_en = 1;
>+	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n",
>+		 __func__);
>+
>+	return 0;
>+}
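
For reference, here is a minimal consolidated sketch of the affinity-hint
suggestion above, applied to the RX loop only (the TX loop would follow the
same pattern). The plat/res fields and the interleaved "base + 2 * queue"
vector layout are taken from the quoted patch; the helper name below is made
up purely for illustration and is not proposed as the final code:

/* Sketch only: assumes <linux/pci.h>, <linux/cpumask.h> and
 * <linux/interrupt.h> are already pulled in by the driver.
 */
static void stmmac_rx_irq_affinity_sketch(struct pci_dev *pdev,
					  struct plat_stmmacenet_data *plat,
					  struct stmmac_resources *res)
{
	cpumask_t cpu_mask;
	int i;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		/* RX vectors are interleaved: base + 2 * queue index */
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);

		/* Spread queues round-robin across the online CPUs so that
		 * RXQ i and TXQ i get hinted to the same CPU.
		 */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->rx_irq[i], &cpu_mask);
	}
}

If the hint is set this way, the driver would presumably also want to clear it
with irq_set_affinity_hint(irq, NULL) before the vectors are freed on remove.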

