lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 22 Sep 2016 10:51:17 +0200
From:   Alexander Gordeev <agordeev@...hat.com>
To:     Christoph Hellwig <hch@....de>
Cc:     axboe@...com, tglx@...utronix.de, keith.busch@...el.com,
        linux-block@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 03/13] genirq/msi: Switch to new irq spreading
 infrastructure

On Wed, Sep 14, 2016 at 04:18:49PM +0200, Christoph Hellwig wrote:
>  static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
> -			      struct msix_entry *entries, int nvec)
> +			      struct msix_entry *entries, int nvec,
> +			      bool affinity)
>  {
> -	const struct cpumask *mask = NULL;
> +	struct cpumask *curmsk, *masks = NULL;
>  	struct msi_desc *entry;
> -	int cpu = -1, i;
> -
> -	for (i = 0; i < nvec; i++) {
> -		if (dev->irq_affinity) {
> -			cpu = cpumask_next(cpu, dev->irq_affinity);
> -			if (cpu >= nr_cpu_ids)
> -				cpu = cpumask_first(dev->irq_affinity);
> -			mask = cpumask_of(cpu);
> -		}
> +	int ret, i;
>  
> -		entry = alloc_msi_entry(&dev->dev, 1, NULL);
> +	if (affinity) {
> +		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
> +		if (!masks)
> +			pr_err("Unable to allocate affinity masks, ignoring\n");

Okay, so if we can tolerate an affinity-mask allocation failure here, then we
should be able to tolerate it everywhere. Therefore, this piece of code
(which I pointed out in my other mail) in __pci_enable_msi_range() should not
bail out:

		if (affinity) {
			nvec = irq_calc_affinity_vectors(dev->irq_affinity,
					nvec);
			if (nvec < minvec)
				return -ENOSPC;
		}

> +	}
> +
> +	for (i = 0, curmsk = masks; i < nvec; i++) {
> +		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
>  		if (!entry) {
>  			if (!i)
>  				iounmap(base);
>  			else
>  				free_msi_irqs(dev);
>  			/* No enough memory. Don't try again */
> -			return -ENOMEM;
> +			ret = -ENOMEM;
> +			goto out;
>  		}
>  
>  		entry->msi_attrib.is_msix	= 1;
> @@ -710,11 +720,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
>  			entry->msi_attrib.entry_nr = i;
>  		entry->msi_attrib.default_irq	= dev->irq;
>  		entry->mask_base		= base;
> -		entry->affinity			= mask;
>  
>  		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
> +		if (masks)
> +			curmsk++;
>  	}
> -
> +	ret = 0;
> +out:
> +	kfree(masks);
>  	return 0;

	return ret;

>  }

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ