lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:   Sun, 15 Apr 2018 18:46:42 -0500
From:   Thomas Falcon <tlfalcon@...ux.vnet.ibm.com>
To:     netdev@...r.kernel.org
Cc:     linuxppc-dev@...ts.ozlabs.org, jallen@...ux.vnet.ibm.com,
        nfont@...ux.vnet.ibm.com, benh@...nel.crashing.org
Subject: Re: [PATCH] ibmvnic: Clear pending interrupt after device reset

On 04/15/2018 06:27 PM, Thomas Falcon wrote:
> Due to a firmware bug, the hypervisor can send an interrupt to a
> transmit or receive queue just prior to a partition migration, not
> allowing the device enough time to handle it and send an EOI. When
> the partition migrates, the interrupt is lost but an "EOI-pending"
> flag for the interrupt line is still set in firmware. No further
> interrupts will be sent until that flag is cleared, effectively
> freezing that queue. To work around this, the driver will disable the
> hardware interrupt and send an H_EOI signal prior to re-enabling it.
> This will flush the pending EOI and allow the driver to continue
> operation.

Excuse me, I misspelled the linuxppc-dev email address.

Tom

> Signed-off-by: Thomas Falcon <tlfalcon@...ux.vnet.ibm.com>
> ---
>  drivers/net/ethernet/ibm/ibmvnic.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index f84a920..ef7995fc 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -1034,16 +1034,14 @@ static int __ibmvnic_open(struct net_device *netdev)
>  		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
>  		if (prev_state == VNIC_CLOSED)
>  			enable_irq(adapter->rx_scrq[i]->irq);
> -		else
> -			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
> +		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
>  	}
>
>  	for (i = 0; i < adapter->req_tx_queues; i++) {
>  		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
>  		if (prev_state == VNIC_CLOSED)
>  			enable_irq(adapter->tx_scrq[i]->irq);
> -		else
> -			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
> +		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
>  	}
>
>  	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
> @@ -1184,6 +1182,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
>  			if (adapter->tx_scrq[i]->irq) {
>  				netdev_dbg(netdev,
>  					   "Disabling tx_scrq[%d] irq\n", i);
> +				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
>  				disable_irq(adapter->tx_scrq[i]->irq);
>  			}
>  	}
> @@ -1193,6 +1192,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
>  			if (adapter->rx_scrq[i]->irq) {
>  				netdev_dbg(netdev,
>  					   "Disabling rx_scrq[%d] irq\n", i);
> +				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
>  				disable_irq(adapter->rx_scrq[i]->irq);
>  			}
>  		}
> @@ -2601,12 +2601,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
>  {
>  	struct device *dev = &adapter->vdev->dev;
>  	unsigned long rc;
> +	u64 val;
>
>  	if (scrq->hw_irq > 0x100000000ULL) {
>  		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
>  		return 1;
>  	}
>
> +	val = (0xff000000) | scrq->hw_irq;
> +	rc = plpar_hcall_norets(H_EOI, val);
> +	if (rc)
> +		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
> +			val, rc);
> +
>  	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
>  				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
>  	if (rc)


Powered by blists - more mailing lists