Date:   Tue, 21 Apr 2020 15:13:20 +0000
From:   <Claudiu.Beznea@...rochip.com>
To:     <Nicolas.Ferre@...rochip.com>,
        <linux-arm-kernel@...ts.infradead.org>, <netdev@...r.kernel.org>,
        <harini.katakam@...inx.com>
CC:     <linux-kernel@...r.kernel.org>, <davem@...emloft.net>,
        <alexandre.belloni@...tlin.com>, <sergio.prado@...abworks.com>,
        <antoine.tenart@...tlin.com>, <f.fainelli@...il.com>,
        <linux@...linux.org.uk>, <andrew@...n.ch>,
        <michal.simek@...inx.com>
Subject: Re: [PATCH v2 6/7] net: macb: WoL support for GEM type of Ethernet
 controller

Hi Nicolas,

On 21.04.2020 13:41, nicolas.ferre@...rochip.com wrote:
> From: Nicolas Ferre <nicolas.ferre@...rochip.com>
> 
> Adapt the Wake-on-LAN feature to the Cadence GEM Ethernet controller.
> This controller has a different register layout and cannot be handled
> by the previous code.
> We completely disable interrupts on all queues but queue 0.
> The WoL interrupt is handled in a dedicated interrupt handler,
> installed according to the controller version in use, between the
> suspend() and resume() calls.
> This lowers pressure on the generic interrupt hot path by removing the
> need for two tests on each IRQ: the first to figure out the controller
> revision, the second to know whether the WoL bit is actually set.
> 
> Queue management in the suspend()/resume() functions is inspired by an
> RFC patch from Harini Katakam <harinik@...inx.com>, thanks!
> 
> Cc: Claudiu Beznea <claudiu.beznea@...rochip.com>
> Cc: Harini Katakam <harini.katakam@...inx.com>
> Signed-off-by: Nicolas Ferre <nicolas.ferre@...rochip.com>
> ---
> Changes in v2:
> - Addition of pm_wakeup_event() in WoL IRQ
> - In macb_resume(), removal of setting the MPE bit in NCR register which is not
>   needed in any case: IP is reset on the usual path and kept alive if WoL is used
> - In macb_resume(), complete reset of the controller is kept only for non-WoL
>   case. For the WoL case, we only replace the usual IRQ handler.
> 
>  drivers/net/ethernet/cadence/macb.h      |   3 +
>  drivers/net/ethernet/cadence/macb_main.c | 134 ++++++++++++++++++++---
>  2 files changed, 119 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
> index ab827fb4b6b9..4f1b41569260 100644
> --- a/drivers/net/ethernet/cadence/macb.h
> +++ b/drivers/net/ethernet/cadence/macb.h
> @@ -90,6 +90,7 @@
>  #define GEM_SA3T		0x009C /* Specific3 Top */
>  #define GEM_SA4B		0x00A0 /* Specific4 Bottom */
>  #define GEM_SA4T		0x00A4 /* Specific4 Top */
> +#define GEM_WOL			0x00b8 /* Wake on LAN */
>  #define GEM_EFTSH		0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
>  #define GEM_EFRSH		0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
>  #define GEM_PEFTSH		0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
> @@ -396,6 +397,8 @@
>  #define MACB_PDRSFT_SIZE	1
>  #define MACB_SRI_OFFSET		26 /* TSU Seconds Register Increment */
>  #define MACB_SRI_SIZE		1
> +#define GEM_WOL_OFFSET		28 /* Enable wake-on-lan interrupt */
> +#define GEM_WOL_SIZE		1
>  
>  /* Timer increment fields */
>  #define MACB_TI_CNS_OFFSET	0
> diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
> index 8cf8e21fbb07..56ce39dd1cc0 100644
> --- a/drivers/net/ethernet/cadence/macb_main.c
> +++ b/drivers/net/ethernet/cadence/macb_main.c
> @@ -1513,6 +1513,35 @@ static void macb_tx_restart(struct macb_queue *queue)
>  	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
>  }
>  
> +static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
> +{
> +	struct macb_queue *queue = dev_id;
> +	struct macb *bp = queue->bp;
> +	u32 status;
> +
> +	status = queue_readl(queue, ISR);
> +
> +	if (unlikely(!status))
> +		return IRQ_NONE;
> +
> +	spin_lock(&bp->lock);
> +
> +	if (status & GEM_BIT(WOL)) {
> +		queue_writel(queue, IDR, GEM_BIT(WOL));
> +		gem_writel(bp, WOL, 0);
> +		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
> +			    (unsigned int)(queue - bp->queues),
> +			    (unsigned long)status);
> +		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
> +			queue_writel(queue, ISR, GEM_BIT(WOL));
> +		pm_wakeup_event(&bp->pdev->dev, 0);
> +	}
> +
> +	spin_unlock(&bp->lock);
> +
> +	return IRQ_HANDLED;
> +}
> +
>  static irqreturn_t macb_interrupt(int irq, void *dev_id)
>  {
>  	struct macb_queue *queue = dev_id;
> @@ -3306,6 +3335,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
>  static const struct ethtool_ops gem_ethtool_ops = {
>  	.get_regs_len		= macb_get_regs_len,
>  	.get_regs		= macb_get_regs,
> +	.get_wol		= macb_get_wol,
> +	.set_wol		= macb_set_wol,
>  	.get_link		= ethtool_op_get_link,
>  	.get_ts_info		= macb_get_ts_info,
>  	.get_ethtool_stats	= gem_get_ethtool_stats,
> @@ -4534,20 +4565,54 @@ static int __maybe_unused macb_suspend(struct device *dev)
>  	struct macb_queue *queue = bp->queues;
>  	unsigned long flags;
>  	unsigned int q;
> +	int err;
>  
>  	if (!netif_running(netdev))
>  		return 0;
>  
>  	if (bp->wol & MACB_WOL_ENABLED) {
> -		macb_writel(bp, IER, MACB_BIT(WOL));
> -		macb_writel(bp, WOL, MACB_BIT(MAG));
> -		enable_irq_wake(bp->queues[0].irq);
> -		netif_device_detach(netdev);
> -	} else {
> -		netif_device_detach(netdev);
> +		spin_lock_irqsave(&bp->lock, flags);
> +		/* Flush all status bits */
> +		macb_writel(bp, TSR, -1);
> +		macb_writel(bp, RSR, -1);
>  		for (q = 0, queue = bp->queues; q < bp->num_queues;
> -		     ++q, ++queue)
> -			napi_disable(&queue->napi);
> +		     ++q, ++queue) {
> +			/* Disable all interrupts */
> +			queue_writel(queue, IDR, -1);
> +			queue_readl(queue, ISR);
> +			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
> +				queue_writel(queue, ISR, -1);
> +		}
> +		/* Change interrupt handler and
> +		 * Enable WoL IRQ on queue 0
> +		 */
> +		if (macb_is_gem(bp)) {

Couldn't this block, starting here:

> +			devm_free_irq(dev, bp->queues[0].irq, bp->queues);
> +			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
> +					       IRQF_SHARED, netdev->name, bp->queues);
> +			if (err) {
> +				dev_err(dev,
> +					"Unable to request IRQ %d (error %d)\n",
> +					bp->queues[0].irq, err);
> +				return err;
> +			}

ending here ^ (and the equivalent in the resume function), be avoided by
moving the content of gem_wol_interrupt:

+	if (status & GEM_BIT(WOL)) {
+		queue_writel(queue, IDR, GEM_BIT(WOL));
+		gem_writel(bp, WOL, 0);
+		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(queue, ISR, GEM_BIT(WOL));
+		pm_wakeup_event(&bp->pdev->dev, 0);
+	}

to the beginning of macb_interrupt() and issuing a return right after
pm_wakeup_event()?
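
Roughly something like this at the top of macb_interrupt() (only a
rough, untested sketch of the idea; the macb_is_gem() guard is my
assumption so that the plain MACB path stays untouched):

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	/* Ack the WoL wakeup and bail out early */
	if (macb_is_gem(bp) && (status & GEM_BIT(WOL))) {
		spin_lock(&bp->lock);
		queue_writel(queue, IDR, GEM_BIT(WOL));
		gem_writel(bp, WOL, 0);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, GEM_BIT(WOL));
		pm_wakeup_event(&bp->pdev->dev, 0);
		spin_unlock(&bp->lock);
		return IRQ_HANDLED;
	}

	/* ... existing per-queue handling continues unchanged ... */
}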

Instead of these:

> +			queue_writel(bp->queues, IER, GEM_BIT(WOL));
> +			gem_writel(bp, WOL, MACB_BIT(MAG));
> +		} else {
> +			queue_writel(bp->queues, IER, MACB_BIT(WOL));
> +			macb_writel(bp, WOL, MACB_BIT(MAG));
> +		}

You could use:
		queue_writel(bp->queues, IER, GEM_BIT(WOL));
		macb_or_gem_writel(bp, WOL, MACB_BIT(MAG));
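
For reference, if I recall the macb.h definition correctly,
macb_or_gem_writel() already dispatches on macb_is_gem(), roughly:

	#define macb_or_gem_writel(__bp, __reg, __value)	\
	({							\
		if (macb_is_gem(__bp))				\
			gem_writel(__bp, __reg, __value);	\
		else						\
			macb_writel(__bp, __reg, __value);	\
	})

so the WOL register write would not need the GEM/MACB if/else at all.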

> +		spin_unlock_irqrestore(&bp->lock, flags);
> +
> +		enable_irq_wake(bp->queues[0].irq);
> +	}
> +
> +	netif_device_detach(netdev);
> +	for (q = 0, queue = bp->queues; q < bp->num_queues;
> +	     ++q, ++queue)
> +		napi_disable(&queue->napi);
> +
> +	if (!(bp->wol & MACB_WOL_ENABLED)) {
>  		rtnl_lock();
>  		phylink_stop(bp->phylink);
>  		rtnl_unlock();
> @@ -4575,7 +4640,9 @@ static int __maybe_unused macb_resume(struct device *dev)
>  	struct net_device *netdev = dev_get_drvdata(dev);
>  	struct macb *bp = netdev_priv(netdev);
>  	struct macb_queue *queue = bp->queues;
> +	unsigned long flags;
>  	unsigned int q;
> +	int err;
>  
>  	if (!netif_running(netdev))
>  		return 0;
> @@ -4584,29 +4651,60 @@ static int __maybe_unused macb_resume(struct device *dev)
>  		pm_runtime_force_resume(dev);
>  
>  	if (bp->wol & MACB_WOL_ENABLED) {
> -		macb_writel(bp, IDR, MACB_BIT(WOL));
> -		macb_writel(bp, WOL, 0);
> +		spin_lock_irqsave(&bp->lock, flags);
> +		/* Disable WoL */
> +		if (macb_is_gem(bp)) {
> +			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
> +			gem_writel(bp, WOL, 0);
> +		} else {
> +			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
> +			macb_writel(bp, WOL, 0);
> +		}
> +		/* Clear ISR on queue 0 */
> +		queue_readl(bp->queues, ISR);
> +		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
> +			queue_writel(bp->queues, ISR, -1);
> +		/* Replace interrupt handler on queue 0 */
> +		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
> +		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
> +				       IRQF_SHARED, netdev->name, bp->queues);
> +		if (err) {
> +			dev_err(dev,
> +				"Unable to request IRQ %d (error %d)\n",
> +				bp->queues[0].irq, err);
> +			return err;
> +		}
> +		/* Enable interrupts */
> +		for (q = 0, queue = bp->queues; q < bp->num_queues;
> +		     ++q, ++queue)
> +			queue_writel(queue, IER,
> +				     bp->rx_intr_mask |
> +				     MACB_TX_INT_FLAGS |
> +				     MACB_BIT(HRESP));
> +		spin_unlock_irqrestore(&bp->lock, flags);
> +
>  		disable_irq_wake(bp->queues[0].irq);
> -	} else {
> -		macb_writel(bp, NCR, MACB_BIT(MPE));
> +	}
> +
> +	for (q = 0, queue = bp->queues; q < bp->num_queues;
> +	     ++q, ++queue)
> +		napi_enable(&queue->napi);
>  
> +	if (!(bp->wol & MACB_WOL_ENABLED)) {
>  		if (netdev->hw_features & NETIF_F_NTUPLE)
>  			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
>  
>  		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
>  			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
>  
> -		for (q = 0, queue = bp->queues; q < bp->num_queues;
> -		     ++q, ++queue)
> -			napi_enable(&queue->napi);
> +		macb_init_hw(bp);
> +		macb_set_rx_mode(netdev);
> +		macb_restore_features(bp);

WoL may not even be available in backup and self-refresh (BSR) mode
(present on SAMA5D2 devices), so switching to BSR mode while WoL is
enabled may leave Ethernet unavailable after resume. Did you manage to
test this scenario?

>  		rtnl_lock();
>  		phylink_start(bp->phylink);
>  		rtnl_unlock();
>  	}
>  
> -	macb_init_hw(bp);
> -	macb_set_rx_mode(netdev);
> -	macb_restore_features(bp);
>  	netif_device_attach(netdev);
>  	if (bp->ptp_info)
>  		bp->ptp_info->ptp_init(netdev);
> 
