Message-ID: <CO1PR18MB46661C2F9D7D2882C937DD01A1779@CO1PR18MB4666.namprd18.prod.outlook.com>
Date: Wed, 10 May 2023 13:19:15 +0000
From: Subbaraya Sundeep Bhatta <sbhatta@...vell.com>
To: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@....com>,
        "davem@...emloft.net" <davem@...emloft.net>,
        "edumazet@...gle.com" <edumazet@...gle.com>,
        "kuba@...nel.org" <kuba@...nel.org>,
        "pabeni@...hat.com" <pabeni@...hat.com>,
        "robh+dt@...nel.org" <robh+dt@...nel.org>,
        "krzysztof.kozlowski+dt@...aro.org" <krzysztof.kozlowski+dt@...aro.org>
CC: "linux@...linux.org.uk" <linux@...linux.org.uk>,
        "michal.simek@....com" <michal.simek@....com>,
        "radhey.shyam.pandey@....com" <radhey.shyam.pandey@....com>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        "devicetree@...r.kernel.org" <devicetree@...r.kernel.org>,
        "linux-arm-kernel@...ts.infradead.org" <linux-arm-kernel@...ts.infradead.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "anirudha.sarangi@....com" <anirudha.sarangi@....com>,
        "harini.katakam@....com" <harini.katakam@....com>,
        "git@....com" <git@....com>
Subject: RE: [EXT] [PATCH net-next V3 2/3] net: axienet: Preparatory changes
 for dmaengine support

Hi,

>-----Original Message-----
>From: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@....com>
>Sent: Wednesday, May 10, 2023 2:21 PM
>To: davem@...emloft.net; edumazet@...gle.com; kuba@...nel.org;
>pabeni@...hat.com; robh+dt@...nel.org; krzysztof.kozlowski+dt@...aro.org
>Cc: linux@...linux.org.uk; michal.simek@....com;
>radhey.shyam.pandey@....com; netdev@...r.kernel.org;
>devicetree@...r.kernel.org; linux-arm-kernel@...ts.infradead.org;
>linux-kernel@...r.kernel.org; anirudha.sarangi@....com;
>harini.katakam@....com; sarath.babu.naidu.gaddam@....com;
>git@....com
>Subject: [PATCH net-next V3 2/3] net: axienet: Preparatory changes for
>dmaengine support
>
>The axiethernet driver has in-built DMA programming. The aim is to remove
>the axiethernet axidma programming after some time and instead use the
>dmaengine framework to communicate with the existing Xilinx DMA engine
>controller (xilinx_dma) driver.
>
>Keep the axidma programming code under an AXIENET_USE_DMA check so that
>dmaengine changes can be added later.
>
>Perform minor code reordering to minimize conditional AXIENET_USE_DMA
>checks; there is no functional change.
>
>The driver uses the "dmas" property to identify whether it should use the
>dmaengine framework or the axiethernet axidma programming.
>
>Signed-off-by: Sarath Babu Naidu Gaddam
><sarath.babu.naidu.gaddam@....com>
>---
>Changes in V3:
>1) New Patch.
>---
> drivers/net/ethernet/xilinx/xilinx_axienet.h  |   2 +
> .../net/ethernet/xilinx/xilinx_axienet_main.c | 317 +++++++++++-------
> 2 files changed, 192 insertions(+), 127 deletions(-)
>
>diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h
>b/drivers/net/ethernet/xilinx/xilinx_axienet.h
>index 575ff9de8985..10917d997d27 100644
>--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
>+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
>@@ -435,6 +435,7 @@ struct axidma_bd {
>  * @coalesce_usec_rx:	IRQ coalesce delay for RX
>  * @coalesce_count_tx:	Store the irq coalesce on TX side.
>  * @coalesce_usec_tx:	IRQ coalesce delay for TX
>+ * @has_dmas:	flag to check dmaengine framework usage.
>  */
> struct axienet_local {
> 	struct net_device *ndev;
>@@ -499,6 +500,7 @@ struct axienet_local {
> 	u32 coalesce_usec_rx;
> 	u32 coalesce_count_tx;
> 	u32 coalesce_usec_tx;
>+	u8  has_dmas;

The hardware always has DMA; you are just switching to the dmaengine software
framework. use_dmaengine might be a better name.
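
Something like this untested sketch, reusing the "dmas" property check you
already have in probe:

	/* in struct axienet_local */
	u8  use_dmaengine;	/* set when the "dmas" property is present */

	/* in axienet_probe() */
	if (of_find_property(pdev->dev.of_node, "dmas", NULL))
		lp->use_dmaengine = 1;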

> };
>
> /**
>diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
>b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
>index 3e310b55bce2..8678fc09245a 100644
>--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
>+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
>@@ -54,6 +54,8 @@
>
> #define AXIENET_REGS_N		40
>
>+#define AXIENET_USE_DMA(lp) ((lp)->has_dmas)
>+
IMO lp->has_dmas is already simple enough and there is no need for a macro.
Up to you to remove or keep it.
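
i.e., with the macro gone the call sites would just read (untested, and with
whatever name the field ends up with):

	if (!lp->has_dmas) {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto error_code;
	}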

Thanks,
Sundeep

> /* Match table for of_platform binding */
> static const struct of_device_id axienet_of_match[] = {
> 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
>@@ -588,10 +590,6 @@ static int axienet_device_reset(struct net_device *ndev)
> 	struct axienet_local *lp = netdev_priv(ndev);
> 	int ret;
>
>-	ret = __axienet_device_reset(lp);
>-	if (ret)
>-		return ret;
>-
> 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
> 	lp->options |= XAE_OPTION_VLAN;
> 	lp->options &= (~XAE_OPTION_JUMBO);
>@@ -605,11 +603,17 @@ static int axienet_device_reset(struct net_device *ndev)
> 			lp->options |= XAE_OPTION_JUMBO;
> 	}
>
>-	ret = axienet_dma_bd_init(ndev);
>-	if (ret) {
>-		netdev_err(ndev, "%s: descriptor allocation failed\n",
>-			   __func__);
>-		return ret;
>+	if (!AXIENET_USE_DMA(lp)) {
>+		ret = __axienet_device_reset(lp);
>+		if (ret)
>+			return ret;
>+
>+		ret = axienet_dma_bd_init(ndev);
>+		if (ret) {
>+			netdev_err(ndev, "%s: descriptor allocation failed\n",
>+				   __func__);
>+			return ret;
>+		}
> 	}
>
> 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
>@@ -775,7 +779,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
> }
>
> /**
>- * axienet_start_xmit - Starts the transmission.
>+ * axienet_start_xmit_legacy - Starts the transmission.
>  * @skb:	sk_buff pointer that contains data to be Txed.
>  * @ndev:	Pointer to net_device structure.
>  *
>@@ -788,7 +792,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
>  * it populates AXI Stream Control fields with appropriate values.
>  */
> static netdev_tx_t
>-axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>+axienet_start_xmit_legacy(struct sk_buff *skb, struct net_device *ndev)
> {
> 	u32 ii;
> 	u32 num_frag;
>@@ -890,6 +894,27 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
> 	return NETDEV_TX_OK;
> }
>
>+/**
>+ * axienet_start_xmit - Starts the transmission.
>+ * @skb:        sk_buff pointer that contains data to be Txed.
>+ * @ndev:       Pointer to net_device structure.
>+ *
>+ * Return: NETDEV_TX_OK, on success
>+ *          NETDEV_TX_BUSY, if any of the descriptors are not free
>+ *
>+ * This function is invoked from upper layers to initiate transmission
>+ */
>+static netdev_tx_t
>+axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>+{
>+	struct axienet_local *lp = netdev_priv(ndev);
>+
>+	if (!AXIENET_USE_DMA(lp))
>+		return axienet_start_xmit_legacy(skb, ndev);
>+	else
>+		return NETDEV_TX_BUSY;
>+}
>+
> /**
>  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
>  * @napi:	Pointer to NAPI structure.
>@@ -1124,41 +1149,22 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
> static void axienet_dma_err_handler(struct work_struct *work);
>
> /**
>- * axienet_open - Driver open routine.
>- * @ndev:	Pointer to net_device structure
>+ * axienet_init_legacy_dma - init the dma legacy code.
>+ * @ndev:       Pointer to net_device structure
>  *
>  * Return: 0, on success.
>- *	    non-zero error value on failure
>+ *          non-zero error value on failure
>+ *
>+ * This is the dma initialization code. It also allocates interrupt
>+ * service routines, enables the interrupt lines and ISR handling.
>  *
>- * This is the driver open routine. It calls phylink_start to start the
>- * PHY device.
>- * It also allocates interrupt service routines, enables the interrupt lines
>- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
>- * descriptors are initialized.
>  */
>-static int axienet_open(struct net_device *ndev)
>+
>+static inline int axienet_init_legacy_dma(struct net_device *ndev)
> {
> 	int ret;
> 	struct axienet_local *lp = netdev_priv(ndev);
>
>-	dev_dbg(&ndev->dev, "axienet_open()\n");
>-
>-	/* When we do an Axi Ethernet reset, it resets the complete core
>-	 * including the MDIO. MDIO must be disabled before resetting.
>-	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
>-	 */
>-	axienet_lock_mii(lp);
>-	ret = axienet_device_reset(ndev);
>-	axienet_unlock_mii(lp);
>-
>-	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
>-	if (ret) {
>-		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
>-		return ret;
>-	}
>-
>-	phylink_start(lp->phylink);
>-
> 	/* Enable worker thread for Axi DMA error handling */
> 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
>
>@@ -1192,13 +1198,62 @@ static int axienet_open(struct net_device *ndev)
> err_tx_irq:
> 	napi_disable(&lp->napi_tx);
> 	napi_disable(&lp->napi_rx);
>-	phylink_stop(lp->phylink);
>-	phylink_disconnect_phy(lp->phylink);
> 	cancel_work_sync(&lp->dma_err_task);
> 	dev_err(lp->dev, "request_irq() failed\n");
> 	return ret;
> }
>
>+/**
>+ * axienet_open - Driver open routine.
>+ * @ndev:	Pointer to net_device structure
>+ *
>+ * Return: 0, on success.
>+ *	    non-zero error value on failure
>+ *
>+ * This is the driver open routine. It calls phylink_start to start the
>+ * PHY device.
>+ * It also allocates interrupt service routines, enables the interrupt lines
>+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
>+ * descriptors are initialized.
>+ */
>+static int axienet_open(struct net_device *ndev)
>+{
>+	int ret;
>+	struct axienet_local *lp = netdev_priv(ndev);
>+
>+	dev_dbg(&ndev->dev, "%s\n", __func__);
>+
>+	/* When we do an Axi Ethernet reset, it resets the complete core
>+	 * including the MDIO. MDIO must be disabled before resetting.
>+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
>+	 */
>+	axienet_lock_mii(lp);
>+	ret = axienet_device_reset(ndev);
>+	axienet_unlock_mii(lp);
>+
>+	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
>+	if (ret) {
>+		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
>+		return ret;
>+	}
>+
>+	phylink_start(lp->phylink);
>+
>+	if (!AXIENET_USE_DMA(lp)) {
>+		ret = axienet_init_legacy_dma(ndev);
>+		if (ret)
>+			goto error_code;
>+	}
>+
>+	return 0;
>+
>+error_code:
>+	phylink_stop(lp->phylink);
>+	phylink_disconnect_phy(lp->phylink);
>+
>+	return ret;
>+}
>+
> /**
>  * axienet_stop - Driver stop routine.
>  * @ndev:	Pointer to net_device structure
>@@ -1215,8 +1270,10 @@ static int axienet_stop(struct net_device *ndev)
>
> 	dev_dbg(&ndev->dev, "axienet_close()\n");
>
>-	napi_disable(&lp->napi_tx);
>-	napi_disable(&lp->napi_rx);
>+	if (!AXIENET_USE_DMA(lp)) {
>+		napi_disable(&lp->napi_tx);
>+		napi_disable(&lp->napi_rx);
>+	}
>
> 	phylink_stop(lp->phylink);
> 	phylink_disconnect_phy(lp->phylink);
>@@ -1224,18 +1281,18 @@ static int axienet_stop(struct net_device *ndev)
> 	axienet_setoptions(ndev, lp->options &
> 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
>
>-	axienet_dma_stop(lp);
>+	if (!AXIENET_USE_DMA(lp)) {
>+		axienet_dma_stop(lp);
>+		cancel_work_sync(&lp->dma_err_task);
>+		free_irq(lp->tx_irq, ndev);
>+		free_irq(lp->rx_irq, ndev);
>+		axienet_dma_bd_release(ndev);
>+	}
>
> 	axienet_iow(lp, XAE_IE_OFFSET, 0);
>
>-	cancel_work_sync(&lp->dma_err_task);
>-
> 	if (lp->eth_irq > 0)
> 		free_irq(lp->eth_irq, ndev);
>-	free_irq(lp->tx_irq, ndev);
>-	free_irq(lp->rx_irq, ndev);
>-
>-	axienet_dma_bd_release(ndev);
> 	return 0;
> }
>
>@@ -1411,14 +1468,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
> 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
> 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
> 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
>-	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
>-	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
>-	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
>-	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
>-	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
>-	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
>-	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
>-	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
>+	if (!AXIENET_USE_DMA(lp)) {
>+		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
>+		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
>+		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
>+		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
>+		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
>+		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
>+		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
>+		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
>+	}
> }
>
> static void
>@@ -1878,9 +1937,6 @@ static int axienet_probe(struct platform_device *pdev)
> 	u64_stats_init(&lp->rx_stat_sync);
> 	u64_stats_init(&lp->tx_stat_sync);
>
>-	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
>-	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
>-
> 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
> 	if (!lp->axi_clk) {
> 		/* For backward compatibility, if named AXI clock is not present,
>@@ -2006,75 +2062,80 @@ static int axienet_probe(struct platform_device *pdev)
> 		goto cleanup_clk;
> 	}
>
>-	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
>-	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
>-	if (np) {
>-		struct resource dmares;
>+	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
>+		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
>+		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
>
>-		ret = of_address_to_resource(np, 0, &dmares);
>-		if (ret) {
>-			dev_err(&pdev->dev,
>-				"unable to get DMA resource\n");
>+		if (np) {
>+			struct resource dmares;
>+
>+			ret = of_address_to_resource(np, 0, &dmares);
>+			if (ret) {
>+				dev_err(&pdev->dev,
>+					"unable to get DMA resource\n");
>+				of_node_put(np);
>+				goto cleanup_clk;
>+			}
>+			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
>+							     &dmares);
>+			lp->rx_irq = irq_of_parse_and_map(np, 1);
>+			lp->tx_irq = irq_of_parse_and_map(np, 0);
> 			of_node_put(np);
>+			lp->eth_irq = platform_get_irq_optional(pdev, 0);
>+		} else {
>+			/* Check for these resources directly on the Ethernet node. */
>+			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
>+			lp->rx_irq = platform_get_irq(pdev, 1);
>+			lp->tx_irq = platform_get_irq(pdev, 0);
>+			lp->eth_irq = platform_get_irq_optional(pdev, 2);
>+		}
>+		if (IS_ERR(lp->dma_regs)) {
>+			dev_err(&pdev->dev, "could not map DMA regs\n");
>+			ret = PTR_ERR(lp->dma_regs);
>+			goto cleanup_clk;
>+		}
>+		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
>+			dev_err(&pdev->dev, "could not determine irqs\n");
>+			ret = -ENOMEM;
> 			goto cleanup_clk;
> 		}
>-		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
>-						     &dmares);
>-		lp->rx_irq = irq_of_parse_and_map(np, 1);
>-		lp->tx_irq = irq_of_parse_and_map(np, 0);
>-		of_node_put(np);
>-		lp->eth_irq = platform_get_irq_optional(pdev, 0);
>-	} else {
>-		/* Check for these resources directly on the Ethernet node. */
>-		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
>-		lp->rx_irq = platform_get_irq(pdev, 1);
>-		lp->tx_irq = platform_get_irq(pdev, 0);
>-		lp->eth_irq = platform_get_irq_optional(pdev, 2);
>-	}
>-	if (IS_ERR(lp->dma_regs)) {
>-		dev_err(&pdev->dev, "could not map DMA regs\n");
>-		ret = PTR_ERR(lp->dma_regs);
>-		goto cleanup_clk;
>-	}
>-	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
>-		dev_err(&pdev->dev, "could not determine irqs\n");
>-		ret = -ENOMEM;
>-		goto cleanup_clk;
>-	}
>
>-	/* Autodetect the need for 64-bit DMA pointers.
>-	 * When the IP is configured for a bus width bigger than 32 bits,
>-	 * writing the MSB registers is mandatory, even if they are all 0.
>-	 * We can detect this case by writing all 1's to one such register
>-	 * and see if that sticks: when the IP is configured for 32 bits
>-	 * only, those registers are RES0.
>-	 * Those MSB registers were introduced in IP v7.1, which we check first.
>-	 */
>-	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
>-		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
>-
>-		iowrite32(0x0, desc);
>-		if (ioread32(desc) == 0) {	/* sanity check */
>-			iowrite32(0xffffffff, desc);
>-			if (ioread32(desc) > 0) {
>-				lp->features |= XAE_FEATURE_DMA_64BIT;
>-				addr_width = 64;
>-				dev_info(&pdev->dev,
>-					 "autodetected 64-bit DMA range\n");
>-			}
>+		/* Autodetect the need for 64-bit DMA pointers.
>+		 * When the IP is configured for a bus width bigger than 32 bits,
>+		 * writing the MSB registers is mandatory, even if they are all 0.
>+		 * We can detect this case by writing all 1's to one such register
>+		 * and see if that sticks: when the IP is configured for 32 bits
>+		 * only, those registers are RES0.
>+		 * Those MSB registers were introduced in IP v7.1, which we check first.
>+		 */
>+		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
>+			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
>+
> 			iowrite32(0x0, desc);
>+			if (ioread32(desc) == 0) {	/* sanity check */
>+				iowrite32(0xffffffff, desc);
>+				if (ioread32(desc) > 0) {
>+					lp->features |= XAE_FEATURE_DMA_64BIT;
>+					addr_width = 64;
>+					dev_info(&pdev->dev,
>+						 "autodetected 64-bit DMA range\n");
>+				}
>+				iowrite32(0x0, desc);
>+			}
>+		}
>+		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
>+			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
>+			ret = -EINVAL;
>+			goto cleanup_clk;
> 		}
>-	}
>-	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
>-		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
>-		ret = -EINVAL;
>-		goto cleanup_clk;
>-	}
>
>-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
>-	if (ret) {
>-		dev_err(&pdev->dev, "No suitable DMA available\n");
>-		goto cleanup_clk;
>+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
>+		if (ret) {
>+			dev_err(&pdev->dev, "No suitable DMA available\n");
>+			goto cleanup_clk;
>+		}
>+		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
>+		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
> 	}
>
> 	/* Check for Ethernet core IRQ (optional) */
>@@ -2092,14 +2153,16 @@ static int axienet_probe(struct platform_device *pdev)
> 	}
>
> 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
>-	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
> 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
>-	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
>
>-	/* Reset core now that clocks are enabled, prior to accessing MDIO */
>-	ret = __axienet_device_reset(lp);
>-	if (ret)
>-		goto cleanup_clk;
>+	if (!AXIENET_USE_DMA(lp)) {
>+		lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
>+		lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
>+		/* Reset core now that clocks are enabled, prior to accessing MDIO */
>+		ret = __axienet_device_reset(lp);
>+		if (ret)
>+			goto cleanup_clk;
>+	}
>
> 	ret = axienet_mdio_setup(lp);
> 	if (ret)
>--
>2.25.1
>

