Date:   Mon, 24 Sep 2018 05:22:40 +0800
From:   kbuild test robot <lkp@...el.com>
To:     Moritz Fischer <mdf@...nel.org>
Cc:     kbuild-all@...org, netdev@...r.kernel.org
Subject: [net-next:master 13/221] drivers/net//ethernet/ni/nixge.c:145:2:
 note: in expansion of macro 'nixge_hw_dma_bd_set_addr'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git master
head:   12ba7e1045521ec9f251c93ae0a6735cc3f42337
commit: 7e8d5755be0e6c92d3b86a85e54c6a550b1910c5 [13/221] net: nixge: Add support for 64-bit platforms
config: i386-randconfig-x075-09240403 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        git checkout 7e8d5755be0e6c92d3b86a85e54c6a550b1910c5
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   drivers/net//ethernet/ni/nixge.c: In function 'nixge_hw_dma_bd_release':
   drivers/net//ethernet/ni/nixge.c:254:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      skb = (struct sk_buff *)
            ^
   In file included from include/linux/skbuff.h:17:0,
                    from include/linux/if_ether.h:23,
                    from include/linux/etherdevice.h:25,
                    from drivers/net//ethernet/ni/nixge.c:7:
   drivers/net//ethernet/ni/nixge.c: In function 'nixge_hw_dma_bd_init':
   drivers/net//ethernet/ni/nixge.c:130:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
      (bd)->field##_lo = lower_32_bits(((u64)addr)); \
                                        ^
   include/linux/kernel.h:234:33: note: in definition of macro 'lower_32_bits'
    #define lower_32_bits(n) ((u32)(n))
                                    ^
>> drivers/net//ethernet/ni/nixge.c:145:2: note: in expansion of macro 'nixge_hw_dma_bd_set_addr'
     nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
     ^~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net//ethernet/ni/nixge.c:326:3: note: in expansion of macro 'nixge_hw_dma_bd_set_offset'
      nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], skb);
      ^~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net//ethernet/ni/nixge.c:131:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
      (bd)->field##_hi = upper_32_bits(((u64)addr)); \
                                        ^
   include/linux/kernel.h:228:35: note: in definition of macro 'upper_32_bits'
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
                                      ^
>> drivers/net//ethernet/ni/nixge.c:145:2: note: in expansion of macro 'nixge_hw_dma_bd_set_addr'
     nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
     ^~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net//ethernet/ni/nixge.c:326:3: note: in expansion of macro 'nixge_hw_dma_bd_set_offset'
      nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], skb);
      ^~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net//ethernet/ni/nixge.c: In function 'nixge_recv':
   drivers/net//ethernet/ni/nixge.c:604:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      skb = (struct sk_buff *)nixge_hw_dma_bd_get_addr(cur_p,
            ^
   In file included from include/linux/skbuff.h:17:0,
                    from include/linux/if_ether.h:23,
                    from include/linux/etherdevice.h:25,
                    from drivers/net//ethernet/ni/nixge.c:7:
   drivers/net//ethernet/ni/nixge.c:130:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
      (bd)->field##_lo = lower_32_bits(((u64)addr)); \
                                        ^
   include/linux/kernel.h:234:33: note: in definition of macro 'lower_32_bits'
    #define lower_32_bits(n) ((u32)(n))
                                    ^
>> drivers/net//ethernet/ni/nixge.c:145:2: note: in expansion of macro 'nixge_hw_dma_bd_set_addr'
     nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
     ^~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net//ethernet/ni/nixge.c:646:3: note: in expansion of macro 'nixge_hw_dma_bd_set_offset'
      nixge_hw_dma_bd_set_offset(cur_p, new_skb);
      ^~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net//ethernet/ni/nixge.c:131:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
      (bd)->field##_hi = upper_32_bits(((u64)addr)); \
                                        ^
   include/linux/kernel.h:228:35: note: in definition of macro 'upper_32_bits'
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
                                      ^
>> drivers/net//ethernet/ni/nixge.c:145:2: note: in expansion of macro 'nixge_hw_dma_bd_set_addr'
     nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
     ^~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net//ethernet/ni/nixge.c:646:3: note: in expansion of macro 'nixge_hw_dma_bd_set_offset'
      nixge_hw_dma_bd_set_offset(cur_p, new_skb);
      ^~~~~~~~~~~~~~~~~~~~~~~~~~
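
The -Wpointer-to-int-cast warnings above fire because this i386 randconfig
apparently has CONFIG_PHYS_ADDR_T_64BIT=y, so the 64-bit branch of
nixge_hw_dma_bd_set_addr() is compiled while pointers are still 32-bit:
nixge_hw_dma_bd_set_offset() passes a struct sk_buff pointer, which is then
cast straight to u64 at nixge.c:130/131. A minimal sketch of one common way
to avoid that warning, going through uintptr_t first so the
pointer-to-integer cast is always same-sized (purely illustrative, not
necessarily the fix that should be applied):

   /* hypothetical variant of the macro at nixge.c:144 */
   #define nixge_hw_dma_bd_set_offset(bd, skb) \
   	nixge_hw_dma_bd_set_addr((bd), sw_id_offset, \
   				 (u64)(uintptr_t)(skb))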

vim +/nixge_hw_dma_bd_set_addr +145 drivers/net//ethernet/ni/nixge.c

   > 7	#include <linux/etherdevice.h>
     8	#include <linux/module.h>
     9	#include <linux/netdevice.h>
    10	#include <linux/of_address.h>
    11	#include <linux/of_mdio.h>
    12	#include <linux/of_net.h>
    13	#include <linux/of_platform.h>
    14	#include <linux/of_irq.h>
    15	#include <linux/skbuff.h>
    16	#include <linux/phy.h>
    17	#include <linux/mii.h>
    18	#include <linux/nvmem-consumer.h>
    19	#include <linux/ethtool.h>
    20	#include <linux/iopoll.h>
    21	
    22	#define TX_BD_NUM		64
    23	#define RX_BD_NUM		128
    24	
    25	/* Axi DMA Register definitions */
    26	#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
    27	#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
    28	#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
    29	#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */
    30	
    31	#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
    32	#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
    33	#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
    34	#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */
    35	
    36	#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
    37	#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */
    38	
    39	#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
    40	#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
    41	#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
    42	#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */
    43	
    44	#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
    45	#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */
    46	
    47	#define XAXIDMA_DELAY_SHIFT		24
    48	#define XAXIDMA_COALESCE_SHIFT		16
    49	
    50	#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
    51	#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
    52	#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
    53	#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */
    54	
    55	/* Default TX/RX Threshold and waitbound values for SGDMA mode */
    56	#define XAXIDMA_DFT_TX_THRESHOLD	24
    57	#define XAXIDMA_DFT_TX_WAITBOUND	254
    58	#define XAXIDMA_DFT_RX_THRESHOLD	24
    59	#define XAXIDMA_DFT_RX_WAITBOUND	254
    60	
    61	#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
    62	#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
    63	#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
    64	#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
    65	#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
    66	#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
    67	#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
    68	#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
    69	#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */
    70	
    71	#define NIXGE_REG_CTRL_OFFSET	0x4000
    72	#define NIXGE_REG_INFO		0x00
    73	#define NIXGE_REG_MAC_CTL	0x04
    74	#define NIXGE_REG_PHY_CTL	0x08
    75	#define NIXGE_REG_LED_CTL	0x0c
    76	#define NIXGE_REG_MDIO_DATA	0x10
    77	#define NIXGE_REG_MDIO_ADDR	0x14
    78	#define NIXGE_REG_MDIO_OP	0x18
    79	#define NIXGE_REG_MDIO_CTRL	0x1c
    80	
    81	#define NIXGE_ID_LED_CTL_EN	BIT(0)
    82	#define NIXGE_ID_LED_CTL_VAL	BIT(1)
    83	
    84	#define NIXGE_MDIO_CLAUSE45	BIT(12)
    85	#define NIXGE_MDIO_CLAUSE22	0
    86	#define NIXGE_MDIO_OP(n)     (((n) & 0x3) << 10)
    87	#define NIXGE_MDIO_OP_ADDRESS	0
    88	#define NIXGE_MDIO_C45_WRITE	BIT(0)
    89	#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
    90	#define NIXGE_MDIO_C22_WRITE	BIT(0)
    91	#define NIXGE_MDIO_C22_READ	BIT(1)
    92	#define NIXGE_MDIO_ADDR(n)   (((n) & 0x1f) << 5)
    93	#define NIXGE_MDIO_MMD(n)    (((n) & 0x1f) << 0)
    94	
    95	#define NIXGE_REG_MAC_LSB	0x1000
    96	#define NIXGE_REG_MAC_MSB	0x1004
    97	
    98	/* Packet size info */
    99	#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
   100	#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
   101	#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
   102	#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */
   103	
   104	#define NIXGE_MAX_FRAME_SIZE	 (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
   105	#define NIXGE_MAX_JUMBO_FRAME_SIZE \
   106		(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
   107	
   108	struct nixge_hw_dma_bd {
   109		u32 next_lo;
   110		u32 next_hi;
   111		u32 phys_lo;
   112		u32 phys_hi;
   113		u32 reserved3;
   114		u32 reserved4;
   115		u32 cntrl;
   116		u32 status;
   117		u32 app0;
   118		u32 app1;
   119		u32 app2;
   120		u32 app3;
   121		u32 app4;
   122		u32 sw_id_offset_lo;
   123		u32 sw_id_offset_hi;
   124		u32 reserved6;
   125	};
   126	
   127	#ifdef CONFIG_PHYS_ADDR_T_64BIT
   128	#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
   129		do { \
 > 130			(bd)->field##_lo = lower_32_bits(((u64)addr)); \
   131			(bd)->field##_hi = upper_32_bits(((u64)addr)); \
   132		} while (0)
   133	#else
   134	#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
   135		((bd)->field##_lo = lower_32_bits((addr)))
   136	#endif
   137	
   138	#define nixge_hw_dma_bd_set_phys(bd, addr) \
   139		nixge_hw_dma_bd_set_addr((bd), phys, (addr))
   140	
   141	#define nixge_hw_dma_bd_set_next(bd, addr) \
   142		nixge_hw_dma_bd_set_addr((bd), next, (addr))
   143	
   144	#define nixge_hw_dma_bd_set_offset(bd, addr) \
 > 145		nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
   146	
   147	#ifdef CONFIG_PHYS_ADDR_T_64BIT
   148	#define nixge_hw_dma_bd_get_addr(bd, field) \
   149		(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
   150	#else
   151	#define nixge_hw_dma_bd_get_addr(bd, field) \
   152		(dma_addr_t)((bd)->field##_lo)
   153	#endif
   154	
   155	struct nixge_tx_skb {
   156		struct sk_buff *skb;
   157		dma_addr_t mapping;
   158		size_t size;
   159		bool mapped_as_page;
   160	};
   161	
   162	struct nixge_priv {
   163		struct net_device *ndev;
   164		struct napi_struct napi;
   165		struct device *dev;
   166	
   167		/* Connection to PHY device */
   168		struct device_node *phy_node;
   169		phy_interface_t		phy_mode;
   170	
   171		int link;
   172		unsigned int speed;
   173		unsigned int duplex;
   174	
   175		/* MDIO bus data */
   176		struct mii_bus *mii_bus;	/* MII bus reference */
   177	
   178		/* IO registers, dma functions and IRQs */
   179		void __iomem *ctrl_regs;
   180		void __iomem *dma_regs;
   181	
   182		struct tasklet_struct dma_err_tasklet;
   183	
   184		int tx_irq;
   185		int rx_irq;
   186	
   187		/* Buffer descriptors */
   188		struct nixge_hw_dma_bd *tx_bd_v;
   189		struct nixge_tx_skb *tx_skb;
   190		dma_addr_t tx_bd_p;
   191	
   192		struct nixge_hw_dma_bd *rx_bd_v;
   193		dma_addr_t rx_bd_p;
   194		u32 tx_bd_ci;
   195		u32 tx_bd_tail;
   196		u32 rx_bd_ci;
   197	
   198		u32 coalesce_count_rx;
   199		u32 coalesce_count_tx;
   200	};
   201	
   202	static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
   203	{
   204		writel(val, priv->dma_regs + offset);
   205	}
   206	
   207	static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
   208					     dma_addr_t addr)
   209	{
   210		writel(lower_32_bits(addr), priv->dma_regs + offset);
   211	#ifdef CONFIG_PHYS_ADDR_T_64BIT
   212		writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
   213	#endif
   214	}
   215	
   216	static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
   217	{
   218		return readl(priv->dma_regs + offset);
   219	}
   220	
   221	static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
   222	{
   223		writel(val, priv->ctrl_regs + offset);
   224	}
   225	
   226	static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
   227	{
   228		return readl(priv->ctrl_regs + offset);
   229	}
   230	
   231	#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
   232		readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
   233				   (sleep_us), (timeout_us))
   234	
   235	#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
   236		readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
   237				   (sleep_us), (timeout_us))
   238	
   239	static void nixge_hw_dma_bd_release(struct net_device *ndev)
   240	{
   241		struct nixge_priv *priv = netdev_priv(ndev);
   242		dma_addr_t phys_addr;
   243		struct sk_buff *skb;
   244		int i;
   245	
   246		for (i = 0; i < RX_BD_NUM; i++) {
   247			phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
   248							     phys);
   249	
   250			dma_unmap_single(ndev->dev.parent, phys_addr,
   251					 NIXGE_MAX_JUMBO_FRAME_SIZE,
   252					 DMA_FROM_DEVICE);
   253	
   254			skb = (struct sk_buff *)
   255				nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
   256							 sw_id_offset);
   257			dev_kfree_skb(skb);
   258		}
   259	
   260		if (priv->rx_bd_v)
   261			dma_free_coherent(ndev->dev.parent,
   262					  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
   263					  priv->rx_bd_v,
   264					  priv->rx_bd_p);
   265	
   266		if (priv->tx_skb)
   267			devm_kfree(ndev->dev.parent, priv->tx_skb);
   268	
   269		if (priv->tx_bd_v)
   270			dma_free_coherent(ndev->dev.parent,
   271					  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
   272					  priv->tx_bd_v,
   273					  priv->tx_bd_p);
   274	}
   275	
   276	static int nixge_hw_dma_bd_init(struct net_device *ndev)
   277	{
   278		struct nixge_priv *priv = netdev_priv(ndev);
   279		struct sk_buff *skb;
   280		dma_addr_t phys;
   281		u32 cr;
   282		int i;
   283	
   284		/* Reset the indexes which are used for accessing the BDs */
   285		priv->tx_bd_ci = 0;
   286		priv->tx_bd_tail = 0;
   287		priv->rx_bd_ci = 0;
   288	
   289		/* Allocate the Tx and Rx buffer descriptors. */
   290		priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
   291						    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
   292						    &priv->tx_bd_p, GFP_KERNEL);
   293		if (!priv->tx_bd_v)
   294			goto out;
   295	
   296		priv->tx_skb = devm_kcalloc(ndev->dev.parent,
   297					    TX_BD_NUM, sizeof(*priv->tx_skb),
   298					    GFP_KERNEL);
   299		if (!priv->tx_skb)
   300			goto out;
   301	
   302		priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
   303						    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
   304						    &priv->rx_bd_p, GFP_KERNEL);
   305		if (!priv->rx_bd_v)
   306			goto out;
   307	
   308		for (i = 0; i < TX_BD_NUM; i++) {
   309			nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
   310						 priv->tx_bd_p +
   311						 sizeof(*priv->tx_bd_v) *
   312						 ((i + 1) % TX_BD_NUM));
   313		}
   314	
   315		for (i = 0; i < RX_BD_NUM; i++) {
   316			nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
   317						 priv->rx_bd_p
   318						 + sizeof(*priv->rx_bd_v) *
   319						 ((i + 1) % RX_BD_NUM));
   320	
   321			skb = netdev_alloc_skb_ip_align(ndev,
   322							NIXGE_MAX_JUMBO_FRAME_SIZE);
   323			if (!skb)
   324				goto out;
   325	
 > 326			nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], skb);
   327			phys = dma_map_single(ndev->dev.parent, skb->data,
   328					      NIXGE_MAX_JUMBO_FRAME_SIZE,
   329					      DMA_FROM_DEVICE);
   330	
   331			nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);
   332	
   333			priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
   334		}
   335	
   336		/* Start updating the Rx channel control register */
   337		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
   338		/* Update the interrupt coalesce count */
   339		cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
   340		      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
   341		/* Update the delay timer count */
   342		cr = ((cr & ~XAXIDMA_DELAY_MASK) |
   343		      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
   344		/* Enable coalesce, delay timer and error interrupts */
   345		cr |= XAXIDMA_IRQ_ALL_MASK;
   346		/* Write to the Rx channel control register */
   347		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
   348	
   349		/* Start updating the Tx channel control register */
   350		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
   351		/* Update the interrupt coalesce count */
   352		cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
   353		      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
   354		/* Update the delay timer count */
   355		cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
   356		      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
   357		/* Enable coalesce, delay timer and error interrupts */
   358		cr |= XAXIDMA_IRQ_ALL_MASK;
   359		/* Write to the Tx channel control register */
   360		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
   361	
   362		/* Populate the tail pointer and bring the Rx Axi DMA engine out of
   363		 * halted state. This will make the Rx side ready for reception.
   364		 */
   365		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
   366		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
   367		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
   368				    cr | XAXIDMA_CR_RUNSTOP_MASK);
   369		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
   370				    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
   371	
   372		/* Write to the RS (Run-stop) bit in the Tx channel control register.
   373		 * Tx channel is now ready to run. But only after we write to the
   374		 * tail pointer register that the Tx channel will start transmitting.
   375		 */
   376		nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
   377		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
   378		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
   379				    cr | XAXIDMA_CR_RUNSTOP_MASK);
   380	
   381		return 0;
   382	out:
   383		nixge_hw_dma_bd_release(ndev);
   384		return -ENOMEM;
   385	}
   386	
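
The complementary -Wint-to-pointer-cast warnings (nixge.c:254 and
nixge.c:604) come from the opposite direction: on this config
nixge_hw_dma_bd_get_addr() hands back a 64-bit dma_addr_t, which is then cast
straight to a 32-bit struct sk_buff pointer. A hedged sketch of a matching
accessor, again only an illustration under the assumption that the value
stored in sw_id_offset is a CPU pointer (so its upper 32 bits are zero on
32-bit builds):

   /* hypothetical counterpart to the macros above */
   #define nixge_hw_dma_bd_get_offset(bd) \
   	((struct sk_buff *)(uintptr_t) \
   	 nixge_hw_dma_bd_get_addr((bd), sw_id_offset))

   /* usage, e.g. in nixge_hw_dma_bd_release() or nixge_recv():
    *	skb = nixge_hw_dma_bd_get_offset(&priv->rx_bd_v[i]);
    */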

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

