lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Thu, 27 Oct 2011 11:28:20 +0200
From:	saeed bishara <saeed.bishara@...il.com>
To:	Rob Herring <robherring2@...il.com>
Cc:	netdev@...r.kernel.org, devicetree-discuss@...ts.ozlabs.org,
	Rob Herring <rob.herring@...xeda.com>
Subject: Re: [PATCH] net: add calxeda xgmac ethernet driver

Hi Rob,
please note that ethernet drivers moved to drivers/net/ethernet.

here are more notes:
> +#define XGMAC_ADDR_HIGH(reg)   (0x00000040+(reg * 8))
please add parentheses around reg, i.e. ((reg) * 8)
> +#define XGMAC_ADDR_LOW(reg)    (0x00000044+(reg * 8))
ditto
> +#define XGMAC_HASH(n)          (0x00000300 + (n) * 4) /* HASH table regs */
> +
> +struct xgmac_dma_desc {
> +       u32 flags;
please use __le32 for descriptors
> +       u32 buf_size;
> +       u32 buf1_addr;          /* Buffer 1 Address Pointer */
> +       u32 buf2_addr;          /* Buffer 2 Address Pointer */
> +       u32 ext_status;
> +       u32 res[3];
> +};

> +/* XGMAC Descriptor Access Helpers */
> +static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
> +{
> +       if (buf_sz > MAX_DESC_BUF_SZ)
> +               p->buf_size = MAX_DESC_BUF_SZ |
you should use cpu_to_leX when accessing descriptors
> +                       (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET;



> +static inline dma_addr_t desc_get_buf_addr(struct xgmac_dma_desc *p)
> +{
> +       return p->buf1_addr;
1. use le32_to_cpu()
2. you're assuming that dma_addr_t is the same as u32 (or __le32); this
might be true on your system, but drivers should be written in a generic
way
> +}
> +
> +static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
> +{
> +       u32 reg = readl(ioaddr + XGMAC_OMR);
> +       writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
> +
> +       do {} while (readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF);
1. change this to: while (read...);
2. that can lead to infinite loop. please consider using time/counter
based limit.
> +}
> +
> +



> +static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
> +                                   int num)
> +{
> +       u32 reg = 0;
> +
> +       memcpy(&reg, &addr[4], 2);
> +       reg |= num ? XGMAC_ADDR_AE : 0;
> +       writel(reg, ioaddr + XGMAC_ADDR_HIGH(num));
I think this code won't work in big endian mode
> +
> +       memcpy(&reg, addr, sizeof(reg));
> +       writel(reg, ioaddr + XGMAC_ADDR_LOW(num));
> +}
> +
> +static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
> +                                   int num)

> +
> +/**
> + * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings
> + * @dev: net device structure
> + * Description:  this function initializes the DMA RX/TX descriptors
> + * and allocates the socket buffers.
> + */
> +static void xgmac_dma_desc_rings_init(struct net_device *dev)
> +{
> +       struct xgmac_priv *priv = netdev_priv(dev);
> +       unsigned int bfsize;
> +
> +       /* Set the Buffer size according to the MTU;
> +        * indeed, in case of jumbo we need to bump-up the buffer sizes.
> +        */
> +       bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
> +                      64);
> +
> +       dev_dbg(priv->device, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
> +
> +       priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
> +                                 GFP_KERNEL);
> +       priv->dma_rx = dma_alloc_coherent(priv->device,
> +                                         DMA_RX_RING_SZ *
> +                                         sizeof(struct xgmac_dma_desc),
> +                                         &priv->dma_rx_phy,
> +                                         GFP_KERNEL);
> +       priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
> +                                 GFP_KERNEL);
> +       priv->dma_tx = dma_alloc_coherent(priv->device,
> +                                         DMA_TX_RING_SZ *
> +                                         sizeof(struct xgmac_dma_desc),
> +                                         &priv->dma_tx_phy,
> +                                         GFP_KERNEL);
> +       if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
> +               dev_err(priv->device, "ERROR allocating the DMA Tx/Rx desc\n");
> +               return;
the rx descs are not freed when tx desc allocation fails.
no check is done for the tx_skbuff/rx_skbuff allocations; consider using devm_kzalloc
> +       }
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ