Message-ID: <2461f47f-1a5b-ed46-116b-35f683ab0206@gmail.com>
Date: Thu, 22 Feb 2018 14:08:36 -0800
From: Florian Fainelli <f.fainelli@...il.com>
To: Bryan Whitehead <Bryan.Whitehead@...rochip.com>,
davem@...emloft.net
Cc: UNGLinuxDriver@...rochip.com, netdev@...r.kernel.org
Subject: Re: [PATCH v2 net-next 1/2] lan743x: Add main source files for new
lan743x driver
On 02/21/2018 11:06 AM, Bryan Whitehead wrote:
> Add main source files for new lan743x driver.
>
> Signed-off-by: Bryan Whitehead <Bryan.Whitehead@...rochip.com>
> ---
> +lan743x-objs := lan743x_main.o
Should we assume that you have additional object files you would like to
contribute at a later point? If that is the case, this is fine; if this is
going to be the only file of this driver, consider renaming it so you don't
even need the lan743x-objs line at all.
> diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
> new file mode 100644
> index 0000000..3de39e1
> --- /dev/null
> +++ b/drivers/net/ethernet/microchip/lan743x_main.c
> @@ -0,0 +1,2757 @@
> +/*
> + * Copyright (C) 2018 Microchip Technology
You should consider using an SPDX license tag to reduce the standard license
boilerplate below.
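For example, a single line at the top of the file, something like:

	/* SPDX-License-Identifier: GPL-2.0+ */

(the exact identifier and comment style should match your intended license;
GPL-2.0+ is just my reading of the boilerplate below).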
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License
> + * as published by the Free Software Foundation; either version 2
> + * of the License, or (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include "lan743x_main.h"
This is not ideal; having all your header dependencies resolved by a single
main header file usually leads to unnecessary inclusions when fewer are
needed.
> +
> +static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
> +{
> + struct lan743x_pci *pci = &adapter->pci;
> +
> + if (pci->init_flags & INIT_FLAG_PCI_REGIONS_REQUESTED) {
There is a pattern throughout the driver of maintaining flags to track what
was initialized and what was not. Do you really need that, or can you check
specific bookkeeping information in your private structures instead?
Maintaining flags is error prone and requires you to keep adding new ones,
which does not really scale.
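For instance, the PCI bring-up can typically be unwound with labeled gotos
in reverse order, so no INIT_FLAG_* tracking is needed. A rough sketch
(field and macro names here are guesses, not taken from your patch):

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret)
		goto err_disable_device;

	adapter->csr.csr_address = pci_iomap(pdev, 0, 0);
	if (!adapter->csr.csr_address) {
		ret = -ENOMEM;
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;

The cleanup path then just undoes, in reverse order, exactly what was set
up, with no flags to consult.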
[snip]
> +static void lan743x_intr_software_isr(void *context)
> +{
> + struct lan743x_adapter *adapter = context;
> + struct lan743x_intr *intr = &adapter->intr;
> + u32 int_sts;
> +
> + int_sts = lan743x_csr_read(adapter, INT_STS);
> + if (int_sts & INT_BIT_SW_GP_) {
> + lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
> + intr->software_isr_flag = 1;
> + }
> +}
> +
> +static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
> +{
> + struct lan743x_tx *tx = context;
> + struct lan743x_adapter *adapter = tx->adapter;
> + int enable_flag = 1;
This is inherently a boolean type.
> + u32 int_en = 0;
> +
> + int_en = lan743x_csr_read(adapter, INT_EN_SET);
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
> + lan743x_csr_write(adapter, INT_EN_CLR,
> + INT_BIT_DMA_TX_(tx->channel_number));
> + }
> + if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
> + u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
> + u32 dmac_int_sts;
> + u32 dmac_int_en;
> +
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
> + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
> + else
> + dmac_int_sts = ioc_bit;
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
> + dmac_int_en = lan743x_csr_read(adapter,
> + DMAC_INT_EN_SET);
> + else
> + dmac_int_en = ioc_bit;
> +
> + dmac_int_en &= ioc_bit;
> + dmac_int_sts &= dmac_int_en;
> + if (dmac_int_sts & ioc_bit) {
> + tasklet_schedule(&tx->tx_isr_bottom_half);
> + enable_flag = 0;/* tasklet will re-enable later */
> + }
Consider migrating your TX buffer reclamation to a NAPI context. If you have
one TX queue and one RX queue, the same NAPI context can be re-used; if you
have separate RX/TX queues, you may create a NAPI context per RX/TX pair, or
separate NAPI contexts per TX queue and per RX queue.
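Roughly, assuming you add a napi_struct to struct lan743x_tx and register it
with netif_napi_add() next to the RX one (just a sketch):

	/* in lan743x_tx_isr(): */
	if (dmac_int_sts & ioc_bit) {
		napi_schedule(&tx->napi);
		enable_flag = 0;	/* poll will re-enable later */
	}

	/* and the poll function: */
	static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
	{
		struct lan743x_tx *tx = container_of(napi, struct lan743x_tx,
						     napi);

		lan743x_tx_release_completed_descriptors(tx);
		napi_complete(napi);
		/* re-enable INT_BIT_DMA_TX_(tx->channel_number) here */
		return 0;
	}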
> + }
> + if (enable_flag)
> + /* enable isr */
> + lan743x_csr_write(adapter, INT_EN_SET,
> + INT_BIT_DMA_TX_(tx->channel_number));
> +}
> +
> +static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
> +{
> + struct lan743x_rx *rx = context;
> + struct lan743x_adapter *adapter = rx->adapter;
> + int enable_flag = 1;
> +
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
> + lan743x_csr_write(adapter, INT_EN_CLR,
> + INT_BIT_DMA_RX_(rx->channel_number));
> + }
> + if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
> + u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
> + u32 dmac_int_sts;
> + u32 dmac_int_en;
> +
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
> + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
> + else
> + dmac_int_sts = rx_frame_bit;
> + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
> + dmac_int_en = lan743x_csr_read(adapter,
> + DMAC_INT_EN_SET);
> + else
> + dmac_int_en = rx_frame_bit;
> +
> + dmac_int_en &= rx_frame_bit;
> + dmac_int_sts &= dmac_int_en;
> + if (dmac_int_sts & rx_frame_bit) {
> + napi_schedule(&rx->napi);
> + enable_flag = 0;/* poll function will re-enable later */
> + }
> + }
> + if (enable_flag) {
> + /* enable isr */
> + lan743x_csr_write(adapter, INT_EN_SET,
> + INT_BIT_DMA_RX_(rx->channel_number));
> + }
> +}
> +
> +static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
> +{
> + struct lan743x_adapter *adapter = context;
> + int channel;
unsigned int.
> +
> + if (int_sts & INT_BIT_ALL_RX_) {
> + for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
> + channel++) {
> + u32 int_bit = INT_BIT_DMA_RX_(channel);
> +
> + if (int_sts & int_bit) {
> + lan743x_rx_isr(&adapter->rx[channel],
> + int_bit, flags);
> + int_sts &= ~int_bit;
> + }
> + }
> + }
> + if (int_sts & INT_BIT_ALL_TX_) {
> + for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
> + channel++) {
> + u32 int_bit = INT_BIT_DMA_TX_(channel);
> +
> + if (int_sts & int_bit) {
> + lan743x_tx_isr(&adapter->tx[channel],
> + int_bit, flags);
> + int_sts &= ~int_bit;
> + }
> + }
> + }
> + if (int_sts & INT_BIT_ALL_OTHER_) {
> + if (int_sts & INT_BIT_SW_GP_) {
> + lan743x_intr_software_isr(adapter);
> + int_sts &= ~INT_BIT_SW_GP_;
> + }
> + }
> + if (int_sts)
> + lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
> +}
> +
> +static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
> +{
> + struct lan743x_vector *vector = ptr;
> + struct lan743x_adapter *adapter = vector->adapter;
> + irqreturn_t result = IRQ_NONE;
> + u32 int_enables;
> + u32 int_sts;
> +
> + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
> + int_sts = lan743x_csr_read(adapter, INT_STS);
> + } else if (vector->flags &
> + (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
> + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
> + int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
> + } else {
> + /* use mask as implied status */
> + int_sts = vector->int_mask | INT_BIT_MAS_;
> + }
> + if (!(int_sts & INT_BIT_MAS_))
> + goto irq_done;
> + if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
> + /* disable vector interrupt */
> + lan743x_csr_write(adapter,
> + INT_VEC_EN_CLR,
> + INT_VEC_EN_(vector->vector_index));
> + if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
> + /* disable master interrupt */
> + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
> + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
> + int_enables = lan743x_csr_read(adapter, INT_EN_SET);
> + } else {
> + /* use vector mask as implied enable mask */
> + int_enables = vector->int_mask;
> + }
Please add blank lines to logically separate what you are doing; this is
barely readable.
> + /* setup auto duplex, and speed detection */
> + data = lan743x_csr_read(adapter, MAC_CR);
> + data |= MAC_CR_ADD_ | MAC_CR_ASD_;
> + data |= MAC_CR_CNTR_RST_;
> + lan743x_csr_write(adapter, MAC_CR, data);
> + mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
> + mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
> + mac->mac_address[0] = mac_addr_lo & 0xFF;
> + mac->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
> + mac->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
> + mac->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
> + mac->mac_address[4] = mac_addr_hi & 0xFF;
> + mac->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
> + if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
> + mac_addr_lo == 0xFFFFFFFF) {
> + mac_address_valid = false;
> + } else if (!is_valid_ether_addr(mac->mac_address)) {
> + mac_address_valid = false;
> + }
> + if (!mac_address_valid)
> + random_ether_addr(mac->mac_address);
> + lan743x_mac_set_address(adapter, mac->mac_address);
> + ether_addr_copy(netdev->dev_addr, mac->mac_address);
> + return 0;
> +}
> +
> +static int lan743x_mac_open(struct lan743x_adapter *adapter)
> +{
> + int ret = 0;
> + u32 temp;
> +
> + temp = lan743x_csr_read(adapter, MAC_RX);
> + lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
> + temp = lan743x_csr_read(adapter, MAC_TX);
> + lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
> + return ret;
> +}
> +
> +static void lan743x_mac_close(struct lan743x_adapter *adapter)
> +{
> + u32 temp;
> +
> + temp = lan743x_csr_read(adapter, MAC_TX);
> + temp &= ~MAC_TX_TXEN_;
> + lan743x_csr_write(adapter, MAC_TX, temp);
> + lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
> + 1, 1000, 20000, 100);
> + temp = lan743x_csr_read(adapter, MAC_RX);
> + temp &= ~MAC_RX_RXEN_;
> + lan743x_csr_write(adapter, MAC_RX, temp);
> + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
> + 1, 1000, 20000, 100);
> +}
> +
> +static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
> + bool tx_enable, bool rx_enable)
> +{
> + u32 flow_setting = 0;
> +
> + /* set maximum pause time because when fifo space frees
> + * up a zero value pause frame will be sent to release the pause
> + */
> + flow_setting = MAC_FLOW_CR_FCPT_MASK_;
> + if (tx_enable)
> + flow_setting |= MAC_FLOW_CR_TX_FCEN_;
> + if (rx_enable)
> + flow_setting |= MAC_FLOW_CR_RX_FCEN_;
> + lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
> +}
> +
> +static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
> +{
> + int enabled = 0;
> + u32 mac_rx = 0;
> +
> + mac_rx = lan743x_csr_read(adapter, MAC_RX);
> + if (mac_rx & MAC_RX_RXEN_) {
> + enabled = 1;
> + if (mac_rx & MAC_RX_RXD_) {
> + lan743x_csr_write(adapter, MAC_RX, mac_rx);
> + mac_rx &= ~MAC_RX_RXD_;
> + }
> + mac_rx &= ~MAC_RX_RXEN_;
> + lan743x_csr_write(adapter, MAC_RX, mac_rx);
> + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
> + 1, 1000, 20000, 100);
> + lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
> + }
> + mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
> + mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
> + MAC_RX_MAX_SIZE_MASK_);
> + lan743x_csr_write(adapter, MAC_RX, mac_rx);
> + if (enabled) {
> + mac_rx |= MAC_RX_RXEN_;
> + lan743x_csr_write(adapter, MAC_RX, mac_rx);
> + }
> + return 0;
> +}
> +
> +/* PHY */
> +static int lan743x_phy_reset(struct lan743x_adapter *adapter)
> +{
> + u32 data;
> +
> + data = lan743x_csr_read(adapter, PMT_CTL);
> + data |= PMT_CTL_ETH_PHY_RST_;
> + lan743x_csr_write(adapter, PMT_CTL, data);
> +
> + return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
> + (!(data & PMT_CTL_ETH_PHY_RST_) &&
> + (data & PMT_CTL_READY_)),
> + 50000, 1000000);
> +}
> +
> +static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
> + u8 duplex, u16 local_adv,
> + u16 remote_adv)
> +{
> + struct lan743x_phy *phy = &adapter->phy;
> + u8 cap;
> +
> + if (phy->fc_autoneg)
> + cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
> + else
> + cap = phy->fc_request_control;
> + lan743x_mac_flow_ctrl_set_enables(adapter,
> + cap & FLOW_CTRL_TX,
> + cap & FLOW_CTRL_RX);
> +}
> +
> +static int lan743x_phy_init(struct lan743x_adapter *adapter)
> +{
> + struct net_device *netdev;
> + int ret;
> +
> + netdev = adapter->netdev;
> + ret = lan743x_phy_reset(adapter);
> + if (ret)
> + return ret;
> +
> + /* carrier off reporting is important to ethtool even BEFORE open */
> + netif_carrier_off(netdev);
Move this before the register_netdev() call and let the PHY library
manage the carrier state from there on.
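i.e. something like this in your probe function (sketch):

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);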
> + return 0;
> +}
> +
> +static void lan743x_phy_link_status_change(struct net_device *netdev)
> +{
> + struct lan743x_adapter *adapter = netdev_priv(netdev);
> + struct phy_device *phydev = netdev->phydev;
> +
> + if (phydev) {
You should not allow your network device to be registered without a
valid PHY device, so this test should be entirely eliminated.
> + phy_print_status(phydev);
> + if (phydev->state == PHY_RUNNING) {
> + struct ethtool_link_ksettings ksettings;
> + struct lan743x_phy *phy = NULL;
> + int remote_advertisement = 0;
> + int local_advertisement = 0;
> +
> + phy = &adapter->phy;
> + memset(&ksettings, 0, sizeof(ksettings));
> + phy_ethtool_get_link_ksettings(netdev, &ksettings);
> + local_advertisement = phy_read(phydev, MII_ADVERTISE);
> + if (local_advertisement < 0)
> + return;
> + remote_advertisement = phy_read(phydev, MII_LPA);
> + if (remote_advertisement < 0)
> + return;
> + lan743x_phy_update_flowcontrol(adapter,
> + ksettings.base.duplex,
> + local_advertisement,
> + remote_advertisement);
> + }
> + }
> +}
> +
> +static void lan743x_phy_close(struct lan743x_adapter *adapter)
> +{
> + struct net_device *netdev = adapter->netdev;
> + struct lan743x_phy *phy = &adapter->phy;
> +
> + if (phy->flags & PHY_FLAG_OPENED) {
> + netif_carrier_off(netdev);
> + phy_stop(netdev->phydev);
netif_carrier_off() is already taken care of by the PHY library.
> + phy->flags &= ~PHY_FLAG_OPENED;
> + }
> + if (phy->flags & PHY_FLAG_ATTACHED) {
> + phy_disconnect(netdev->phydev);
> + netdev->phydev = NULL;
> + phy->flags &= ~PHY_FLAG_ATTACHED;
> + }
Again, unnecessary proliferation of status flags to keep track of your
initialization.
> +}
> +
> +static int lan743x_phy_open(struct lan743x_adapter *adapter)
> +{
> + struct lan743x_phy *phy = &adapter->phy;
> + struct phy_device *phydev;
> + struct net_device *netdev;
> + int ret = -EIO;
> + u32 mii_adv;
> +
> + netdev = adapter->netdev;
> + phydev = phy_find_first(adapter->mdiobus);
> + if (!phydev) {
> + ret = -EIO;
> + goto clean_up;
> + }
> + ret = phy_connect_direct(netdev, phydev,
> + lan743x_phy_link_status_change,
> + PHY_INTERFACE_MODE_GMII);
> + if (ret)
> + goto clean_up;
> + phy->flags |= PHY_FLAG_ATTACHED;
> +
> + /* MAC doesn't support 1000T Half */
> + phydev->supported &= ~SUPPORTED_1000baseT_Half;
> +
> + /* support both flow controls */
> + phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
> + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
> + mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
> + phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
> + phy->fc_autoneg = phydev->autoneg;
No driver does things like that; this appears to be really wrong.
> +
> + /* PHY interrupt enabled here */
> + phy_start(phydev);
> + phy_start_aneg(phydev);
> + phy->flags |= PHY_FLAG_OPENED;
> + return 0;
> +clean_up:
> + lan743x_phy_close(adapter);
> + return ret;
> +}
> +
> +static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
> +{
> + u8 mac_addr[ETH_ALEN];
> + u32 mac_addr_hi = 0;
> + u32 mac_addr_lo = 0;
> +
> + /* Add mac address to perfect Filter */
> + ether_addr_copy(mac_addr, adapter->mac.mac_address);
> + mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
> + (((u32)(mac_addr[1])) << 8) |
> + (((u32)(mac_addr[2])) << 16) |
> + (((u32)(mac_addr[3])) << 24));
> + mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
> + (((u32)(mac_addr[5])) << 8));
> + lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
> + lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
> + mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
> +}
> +
> +static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
> +{
> + struct net_device *netdev = adapter->netdev;
> + u32 hash_table[DP_SEL_VHF_HASH_LEN];
> + u32 rfctl;
> + u32 data;
> +
> + rfctl = lan743x_csr_read(adapter, RFE_CTL);
> + rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
> + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
> + rfctl |= RFE_CTL_AB_;
> + if (netdev->flags & IFF_PROMISC) {
> + rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
> + } else {
> + if (netdev->flags & IFF_ALLMULTI)
> + rfctl |= RFE_CTL_AM_;
> + }
> + memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
> + if (netdev_mc_count(netdev)) {
> + struct netdev_hw_addr *ha;
> + int i;
> +
> + rfctl |= RFE_CTL_DA_PERFECT_;
> + i = 1;
> + netdev_for_each_mc_addr(ha, netdev) {
> + /* set first 32 into Perfect Filter */
> + if (i < 33) {
> + lan743x_csr_write(adapter,
> + RFE_ADDR_FILT_HI(i), 0);
> + data = ha->addr[3];
> + data = ha->addr[2] | (data << 8);
> + data = ha->addr[1] | (data << 8);
> + data = ha->addr[0] | (data << 8);
> + lan743x_csr_write(adapter,
> + RFE_ADDR_FILT_LO(i), data);
> + data = ha->addr[5];
> + data = ha->addr[4] | (data << 8);
> + data |= RFE_ADDR_FILT_HI_VALID_;
> + lan743x_csr_write(adapter,
> + RFE_ADDR_FILT_HI(i), data);
> + } else {
> + u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
> + 23) & 0x1FF;
> + hash_table[bitnum / 32] |= (1 << (bitnum % 32));
> + rfctl |= RFE_CTL_MCAST_HASH_;
> + }
> + i++;
> + }
> + }
> + lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
> + DP_SEL_VHF_VLAN_LEN,
> + DP_SEL_VHF_HASH_LEN, hash_table);
> + lan743x_csr_write(adapter, RFE_CTL, rfctl);
> +}
> +
> +static int lan743x_dmac_init(struct lan743x_adapter *adapter)
> +{
> + struct lan743x_dmac *dmac = &adapter->dmac;
> + u32 data = 0;
> +
> + dmac->flags = 0;
> + dmac->descriptor_spacing = DEFAULT_DMA_DESCRIPTOR_SPACING;
> + lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
> + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
> + 0, 1000, 20000, 100);
> + switch (dmac->descriptor_spacing) {
> + case DMA_DESCRIPTOR_SPACING_16:
> + data = DMAC_CFG_MAX_DSPACE_16_;
> + break;
> + case DMA_DESCRIPTOR_SPACING_32:
> + data = DMAC_CFG_MAX_DSPACE_32_;
> + break;
> + case DMA_DESCRIPTOR_SPACING_64:
> + data = DMAC_CFG_MAX_DSPACE_64_;
> + break;
> + case DMA_DESCRIPTOR_SPACING_128:
> + data = DMAC_CFG_MAX_DSPACE_128_;
> + break;
> + default:
> + return -EPERM;
-EINVAL maybe?
[snip]
> +
> +done:
> + memset(buffer_info, 0, sizeof(*buffer_info));
> + memset(descriptor, 0, sizeof(*descriptor));
You are likely missing barriers here: what guarantees that the buffer_info
and descriptor updates will be properly ordered on a non-cache-coherent
architecture?
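Something along these lines is probably needed (sketch), so the cleared
descriptor is observed before any later ring or tail update:

	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
	/* order these stores before any subsequent descriptor/tail writes */
	dma_wmb();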
> +}
> +
> +static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
> +{
> + return ((++index) % tx->ring_size);
> +}
> +
> +static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
> +{
> + while ((*tx->head_cpu_ptr) != (tx->last_head)) {
> + lan743x_tx_release_desc(tx, tx->last_head, false);
> + tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
> + }
> +}
> +
> +static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
> +{
> + u32 original_head = 0;
> +
> + original_head = tx->last_head;
> + do {
> + lan743x_tx_release_desc(tx, tx->last_head, true);
> + tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
> + } while (tx->last_head != original_head);
> + memset(tx->ring_cpu_ptr, 0,
> + sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
> + memset(tx->buffer_info, 0,
> + sizeof(*tx->buffer_info) * (tx->ring_size));
This is coherent memory, but you still need to prevent possible
re-orderings, so you need at least a write barrier to guarantee all
pending writes are completed. Likely you won't ever see this as a
problem on x86 platforms.
> + tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
> + tx->frame_first = tx->last_tail;
> + tx->frame_tail = tx->frame_first;
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + buffer_info = &tx->buffer_info[tx->frame_tail];
> + dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
> + DMA_TO_DEVICE);
> + if (dma_mapping_error(dev, dma_ptr))
> + return -ENOMEM;
> + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
> + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
> + tx_descriptor->data3 = (frame_length << 16) &
> + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
> + buffer_info->skb = NULL;
> + buffer_info->dma_ptr = dma_ptr;
> + buffer_info->buffer_length = first_buffer_length;
> + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
> + tx->frame_data0 = (first_buffer_length &
> + TX_DESC_DATA0_BUF_LENGTH_MASK_) |
> + TX_DESC_DATA0_DTYPE_DATA_ |
> + TX_DESC_DATA0_FS_ |
> + TX_DESC_DATA0_FCS_;
> + if (check_sum)
> + tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
> + TX_DESC_DATA0_IPE_ |
> + TX_DESC_DATA0_TPE_;
> +
> + /* data0 will be programmed in one of other frame assembler functions */
You again need some sort of write barrier here to guarantee ordering and
make sure the descriptors/buffer_info are properly written.
> + return 0;
> +}
> +
> +static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
> + unsigned int frame_length)
> +{
> + /* called only from within lan743x_tx_xmit_frame.
> + * assuming tx->ring_lock has already been acquired.
> + */
> + struct lan743x_tx_descriptor *tx_descriptor = NULL;
> + struct lan743x_tx_buffer_info *buffer_info = NULL;
> +
> + /* wrap up previous descriptor */
> + tx->frame_data0 |= TX_DESC_DATA0_EXT_;
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + tx_descriptor->data0 = tx->frame_data0;
> +
> + /* move to next descriptor */
> + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + buffer_info = &tx->buffer_info[tx->frame_tail];
> +
> + /* add extension descriptor */
> + tx_descriptor->data1 = 0;
> + tx_descriptor->data2 = 0;
> + tx_descriptor->data3 = 0;
> + buffer_info->skb = NULL;
> + buffer_info->dma_ptr = 0;
> + buffer_info->buffer_length = 0;
> + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
> + tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
> + TX_DESC_DATA0_DTYPE_EXT_ |
> + TX_DESC_DATA0_EXT_LSO_;
And here again.
> +
> + /* data0 will be programmed in one of other frame assembler functions */
> +}
> +
> +static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
> + const struct skb_frag_struct *fragment,
> + unsigned int frame_length)
> +{
> + /* called only from within lan743x_tx_xmit_frame
> + * assuming tx->ring_lock has already been acquired
> + */
> + struct lan743x_tx_descriptor *tx_descriptor = NULL;
> + struct lan743x_tx_buffer_info *buffer_info = NULL;
> + struct lan743x_adapter *adapter = tx->adapter;
> + struct device *dev = &adapter->pci.pdev->dev;
> + unsigned int fragment_length = 0;
> + dma_addr_t dma_ptr;
> +
> + fragment_length = skb_frag_size(fragment);
> + if (!fragment_length)
> + return 0;
> +
> + /* wrap up previous descriptor */
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + tx_descriptor->data0 = tx->frame_data0;
> +
> + /* move to next descriptor */
> + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + buffer_info = &tx->buffer_info[tx->frame_tail];
> + dma_ptr = skb_frag_dma_map(dev, fragment,
> + 0, fragment_length,
> + DMA_TO_DEVICE);
> + if (dma_mapping_error(dev, dma_ptr)) {
> + int desc_index;
> +
> + /* cleanup all previously setup descriptors */
> + desc_index = tx->frame_first;
> + while (desc_index != tx->frame_tail) {
> + lan743x_tx_release_desc(tx, desc_index, true);
> + desc_index = lan743x_tx_next_index(tx, desc_index);
> + }
> + dma_wmb();
> + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
> + tx->frame_first = 0;
> + tx->frame_data0 = 0;
> + tx->frame_tail = 0;
> + return -ENOMEM;
> + }
> + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
> + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
> + tx_descriptor->data3 = (frame_length << 16) &
> + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
> + buffer_info->skb = NULL;
> + buffer_info->dma_ptr = dma_ptr;
> + buffer_info->buffer_length = fragment_length;
> + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
> + buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
> + tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
> + TX_DESC_DATA0_DTYPE_DATA_ |
> + TX_DESC_DATA0_FCS_;
> +
> + /* data0 will be programmed in one of other frame assembler functions */
> + return 0;
> +}
> +
> +static void lan743x_tx_frame_end(struct lan743x_tx *tx,
> + struct sk_buff *skb,
> + bool ignore_sync)
> +{
> + /* called only from within lan743x_tx_xmit_frame
> + * assuming tx->ring_lock has already been acquired
> + */
> + struct lan743x_tx_descriptor *tx_descriptor = NULL;
> + struct lan743x_tx_buffer_info *buffer_info = NULL;
> + struct lan743x_adapter *adapter = tx->adapter;
> + u32 tx_tail_flags = 0;
> +
> + /* wrap up previous descriptor */
> + tx->frame_data0 |= TX_DESC_DATA0_LS_;
> + tx->frame_data0 |= TX_DESC_DATA0_IOC_;
> + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
> + buffer_info = &tx->buffer_info[tx->frame_tail];
> + buffer_info->skb = skb;
> + if (ignore_sync)
> + buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
> + tx_descriptor->data0 = tx->frame_data0;
> + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
> + tx->last_tail = tx->frame_tail;
> + dma_wmb();
> + if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
> + tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
> + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
> + tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
> + TX_TAIL_SET_TOP_INT_EN_;
> + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
> + tx_tail_flags | tx->frame_tail);
> + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
> +}
> +
> +static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
> + struct sk_buff *skb)
> +{
> + int required_number_of_descriptors = 0;
> + unsigned int start_frame_length = 0;
> + unsigned int frame_length = 0;
> + unsigned int head_length = 0;
> + unsigned long irq_flags = 0;
> + bool ignore_sync = false;
> + int nr_frags = 0;
> + bool gso = false;
> + int j;
> +
> + spin_lock_irqsave(&tx->ring_lock, irq_flags);
> + required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
This can certainly execute outside of the spinlock.
> + if (required_number_of_descriptors >
> + lan743x_tx_get_avail_desc(tx)) {
This one, probably not.
> + if (required_number_of_descriptors > (tx->ring_size - 1)) {
> + dev_kfree_skb(skb);
> + } else {
> + /* save to overflow buffer */
> + tx->overflow_skb = skb;
> + netif_stop_queue(tx->adapter->netdev);
What is an overflow SKB?
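For reference, the usual pattern is to stop the queue once the ring cannot
take a worst-case frame and wake it again from the completion path, rather
than stashing the skb aside, e.g. (sketch):

	if (required_number_of_descriptors > lan743x_tx_get_avail_desc(tx)) {
		netif_stop_queue(tx->adapter->netdev);
		spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}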
[snip]
> +static struct net_device_stats *lan743x_netdev_get_stats(struct net_device *nd)
> +{
> + struct lan743x_adapter *adapter = netdev_priv(nd);
64-bit statistics, please.
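i.e. implement .ndo_get_stats64 in your net_device_ops and fill in a struct
rtnl_link_stats64. A sketch (the u64 counters in the adapter structure are
hypothetical):

	static void lan743x_netdev_get_stats64(struct net_device *netdev,
					       struct rtnl_link_stats64 *stats)
	{
		struct lan743x_adapter *adapter = netdev_priv(netdev);

		stats->rx_packets = adapter->rx_packets;
		stats->tx_packets = adapter->tx_packets;
		stats->rx_bytes = adapter->rx_bytes;
		stats->tx_bytes = adapter->tx_bytes;
	}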
--
Florian