Message-ID: <1406195579-13339-1-git-send-email-sonic.adi@gmail.com>
Date: Thu, 24 Jul 2014 17:52:59 +0800
From: Sonic Zhang <sonic.adi@...il.com>
To: "David S. Miller" <davem@...emloft.net>, <netdev@...r.kernel.org>
CC: Francois Romieu <romieu@...zoreil.com>,
Eric Dumazet <eric.dumazet@...il.com>,
<adi-buildroot-devel@...ts.sourceforge.net>,
Sonic Zhang <sonic.zhang@...log.com>
Subject: [PATCH v9] bfin_mac: convert bfin Ethernet driver to NAPI framework
From: Sonic Zhang <sonic.zhang@...log.com>
Ethernet RX DMA buffers are now polled from the NAPI poll routine rather than
received directly in the DMA RX interrupt handler.
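In outline, the RX path after this patch follows the usual NAPI shape. The
fragment below is only a condensed restatement of the bfin_mac.c hunks further
down (descriptor walking, checksum handling and error paths omitted), included
as a reading aid rather than as additional code:

  /* DMA RX interrupt: ack the DMA status, mask the RX IRQ, defer to NAPI */
  if (status & DMA_DONE) {
          disable_irq_nosync(IRQ_MAC_RX);
          set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
          napi_schedule(&lp->napi);
  }

  /* NAPI poll: hand up to 'budget' frames to the stack via napi_gro_receive(),
   * then complete and unmask the RX IRQ once the ring is drained */
  while (current_rx_ptr->status.status_word != 0 && i < budget) {
          bfin_mac_rx(lp);
          i++;
  }
  if (i < budget) {
          napi_complete(napi);
          if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
                  enable_irq(IRQ_MAC_RX);
  }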
Signed-off-by: Sonic Zhang <sonic.zhang@...log.com>
---
v2-changes:
- avoid testing the NAPI_STATE_NPSVC bit in the net device driver
v3-changes:
- use tabs while indenting the code
v4-changes:
- unconditionally complete the NAPI poll and re-enable the MAC_RX IRQ
v5-changes:
- align continuation lines with the open parenthesis
v6-changes:
- keep a count, in the device private data, of how many times bfin_mac_interrupt
  has disabled the MAC RX IRQ, and call enable_irq in bfin_mac_poll accordingly
v7-changes:
- only disable the IRQ when rx_irq_disabled is zero before the increment,
  and only enable the IRQ when decrementing rx_irq_disabled brings it back
  to zero
v8-changes:
- disable the MAC RX IRQ for each call to the NAPI poll in the netpoll loop,
  keeping the IRQ disable/enable calls balanced.
  If netpoll scavenges between napi_schedule_prep and lp->rx_irq_disabled++
  on a different CPU, bfin_mac_interrupt may later call __napi_schedule on a
  NAPI context that has already been completed in the netpoll...bfin_mac_poll()
  path: bfin_mac_poll won't be run from net_rx_action and IRQ_MAC_RX will
  stay disabled.
v9-changes:
- Limit the number of RX buffers to NAPI_POLL_WEIGHT (64); the NAPI wiring is
  summarized after this changelog.
- Remove the unused budget parameter from bfin_mac_rx()
- Remove redundant parentheses
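As referenced from the v9 note above, here is a short summary, condensed from
the hunks below (probe error handling omitted), of how the NAPI context is
wired up; the Kconfig change keeps CONFIG_BFIN_RX_DESC_NUM, which is also
passed as the NAPI weight, within NAPI_POLL_WEIGHT (64):

  /* probe: register the poll routine; the RX ring size doubles as the weight */
  netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);

  /* open/close: NAPI must be enabled before the RX IRQ can schedule it */
  napi_enable(&lp->napi);   /* bfin_mac_open(), before netif_start_queue() */
  napi_disable(&lp->napi);  /* bfin_mac_close(), after netif_stop_queue() */

  /* remove and probe error path: tear the NAPI context down again */
  netif_napi_del(&lp->napi);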
---
drivers/net/ethernet/adi/Kconfig | 3 +-
drivers/net/ethernet/adi/bfin_mac.c | 79 +++++++++++++++++++++++--------------
drivers/net/ethernet/adi/bfin_mac.h | 3 ++
3 files changed, 53 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index f952fff..c9cd359 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -52,8 +52,7 @@ config BFIN_TX_DESC_NUM
config BFIN_RX_DESC_NUM
int "Number of receive buffer packets"
depends on BFIN_MAC
- range 20 100 if BFIN_MAC_USE_L1
- range 20 800
+ range 20 64
default "20"
---help---
Set the number of buffer packets used in driver.
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 7ae74d4..afa6684 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1218,11 +1218,11 @@ out:
#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
-static void bfin_mac_rx(struct net_device *dev)
+static void bfin_mac_rx(struct bfin_mac_local *lp)
{
+ struct net_device *dev = lp->ndev;
struct sk_buff *skb, *new_skb;
unsigned short len;
- struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
unsigned int i;
unsigned char fcs[ETH_FCS_LEN + 1];
@@ -1256,7 +1256,7 @@ static void bfin_mac_rx(struct net_device *dev)
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
- len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN);
/* Deduce Ethernet FCS length from Ethernet payload length */
len -= ETH_FCS_LEN;
skb_put(skb, len);
@@ -1294,7 +1294,8 @@ static void bfin_mac_rx(struct net_device *dev)
}
#endif
- netif_rx(skb);
+ napi_gro_receive(&lp->napi, skb);
+
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
out:
@@ -1302,41 +1303,52 @@ out:
current_rx_ptr = current_rx_ptr->next;
}
+static int bfin_mac_poll(struct napi_struct *napi, int budget)
+{
+ int i = 0;
+ struct bfin_mac_local *lp = container_of(napi,
+ struct bfin_mac_local,
+ napi);
+
+ while (current_rx_ptr->status.status_word != 0 && i < budget) {
+ bfin_mac_rx(lp);
+ i++;
+ }
+
+ if (i < budget) {
+ napi_complete(napi);
+ if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
+ enable_irq(IRQ_MAC_RX);
+ }
+
+ return i;
+}
+
/* interrupt routine to handle rx and error signal */
static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- int number = 0;
-
-get_one_packet:
- if (current_rx_ptr->status.status_word == 0) {
- /* no more new packet received */
- if (number == 0) {
- if (current_rx_ptr->next->status.status_word != 0) {
- current_rx_ptr = current_rx_ptr->next;
- goto real_rx;
- }
- }
- bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
- DMA_DONE | DMA_ERR);
- return IRQ_HANDLED;
+ struct bfin_mac_local *lp = netdev_priv(dev_id);
+ u32 status;
+
+ status = bfin_read_DMA1_IRQ_STATUS();
+
+ bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR);
+ if (status & DMA_DONE) {
+ disable_irq_nosync(IRQ_MAC_RX);
+ set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
+ napi_schedule(&lp->napi);
}
-real_rx:
- bfin_mac_rx(dev);
- number++;
- goto get_one_packet;
+ return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bfin_mac_poll(struct net_device *dev)
+static void bfin_mac_poll_controller(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- disable_irq(IRQ_MAC_RX);
bfin_mac_interrupt(IRQ_MAC_RX, dev);
tx_reclaim_skb(lp);
- enable_irq(IRQ_MAC_RX);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1428,14 +1440,13 @@ static void bfin_mac_timeout(struct net_device *dev)
tx_list_head = tx_list_head->next;
}
- if (netif_queue_stopped(lp->ndev))
- netif_wake_queue(lp->ndev);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
bfin_mac_enable(lp->phydev);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
}
static void bfin_mac_multicast_hash(struct net_device *dev)
@@ -1562,6 +1573,7 @@ static int bfin_mac_open(struct net_device *dev)
return ret;
pr_debug("hardware init finished\n");
+ napi_enable(&lp->napi);
netif_start_queue(dev);
netif_carrier_on(dev);
@@ -1579,6 +1591,7 @@ static int bfin_mac_close(struct net_device *dev)
pr_debug("%s: %s\n", dev->name, __func__);
netif_stop_queue(dev);
+ napi_disable(&lp->napi);
netif_carrier_off(dev);
phy_stop(lp->phydev);
@@ -1604,7 +1617,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bfin_mac_poll,
+ .ndo_poll_controller = bfin_mac_poll_controller,
#endif
};
@@ -1689,6 +1702,9 @@ static int bfin_mac_probe(struct platform_device *pdev)
lp->tx_reclaim_timer.data = (unsigned long)lp;
lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+ lp->flags = 0;
+ netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
+
spin_lock_init(&lp->lock);
/* now, enable interrupts */
@@ -1723,6 +1739,7 @@ out_err_phc:
out_err_reg_ndev:
free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
+ netif_napi_del(&lp->napi);
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
@@ -1743,6 +1760,8 @@ static int bfin_mac_remove(struct platform_device *pdev)
unregister_netdev(ndev);
+ netif_napi_del(&lp->napi);
+
free_irq(IRQ_MAC_RX, ndev);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 6dec86a..d1217db 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -26,6 +26,7 @@
#endif
#define TX_RECLAIM_JIFFIES (HZ / 5)
+#define BFIN_MAC_RX_IRQ_DISABLED 1
struct dma_descriptor {
struct dma_descriptor *next_dma_desc;
@@ -80,6 +81,8 @@ struct bfin_mac_local {
int irq_wake_requested;
struct timer_list tx_reclaim_timer;
struct net_device *ndev;
+ struct napi_struct napi;
+ unsigned long flags;
/* Data for EMAC_VLAN1 regs */
u16 vlan1_mask, vlan2_mask;
--
1.8.2.3