Message-ID: <9F4C7D19E8361D4C94921B95BE08B81B950713@zin33exm22.fsl.freescale.net>
Date: Thu, 5 Nov 2009 22:53:08 +0530
From: "Kumar Gopalpet-B05799" <B05799@...escale.com>
To: <avorontsov@...mvista.com>, "Jon Loeliger" <jdl@....com>
Cc: <linuxppc-dev@...abs.org>,
"Jason Wessel" <jason.wessel@...driver.com>,
"Fleming Andy-AFLEMING" <afleming@...escale.com>,
"David Miller" <davem@...emloft.net>, <netdev@...r.kernel.org>,
"Lennert Buytenhek" <buytenh@...tstofly.org>,
"Stephen Hemminger" <shemminger@...tta.com>
Subject: RE: [PATCH RFC] gianfar: Do not call skb recycling with disabled IRQs
[.....]
> drivers/net/gianfar.c | 19 +++----------------
> 1 files changed, 3 insertions(+), 16 deletions(-)
>
>diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
>index 197b358..a0ae604 100644
>--- a/drivers/net/gianfar.c
>+++ b/drivers/net/gianfar.c
>@@ -1899,10 +1899,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
> u32 lstatus;
> int i, rq = 0;
> u32 bufaddr;
>- unsigned long flags;
> unsigned int nr_frags, length;
>
>-
> rq = skb->queue_mapping;
> tx_queue = priv->tx_queue[rq];
> txq = netdev_get_tx_queue(dev, rq);
>@@ -1928,14 +1926,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
> /* total number of fragments in the SKB */
> nr_frags = skb_shinfo(skb)->nr_frags;
>
>- spin_lock_irqsave(&tx_queue->txlock, flags);
>-
> /* check if there is space to queue this packet */
> if ((nr_frags+1) > tx_queue->num_txbdfree) {
> /* no space, stop the queue */
> netif_tx_stop_queue(txq);
> dev->stats.tx_fifo_errors++;
>- spin_unlock_irqrestore(&tx_queue->txlock, flags);
> return NETDEV_TX_BUSY;
> }
>
>@@ -2033,9 +2028,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
> /* Tell the DMA to go go go */
> gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
>
>- /* Unlock priv */
>- spin_unlock_irqrestore(&tx_queue->txlock, flags);
>-
> return NETDEV_TX_OK;
> }
>
>@@ -2550,7 +2542,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
> int tx_cleaned = 0, i, left_over_budget = budget;
> unsigned long serviced_queues = 0;
> int num_queues = 0;
>- unsigned long flags;
>
> num_queues = gfargrp->num_rx_queues;
> budget_per_queue = budget/num_queues;
>@@ -2570,13 +2561,9 @@ static int gfar_poll(struct napi_struct *napi, int budget)
> rx_queue = priv->rx_queue[i];
> tx_queue = priv->tx_queue[rx_queue->qindex];
>
>- /* If we fail to get the lock,
>- * don't bother with the TX BDs */
>- if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
>- tx_cleaned += gfar_clean_tx_ring(tx_queue);
>- spin_unlock_irqrestore(&tx_queue->txlock, flags);
>- }
>+ netif_tx_lock_bh(priv->ndev);
Will this not lead to locking all the tx queues, even though at this
point we are only working on a particular queue?
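
If I am reading netif_tx_lock() correctly, it takes _xmit_lock on
every tx queue of the device (plus tx_global_lock), so this would
serialize against all the queues at once. A rough sketch of what I
mean instead, locking only the queue being cleaned (untested, and
assuming tx_queue->qindex maps 1:1 onto the netdev tx queue index):

	struct netdev_queue *txq =
		netdev_get_tx_queue(priv->ndev, tx_queue->qindex);

	/* Take only this queue's _xmit_lock (BHs disabled) instead
	 * of locking every tx queue on the device. */
	__netif_tx_lock_bh(txq);
	tx_cleaned += gfar_clean_tx_ring(tx_queue);
	__netif_tx_unlock_bh(txq);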
>+ tx_cleaned += gfar_clean_tx_ring(tx_queue);
>+ netif_tx_unlock_bh(priv->ndev);
>
> rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, budget_per_queue);
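
Dropping txlock from the xmit path itself looks fine to me, since the
core already serializes ndo_start_xmit() per tx queue. Roughly, from
net/core/dev.c (paraphrased, not the exact code):

	txq = dev_pick_tx(dev, skb);
	HARD_TX_LOCK(dev, txq, cpu);	/* grabs txq->_xmit_lock */
	if (!netif_tx_queue_stopped(txq))
		rc = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);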
>--
--
Thanks
Sandeep