Message-ID: <1294870394.3335.75.camel@edumazet-laptop>
Date: Wed, 12 Jan 2011 23:13:14 +0100
From: Eric Dumazet <eric.dumazet@...il.com>
To: Jarek Poplawski <jarkao2@...il.com>
Cc: David Miller <davem@...emloft.net>,
David Brownell <david-b@...bell.net>,
Neil Jones <neiljay@...il.com>, linux-usb@...r.kernel.org,
netdev@...r.kernel.org,
Alexander Duyck <alexander.h.duyck@...el.com>,
Jeff Kirsher <jeffrey.t.kirsher@...el.com>,
Michal Nazarewicz <mina86@...a86.com>,
Sandeep Gopalpet <sandeep.kumar@...escale.com>
Subject: Re: [PATCH] net: remove dev_txq_stats_fold()
On Wednesday 12 January 2011 at 22:18 +0100, Jarek Poplawski wrote:
> Btw, I updated Michal's email (according to linux-kernel).
>
> Jarek P.
>
> On Wed, Jan 12, 2011 at 10:05:18PM +0100, Jarek Poplawski wrote:
> > On Wed, Jan 12, 2011 at 04:02:00PM +0100, Eric Dumazet wrote:
> > > On Wednesday 12 January 2011 at 14:52 +0100, Eric Dumazet wrote:
> > >
> > > > Or even better, remove these counters since there are no users left but
> > > > ixgbe.
> > > >
> > > > (vlans, tunnels, ... now use percpu stats)
> > > >
> > > >
> > >
> > > Thanks Jarek for the reminder :)
> > >
> > > [PATCH] net: remove dev_txq_stats_fold()
> > >
> > > After recent changes (percpu stats on vlan/tunnels...), we no longer need
> > > the per struct netdev_queue tx_bytes/tx_packets/tx_dropped counters.
> > >
> > > The only remaining users are ixgbe, sch_teql & macvlan:
> >
> > And gianfar (not counting staging/bcm), but I'm not sure if that's all.
> >
Hmm, I did an "allyesconfig", but on x86_64 ;)
> > >
> > > 1) ixgbe can be converted to use existing tx_ring counters.
> > >
> > > 2) macvlan incremented txq->tx_dropped; it can use the
> > > dev->stats.tx_dropped counter.
> > >
> > > 3) sch_teql: almost revert ab35cd4b8f42 (Use net_device internal stats).
> > > Now that we have ndo_get_stats64(), use it.
> >
> > Btw, why doesn't sch_teql need locking (on 32-bit)?
Yes, you're right, thanks!
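(Side note, purely illustrative and not part of the patch: had the counters
stayed u64, a 32-bit reader could observe a torn value, so writer and reader
would have to be bracketed by a u64_stats_sync seqcount, roughly as in the
sketch below; the struct and helper names here are made up. Keeping the fields
"unsigned long" sidesteps that, and on 64-bit u64_stats_sync compiles away to
nothing anyway.)

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* hypothetical u64 per-master counters (NOT what v2 does) */
struct teql_u64_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* writer (xmit path): bump the seqcount so readers can detect a race */
static void teql_u64_count(struct teql_u64_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader (ndo_get_stats64): retry if a writer was active meanwhile */
static void teql_u64_read(struct teql_u64_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->tx_packets;
		*bytes = s->tx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}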
[PATCH v2] net: remove dev_txq_stats_fold()
After recent changes (percpu stats on vlan/tunnels...), we no longer need
the per struct netdev_queue tx_bytes/tx_packets/tx_dropped counters.
The only remaining users are ixgbe, sch_teql, gianfar & macvlan:
1) ixgbe can be converted to use existing tx_ring counters.
2) macvlan incremented txq->tx_dropped; it can use the
dev->stats.tx_dropped counter.
3) sch_teql: almost revert ab35cd4b8f42 (Use net_device internal stats).
Now that we have ndo_get_stats64(), use it, even for "unsigned long"
fields (no need to bring back a struct net_device_stats).
4) gianfar adds a stats structure per tx queue to hold
tx_bytes/tx_packets
This removes a lockdep warning (and possible lockup) in the rndis gadget,
which calls dev_get_stats() from hard IRQ context.
Ref: http://www.spinics.net/lists/netdev/msg149202.html
Reported-by: Neil Jones <neiljay@...il.com>
Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
CC: Jarek Poplawski <jarkao2@...il.com>
CC: Alexander Duyck <alexander.h.duyck@...el.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@...el.com>
CC: Sandeep Gopalpet <sandeep.kumar@...escale.com>
CC: Michal Nazarewicz <mina86@...a86.com>
---
v2: added gianfar, removed u64 fields from sch_teql to avoid extra
locking
drivers/net/gianfar.c | 10 ++++------
drivers/net/gianfar.h | 10 ++++++++++
drivers/net/ixgbe/ixgbe_main.c | 23 ++++++++++++++++-------
drivers/net/macvtap.c | 2 +-
include/linux/netdevice.h | 5 -----
net/core/dev.c | 29 -----------------------------
net/sched/sch_teql.c | 26 +++++++++++++++++++++-----
7 files changed, 52 insertions(+), 53 deletions(-)
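(Background on the "percpu stats" mentioned in the changelog, in case it helps
review: the vlan/tunnel drivers referred to now keep per-cpu counters and fold
them in their ndo_get_stats64(), which is why the per-txq counters lost their
last generic user. A minimal, self-contained sketch of that pattern follows;
every name in it is invented for illustration and nothing below is part of
this patch.)

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* illustrative per-cpu TX counters */
struct example_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

static struct example_pcpu_stats __percpu *example_alloc_stats(void)
{
	return alloc_percpu(struct example_pcpu_stats);
}

/* xmit path: lockless per-cpu update (runs with BH disabled) */
static void example_count_tx(struct example_pcpu_stats __percpu *stats,
			     unsigned int len)
{
	struct example_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* ndo_get_stats64: fold all cpus, retrying if a writer raced with us */
static void example_fold_tx(struct example_pcpu_stats __percpu *stats,
			    struct rtnl_link_stats64 *out)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&s->syncp);
			packets = s->tx_packets;
			bytes = s->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));

		out->tx_packets += packets;
		out->tx_bytes += bytes;
	}
}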
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675..119aa20 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev)
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct netdev_queue *txq;
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
int i = 0;
@@ -450,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
dev->stats.tx_bytes = tx_bytes;
@@ -2109,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- txq->tx_bytes += skb->len;
- txq->tx_packets ++;
+ tx_queue->stats.tx_bytes += skb->len;
+ tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb..54de413 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
MQ_MG_MODE
};
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
/**
* struct gfar_priv_tx_q - per tx queue structure
* @txlock: per queue tx spin lock
* @tx_skbuff:skb pointers
* @skb_curtx: to be used skb pointer
* @skb_dirtytx:the last used skb pointer
+ * @stats: bytes/packets stats
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
struct txbd8 *dirty_tx;
+ struct tx_q_stats stats;
struct net_device *dev;
struct gfar_priv_grp *grp;
u16 skb_curtx;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610..602078b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- struct net_device *netdev = tx_ring->netdev;
- struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
ixgbe_atr(tx_ring, skb, tx_flags, protocol);
- txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
- txq->tx_bytes += skb->len;
- txq->tx_packets++;
ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
@@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
- /* accurate rx/tx bytes/packets stats */
- dev_txq_stats_fold(netdev, stats);
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
stats->rx_bytes += bytes;
}
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845af..5933621 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
rcu_read_lock_bh();
vlan = rcu_dereference(q->vlan);
if (vlan)
- netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
return err;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be4957c..d971346 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,9 +520,6 @@ struct netdev_queue {
* please use this field instead of dev->trans_start
*/
unsigned long trans_start;
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_dropped;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -2265,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
-extern void dev_txq_stats_fold(const struct net_device *dev,
- struct rtnl_link_stats64 *stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
diff --git a/net/core/dev.c b/net/core/dev.c
index a3ef808..83507c2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5523,34 +5523,6 @@ void netdev_run_todo(void)
}
}
-/**
- * dev_txq_stats_fold - fold tx_queues stats
- * @dev: device to get statistics from
- * @stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
- unsigned int i;
- struct netdev_queue *txq;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- spin_lock_bh(&txq->_xmit_lock);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
- tx_dropped += txq->tx_dropped;
- spin_unlock_bh(&txq->_xmit_lock);
- }
- if (tx_bytes || tx_packets || tx_dropped) {
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_packets;
- stats->tx_dropped = tx_dropped;
- }
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
/* Convert net_device_stats to rtnl_link_stats64. They have the same
* fields in the same order, with only the type differing.
*/
@@ -5594,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else {
netdev_stats_to_stats64(storage, &dev->stats);
- dev_txq_stats_fold(dev, storage);
}
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
return storage;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d..8ab66a4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@ struct teql_master
struct net_device *dev;
struct Qdisc *slaves;
struct list_head master_list;
+ unsigned long tx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_errors;
+ unsigned long tx_dropped;
};
struct teql_sched_data
@@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct teql_master *master = netdev_priv(dev);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
struct Qdisc *start, *q;
int busy;
int nores;
@@ -314,8 +317,8 @@ restart:
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
- txq->tx_packets++;
- txq->tx_bytes += length;
+ master->tx_packets++;
+ master->tx_bytes += length;
return NETDEV_TX_OK;
}
__netif_tx_unlock(slave_txq);
@@ -342,10 +345,10 @@ restart:
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
- dev->stats.tx_errors++;
+ master->tx_errors++;
drop:
- txq->tx_dropped++;
+ master->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev)
return 0;
}
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct teql_master *m = netdev_priv(dev);
+
+ stats->tx_packets = m->tx_packets;
+ stats->tx_bytes = m->tx_bytes;
+ stats->tx_errors = m->tx_errors;
+ stats->tx_dropped = m->tx_dropped;
+ return stats;
+}
+
static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
struct teql_master *m = netdev_priv(dev);
@@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = {
.ndo_open = teql_master_open,
.ndo_stop = teql_master_close,
.ndo_start_xmit = teql_master_xmit,
+ .ndo_get_stats64 = teql_master_stats64,
.ndo_change_mtu = teql_master_mtu,
};
--