Message-Id: <20121014143548.230457808@decadent.org.uk>
Date: Sun, 14 Oct 2012 15:37:20 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: akpm@...ux-foundation.org, alan@...rguk.ukuu.org.uk,
Alexandre Bounine <alexandre.bounine@....com>,
Matt Porter <mporter@...nel.crashing.org>,
"David S. Miller" <davem@...emloft.net>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [ 107/147] rapidio/rionet: fix multicast packet transmit logic
3.2-stable review patch. If anyone has any objections, please let me know.
------------------
From: Alexandre Bounine <alexandre.bounine@....com>
commit 7c4a6106d6451fc03c491e61df37c044505d843a upstream.
Fix the multicast packet transmit logic to account for repeated transmission
of a single skb:
- correct the check for available buffers (without this, the bug can produce
  a NULL pointer crash dump under heavy traffic);
- update the skb user count (an incorrect user count triggers a warning dump
  from the net_tx_action routine during multicast transfers on systems with
  three or more rionet participants); a standalone sketch of this accounting
  follows below.
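
The second item is the subtle one: for a multicast frame the same skb is
queued once per active peer, and each per-peer transmit completion is
expected to drop one reference. A minimal userspace sketch of that
accounting (struct buf, buf_put() and the counts here are illustrative
stand-ins, not kernel API) looks like this:

/*
 * Userspace model of the reference counting the patch introduces: when one
 * buffer is handed to N active peers, its reference count must be raised
 * N-1 times so that the N per-peer completions can each drop one reference
 * without freeing the buffer early.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int users;		/* analogue of skb->users */
	char payload[64];
};

static void buf_put(struct buf *b)	/* analogue of freeing the skb on completion */
{
	if (--b->users == 0) {
		printf("buffer freed\n");
		free(b);
	}
}

int main(void)
{
	int nact = 3;			/* active peers, as counted by the patch */
	struct buf *b = calloc(1, sizeof(*b));
	int i, count = 0;

	b->users = 1;			/* one reference held by the sender */

	for (i = 0; i < nact; i++) {
		/* queue_to_peer(b, i) would go here */
		if (count)		/* every copy after the first... */
			b->users++;	/* ...needs its own reference */
		count++;
	}

	/* each peer's transmit completion drops one reference */
	for (i = 0; i < nact; i++)
		buf_put(b);

	return 0;
}

With nact peers the sender ends up holding nact references, so the buffer
is freed only after the last completion, mirroring the
"if (count) atomic_inc(&skb->users);" hunk in the patch.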
Signed-off-by: Alexandre Bounine <alexandre.bounine@....com>
Cc: Matt Porter <mporter@...nel.crashing.org>
Cc: David S. Miller <davem@...emloft.net>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
drivers/net/rionet.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 91d2588..1470d3e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -79,6 +79,7 @@ static int rionet_capable = 1;
* on system trade-offs.
*/
static struct rio_dev **rionet_active;
+static int nact; /* total number of active rionet peers */
#define is_rionet_capable(src_ops, dst_ops) \
((src_ops & RIO_SRC_OPS_DATA_MSG) && \
@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ethhdr *eth = (struct ethhdr *)skb->data;
u16 destid;
unsigned long flags;
+ int add_num = 1;
local_irq_save(flags);
if (!spin_trylock(&rnet->tx_lock)) {
@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_LOCKED;
}
- if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+ if (is_multicast_ether_addr(eth->h_dest))
+ add_num = nact;
+
+ if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
netif_stop_queue(ndev);
spin_unlock_irqrestore(&rnet->tx_lock, flags);
printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
if (is_multicast_ether_addr(eth->h_dest)) {
+ int count = 0;
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
i++)
- if (rionet_active[i])
+ if (rionet_active[i]) {
rionet_queue_tx_msg(skb, ndev,
rionet_active[i]);
+ if (count)
+ atomic_inc(&skb->users);
+ count++;
+ }
} else if (RIONET_MAC_MATCH(eth->h_dest)) {
destid = RIONET_GET_DESTID(eth->h_dest);
if (rionet_active[destid])
@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
if (info == RIONET_DOORBELL_JOIN) {
if (!rionet_active[sid]) {
list_for_each_entry(peer, &rionet_peers, node) {
- if (peer->rdev->destid == sid)
+ if (peer->rdev->destid == sid) {
rionet_active[sid] = peer->rdev;
+ nact++;
+ }
}
rio_mport_send_doorbell(mport, sid,
RIONET_DOORBELL_JOIN);
}
} else if (info == RIONET_DOORBELL_LEAVE) {
rionet_active[sid] = NULL;
+ nact--;
} else {
if (netif_msg_intr(rnet))
printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
rc = rionet_setup_netdev(rdev->net->hport, ndev);
rionet_check = 1;
+ nact = 0;
}
/*
--