Message-ID: <20080714212223.GA4849@ami.dom.local>
Date: Mon, 14 Jul 2008 23:22:23 +0200
From: Jarek Poplawski <jarkao2@...il.com>
To: Josip Rodin <joy@...uzijast.net>
Cc: Patrick McHardy <kaber@...sh.net>,
David Miller <davem@...emloft.net>, mchan@...adcom.com,
billfink@...dspring.com, bhutchings@...arflare.com,
netdev@...r.kernel.org, mirrors@...ian.org, devik@....cz
Subject: Re: bnx2_poll panicking kernel
On Mon, Jul 14, 2008 at 10:21:51PM +0200, Josip Rodin wrote:
> On Mon, Jul 14, 2008 at 07:20:55PM +0200, Jarek Poplawski wrote:
...
> > BTW, I wonder how Josip's testing is going?
>
> Do you want me to try out David's patch? I was hoping to get a definite okay
> from someone before applying it, given that this is a production machine
> for us and we do need it in *some* kind of an operational state.
Hmm... if you trust me more than David?! As a matter of fact, I thought
it was already under testing, but since I have some doubts, I attach my
version of David's patch below. IMHO, it looks safe, but one never knows...
> I've got many more tens of kilobytes of logs from the previous debugging
> patches, if you want I can send them over.
This debugging, I guess, shows corrupted data, but the reason is hard
to find. David found one of the possible reasons, and it needs checking.
I don't think this patch, in any version, can do more damage than doing
nothing - unless you prefer to stick with Michael's patch for the next
kernels (BTW, don't remove that patch yet).
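To illustrate the idea (just a userspace sketch with made-up names, not
the kernel code itself): the parent qdisc should propagate the child's
enqueue verdict and account the packet only when the child really
accepted it, so sch->q.qlen never claims packets that cannot later be
dequeued:

/* Toy model of the accounting rule the patch enforces: only bump the
 * parent's queue length when the child actually accepted the packet.
 * All names here are hypothetical, for illustration only.
 */
#include <stdio.h>

#define NET_XMIT_SUCCESS 0
#define NET_XMIT_DROP    1
#define NET_XMIT_CN      2  /* congestion notified: packet gone, but not a hard drop */

struct toy_qdisc {
        int qlen;   /* packets this qdisc believes are queued */
        int limit;  /* capacity */
};

/* Child enqueue: may fail, and the caller must not assume success. */
static int child_enqueue(struct toy_qdisc *child)
{
        if (child->qlen >= child->limit)
                return NET_XMIT_DROP;
        child->qlen++;
        return NET_XMIT_SUCCESS;
}

/* Parent enqueue: propagate the child's verdict instead of
 * unconditionally returning NET_XMIT_SUCCESS and incrementing qlen. */
static int parent_enqueue(struct toy_qdisc *parent, struct toy_qdisc *child)
{
        int ret = child_enqueue(child);

        if (ret == NET_XMIT_SUCCESS)
                parent->qlen++;  /* count only packets we can later dequeue */
        return ret;
}

int main(void)
{
        struct toy_qdisc parent = { 0, 0 }, child = { 0, 1 };
        int i;

        for (i = 0; i < 3; i++)
                parent_enqueue(&parent, &child);

        /* With the old behavior, parent.qlen would be 3 while the child
         * holds only 1 packet - the inconsistency the patch avoids. */
        printf("parent qlen=%d child qlen=%d\n", parent.qlen, child.qlen);
        return 0;
}
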
Thanks,
Jarek P.
---
diff -Nurp 2.6.25.4-/net/sched/sch_htb.c 2.6.25.4+/net/sched/sch_htb.c
--- 2.6.25.4-/net/sched/sch_htb.c	2008-05-17 19:23:44.000000000 +0200
+++ 2.6.25.4+/net/sched/sch_htb.c	2008-07-14 22:39:28.000000000 +0200
@@ -576,6 +576,7 @@ static int htb_enqueue(struct sk_buff *s
 		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
+			ret = NET_XMIT_SUCCESS;
 		} else {
 			kfree_skb(skb);
 			sch->qstats.drops++;
@@ -588,22 +589,27 @@ static int htb_enqueue(struct sk_buff *s
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
-		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
-		return NET_XMIT_DROP;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += skb->len;
-		htb_activate(q, cl);
+		ret = cl->un.leaf.q->enqueue(skb, cl->un.leaf.q);
+		if (ret == NET_XMIT_DROP) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		} else {
+			cl->bstats.packets +=
+				skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
+			cl->bstats.bytes += skb->len;
+			if (ret == NET_XMIT_SUCCESS)
+				htb_activate(q, cl);
+		}
 	}
 
-	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += skb->len;
-	return NET_XMIT_SUCCESS;
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		sch->bstats.packets +=
+			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
+		sch->bstats.bytes += skb->len;
+	}
+	return ret;
 }
 
 /* TODO: requeuing packet charges it to policers again !! */
@@ -618,6 +624,7 @@ static int htb_requeue(struct sk_buff *s
 		/* enqueue to helper queue */
 		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_head(&q->direct_queue, skb);
+			ret = NET_XMIT_SUCCESS;
 		} else {
 			__skb_queue_head(&q->direct_queue, skb);
 			tskb = __skb_dequeue_tail(&q->direct_queue);
@@ -632,17 +639,21 @@ static int htb_requeue(struct sk_buff *s
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
-		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
-		return NET_XMIT_DROP;
-	} else
-		htb_activate(q, cl);
-
-	sch->q.qlen++;
-	sch->qstats.requeues++;
-	return NET_XMIT_SUCCESS;
+	} else {
+		ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q);
+		if (ret == NET_XMIT_DROP) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		} else if (ret == NET_XMIT_SUCCESS) {
+			htb_activate(q, cl);
+		}
+	}
+
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		sch->qstats.requeues++;
+	}
+	return ret;
 }
 
 /**